cmd/dockerd/docker.go#main
func main() {
// ********************************** NOTICE ********************************** //
if reexec.Init() {
// ********************************** NOTICE ********************************** //
return
}
// initial log formatting; this setting is updated after the daemon configuration is loaded.
logrus.SetFormatter(&logrus.TextFormatter{
TimestampFormat: jsonmessage.RFC3339NanoFixed,
FullTimestamp: true,
})
// Set terminal emulation based on platform as required.
_, stdout, stderr := term.StdStreams()
// @jhowardmsft - maybe there is a historic reason why on non-Windows, stderr is used
// here. However, on Windows it makes no sense and there is no need.
if runtime.GOOS == "windows" {
logrus.SetOutput(stdout)
} else {
logrus.SetOutput(stderr)
}
onError := func(err error) {
fmt.Fprintf(stderr, "%s\n", err)
os.Exit(1)
}
// ********************************** NOTICE ********************************** //
cmd, err := newDaemonCommand()
// ********************************** NOTICE ********************************** //
if err != nil {
onError(err)
}
cmd.SetOutput(stdout)
if err := cmd.Execute(); err != nil {
onError(err)
}
}
1) pkg/reexec/reexec.go#Init
var registeredInitializers = make(map[string]func())
// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
if _, exists := registeredInitializers[name]; exists {
panic(fmt.Sprintf("reexec func already registered under name %q", name))
}
registeredInitializers[name] = initializer
}
// Init is called as the first part of the exec process and returns true if an
// initialization function was called.
func Init() bool {
initializer, exists := registeredInitializers[os.Args[0]]
if exists {
initializer()
return true
}
return false
}
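Outside the excerpt, a typical reexec consumer registers an initializer in init() and then re-executes the current binary under that name. Below is a minimal sketch, assuming a hypothetical initializer name "my-init"; reexec.Command comes from the same pkg/reexec package and rewrites os.Args[0] so that Init() finds the registered function in the child.

package main

import (
    "fmt"
    "os"

    "github.com/docker/docker/pkg/reexec"
)

func init() {
    // Runs only in the re-executed child, where os.Args[0] == "my-init".
    reexec.Register("my-init", func() {
        fmt.Println("running inside the re-executed child")
    })
}

func main() {
    if reexec.Init() { // true in the child: the initializer already ran
        return
    }
    // Parent path: spawn ourselves again under the registered name.
    cmd := reexec.Command("my-init")
    cmd.Stdout = os.Stdout
    if err := cmd.Run(); err != nil {
        panic(err)
    }
}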
2) cmd/dockerd/docker.go#newDaemonCommand
func newDaemonCommand() (*cobra.Command, error) {
opts := newDaemonOptions(config.New())
cmd := &cobra.Command{
Use: "dockerd [OPTIONS]",
Short: "A self-sufficient runtime for containers.",
SilenceUsage: true,
SilenceErrors: true,
Args: cli.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
opts.flags = cmd.Flags()
// ********************************** NOTICE ********************************** //
return runDaemon(opts)
// ********************************** NOTICE ********************************** //
},
DisableFlagsInUseLine: true,
Version: fmt.Sprintf("%s, build %s", dockerversion.Version, dockerversion.GitCommit),
}
cli.SetupRootCommand(cmd)
flags := cmd.Flags()
flags.BoolP("version", "v", false, "Print version information and quit")
defaultDaemonConfigFile, err := getDefaultDaemonConfigFile()
if err != nil {
return nil, err
}
flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "Daemon configuration file")
opts.InstallFlags(flags)
if err := installConfigFlags(opts.daemonConfig, flags); err != nil {
return nil, err
}
installServiceFlags(flags)
return cmd, nil
}
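For readers unfamiliar with cobra, the wiring above reduces to a command whose RunE closure reads flags that were bound to an options struct before Execute. A minimal, self-contained sketch with hypothetical command and flag defaults (not dockerd's):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    var configFile string

    cmd := &cobra.Command{
        Use:           "mydaemon [OPTIONS]",
        Short:         "A toy daemon command.",
        SilenceUsage:  true, // don't print usage text on runtime errors
        SilenceErrors: true, // let the caller decide how to report errors
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("starting with config:", configFile)
            return nil
        },
    }
    cmd.Flags().StringVar(&configFile, "config-file", "/etc/mydaemon.json", "Daemon configuration file")

    if err := cmd.Execute(); err != nil {
        fmt.Println(err)
    }
}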
2.1) cmd/dockerd/docker_unix.go#runDaemon
func runDaemon(opts *daemonOptions) error {
daemonCli := NewDaemonCli()
return daemonCli.start(opts)
}
// NewDaemonCli returns a daemon CLI
func NewDaemonCli() *DaemonCli {
return &DaemonCli{}
}
2.1.1) cmd/dockerd/daemon.go#DaemonCli.start
func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
stopc := make(chan bool)
defer close(stopc)
// warn from uuid package when running the daemon
uuid.Loggerf = logrus.Warnf
opts.SetDefaultOptions(opts.flags)
if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
return err
}
if err := configureDaemonLogs(cli.Config); err != nil {
return err
}
cli.configFile = &opts.configFile
cli.flags = opts.flags
if cli.Config.Debug {
debug.Enable()
}
if cli.Config.Experimental {
logrus.Warn("Running experimental build")
if cli.Config.IsRootless() {
logrus.Warn("Running in rootless mode. Cgroups, AppArmor, and CRIU are disabled.")
}
} else {
if cli.Config.IsRootless() {
return fmt.Errorf("rootless mode is supported only when running in experimental mode")
}
}
// return human-friendly error before creating files
if runtime.GOOS == "linux" && os.Geteuid() != 0 {
return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
}
system.InitLCOW(cli.Config.Experimental)
if err := setDefaultUmask(); err != nil {
return err
}
// Create the daemon root before we create ANY other files (PID, or migrate keys)
// to ensure the appropriate ACL is set (particularly relevant on Windows)
// ********************************** NOTICE ********************************** //
if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
// ********************************** NOTICE ********************************** //
return err
}
if err := system.MkdirAll(cli.Config.ExecRoot, 0700, ""); err != nil {
return err
}
potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
if cli.Pidfile != "" {
pf, err := pidfile.New(cli.Pidfile)
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
defer func() {
if err := pf.Remove(); err != nil {
logrus.Error(err)
}
}()
}
// Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
// StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
}
// ********************************** NOTICE ********************************** //
serverConfig, err := newAPIServerConfig(cli)
// ********************************** NOTICE ********************************** //
if err != nil {
return errors.Wrap(err, "failed to create API server")
}
// ********************************** NOTICE ********************************** //
cli.api = apiserver.New(serverConfig)
// ********************************** NOTICE ********************************** //
hosts, err := loadListeners(cli, serverConfig)
if err != nil {
return errors.Wrap(err, "failed to load listeners")
}
ctx, cancel := context.WithCancel(context.Background())
if cli.Config.ContainerdAddr == "" && runtime.GOOS != "windows" {
systemContainerdAddr, ok, err := systemContainerdRunning(cli.Config.IsRootless())
if err != nil {
cancel()
return errors.Wrap(err, "could not determine whether the system containerd is running")
}
if !ok {
opts, err := cli.getContainerdDaemonOpts()
if err != nil {
cancel()
return errors.Wrap(err, "failed to generate containerd options")
}
r, err := supervisor.Start(ctx, filepath.Join(cli.Config.Root, "containerd"), filepath.Join(cli.Config.ExecRoot, "containerd"), opts...)
if err != nil {
cancel()
return errors.Wrap(err, "failed to start containerd")
}
cli.Config.ContainerdAddr = r.Address()
// Try to wait for containerd to shut down
defer r.WaitTimeout(10 * time.Second)
} else {
cli.Config.ContainerdAddr = systemContainerdAddr
}
}
defer cancel()
signal.Trap(func() {
cli.stop()
<-stopc // wait for daemonCli.start() to return
}, logrus.StandardLogger())
// Notify that the API is active, but before daemon is set up.
preNotifySystem()
pluginStore := plugin.NewStore()
// ********************************** NOTICE ********************************** //
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
// ********************************** NOTICE ********************************** //
logrus.Fatalf("Error creating middlewares: %v", err)
}
// ********************************** NOTICE ********************************** //
d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
// ********************************** NOTICE ********************************** //
if err != nil {
return errors.Wrap(err, "failed to start daemon")
}
d.StoreHosts(hosts)
// validate after NewDaemon has restored enabled plugins. Don't change order.
if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
return errors.Wrap(err, "failed to validate authorization plugin")
}
// TODO: move into startMetricsServer()
if cli.Config.MetricsAddress != "" {
if !d.HasExperimental() {
return errors.Wrap(err, "metrics-addr is only supported when experimental is enabled")
}
if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
return err
}
}
c, err := createAndStartCluster(cli, d)
if err != nil {
logrus.Fatalf("Error starting cluster component: %v", err)
}
// Restart all autostart containers which have a swarm endpoint
// and are not yet running, now that we have successfully
// initialized the cluster.
d.RestartSwarmContainers()
logrus.Info("Daemon has completed initialization")
cli.d = d
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
// ********************************** NOTICE ********************************** //
initRouter(routerOptions)
// ********************************** NOTICE ********************************** //
go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
cli.setupConfigReloadTrap()
// The serve API routine never exits unless an error occurs
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifySystem()
// Daemon is fully initialized and handling API traffic
// Wait for serve API to complete
// ********************************** NOTICE ********************************** //
errAPI := <-serveAPIWait
// ********************************** NOTICE ********************************** //
c.Cleanup()
shutdownDaemon(d)
// Stop notification processing and any background processes
cancel()
if errAPI != nil {
return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
}
return nil
}
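The tail of start() shows the pattern that keeps the daemon process alive: the API server runs in a goroutine and the main goroutine blocks on an error channel until it exits. A stripped-down sketch of the same shape, with net/http standing in for the real apiserver.Server:

package main

import (
    "errors"
    "fmt"
    "net/http"
)

func main() {
    srv := &http.Server{Addr: ":8080"}

    serveWait := make(chan error, 1)
    go func() {
        // ListenAndServe blocks until the server stops; forward its
        // error so the main goroutine can decide how to shut down.
        serveWait <- srv.ListenAndServe()
    }()

    // ... daemon initialization would continue here ...

    // Block until the server exits, mirroring errAPI := <-serveAPIWait.
    if err := <-serveWait; err != nil && !errors.Is(err, http.ErrServerClosed) {
        fmt.Println("shutting down due to serve error:", err)
    }
}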
2.1.1.1) api/server/server.go#New (returns a Server)
// New returns a new instance of the server based on the specified configuration.
// It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
func New(cfg *Config) *Server {
return &Server{
cfg: cfg,
}
}
// Server contains instance details for the server
type Server struct {
cfg *Config
servers []*HTTPServer
routers []router.Router
routerSwapper *routerSwapper
middlewares []middleware.Middleware
}
2.1.1.2) daemon/daemon.go#NewDaemon
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.Store) (daemon *Daemon, err error) {
setDefaultMtu(config)
registryService, err := registry.NewService(config.ServiceOptions)
if err != nil {
return nil, err
}
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Setup the resolv.conf
setupResolvConf(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
idMapping, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootIDs := idMapping.RootPair()
if err := setupDaemonProcess(config); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := prepareTempDir(config.Root, rootIDs)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := getRealPath(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
if runtime.GOOS == "windows" {
if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
}
}
os.Setenv("TEMP", realTmp)
os.Setenv("TMP", realTmp)
} else {
os.Setenv("TMPDIR", realTmp)
}
// ********************************** NOTICE ********************************** //
d := &Daemon{
configStore: config,
PluginStore: pluginStore,
startupDone: make(chan struct{}),
}
// ********************************** NOTICE ********************************** //
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
if err := d.setGenericResources(config); err != nil {
return nil, err
}
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
stackDumpDir := config.Root
if execRoot := config.GetExecRoot(); execRoot != "" {
stackDumpDir = execRoot
}
d.setupDumpStackTrap(stackDumpDir)
if err := d.setupSeccompProfile(); err != nil {
return nil, err
}
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
// ensureDefaultAppArmorProfile does nothing if apparmor is disabled
if err := ensureDefaultAppArmorProfile(); err != nil {
logrus.Errorf(err.Error())
}
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
return nil, err
}
// Create the directory where we'll store the runtime scripts (i.e. in
// order to support runtimeArgs)
daemonRuntimes := filepath.Join(config.Root, "runtimes")
if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
return nil, err
}
if err := d.loadRuntimes(); err != nil {
return nil, err
}
if runtime.GOOS == "windows" {
if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
return nil, err
}
}
// On Windows we don't support the environment variable, or a user supplied graphdriver
// as Windows has no choice in terms of which graphdrivers to use. It's a case of
// running Windows containers on Windows - windowsfilter, running Linux containers on Windows,
// lcow. Unix platforms however run a single graphdriver for all containers, and it can
// be set through an environment variable, a daemon start parameter, or chosen through
// initialization of the layerstore through driver priority order for example.
d.graphDrivers = make(map[string]string)
layerStores := make(map[string]layer.Store)
if runtime.GOOS == "windows" {
d.graphDrivers[runtime.GOOS] = "windowsfilter"
if system.LCOWSupported() {
d.graphDrivers["linux"] = "lcow"
}
} else {
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
} else {
logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
}
d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
}
d.RegistryService = registryService
logger.RegisterPluginGetter(d.PluginStore)
metricsSockPath, err := d.listenMetricsSock()
if err != nil {
return nil, err
}
registerMetricsPluginCallback(d.PluginStore, metricsSockPath)
gopts := []grpc.DialOption{
grpc.WithInsecure(),
grpc.WithBackoffMaxDelay(3 * time.Second),
grpc.WithDialer(dialer.Dialer),
// TODO(stevvooe): We may need to allow configuration of this on the client.
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
}
if config.ContainerdAddr != "" {
d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(ContainersNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
}
}
// ********************************** NOTICE ********************************** //
createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
var pluginCli *containerd.Client
// Windows is not currently using containerd, keep the
// client as nil
if config.ContainerdAddr != "" {
pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(pluginexec.PluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
}
}
return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, m)
}
// ********************************** NOTICE ********************************** //
// Plugin system initialization should happen before restore. Do not change order.
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: getPluginExecRoot(config.Root),
Store: d.PluginStore,
CreateExecutor: createPluginExec,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
AuthzMiddleware: config.AuthzMiddleware,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
if err := d.setupDefaultLogConfig(); err != nil {
return nil, err
}
for operatingSystem, gd := range d.graphDrivers {
layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
Root: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: gd,
GraphDriverOptions: config.GraphOptions,
IDMapping: idMapping,
PluginGetter: d.PluginStore,
ExperimentalEnabled: config.Experimental,
OS: operatingSystem,
})
if err != nil {
return nil, err
}
}
// As layerstore initialization may set the driver
for os := range d.graphDrivers {
d.graphDrivers[os] = layerStores[os].DriverName()
}
// Configure and validate the kernel's security support. Note this is a Linux/FreeBSD
// operation only, so it is safe to pass *just* the runtime OS graphdriver.
if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
return nil, err
}
imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
lgrMap := make(map[string]image.LayerGetReleaser)
for os, ls := range layerStores {
lgrMap[os] = ls
}
// ********************************** NOTICE ********************************** //
imageStore, err := image.NewImageStore(ifs, lgrMap)
// ********************************** NOTICE ********************************** //
if err != nil {
return nil, err
}
// ********************************** NOTICE ********************************** //
d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
// ********************************** NOTICE ********************************** //
if err != nil {
return nil, err
}
trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
return nil, err
}
// We have a single tag/reference store for the daemon globally. However, it's
// stored under the graphdriver. On host platforms which only support a single
// container OS, but multiple selectable graphdrivers, this means depending on which
// graphdriver is chosen, the global reference store is under there. For
// platforms which support multiple container operating systems, this is slightly
// more problematic as where does the global ref store get located? Fortunately,
// for Windows, which is currently the only daemon supporting multiple container
// operating systems, the list of graphdrivers available isn't user configurable.
// For backwards compatibility, we just put it under the windowsfilter
// directory regardless.
refStoreLocation := filepath.Join(imageRoot, `repositories.json`)
rs, err := refstore.NewReferenceStore(refStoreLocation)
if err != nil {
return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as it's read-only
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := sysinfo.New(false)
// Check if Devices cgroup is mounted, it is hard requirement for container security,
// on Linux.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
return nil, errors.New("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
if d.containersReplica, err = container.NewViewDB(); err != nil {
return nil, err
}
d.execCommands = exec.NewStore()
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.EventsService = events.New()
d.root = config.Root
d.idMapping = idMapping
d.seccompEnabled = sysInfo.Seccomp
d.apparmorEnabled = sysInfo.AppArmor
d.linkIndex = newLinkIndex()
// TODO: imageStore, distributionMetadataStore, and ReferenceStore are only
// used above to run migration. They could be initialized in ImageService
// if migration is called from daemon/images. layerStore might move as well.
// ********************************** NOTICE ********************************** //
d.imageService = images.NewImageService(images.ImageServiceConfig{
ContainerStore: d.containers,
DistributionMetadataStore: distributionMetadataStore,
EventsService: d.EventsService,
ImageStore: imageStore,
LayerStores: layerStores,
MaxConcurrentDownloads: *config.MaxConcurrentDownloads,
MaxConcurrentUploads: *config.MaxConcurrentUploads,
ReferenceStore: rs,
RegistryService: registryService,
TrustKey: trustKey,
})
// ********************************** NOTICE ********************************** //
go d.execCommandGC()
// ********************************** NOTICE ********************************** //
d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), ContainersNamespace, d)
// ********************************** NOTICE ********************************** //
if err != nil {
return nil, err
}
// ********************************** NOTICE ********************************** //
if err := d.restore(); err != nil {
// ********************************** NOTICE ********************************** //
return nil, err
}
close(d.startupDone)
// FIXME: this method never returns an error
info, _ := d.SystemInfo()
engineInfo.WithValues(
dockerversion.Version,
dockerversion.GitCommit,
info.Architecture,
info.Driver,
info.KernelVersion,
info.OperatingSystem,
info.OSType,
info.ID,
).Set(1)
engineCpus.Set(float64(info.NCPU))
engineMemory.Set(float64(info.MemTotal))
gd := ""
for os, driver := range d.graphDrivers {
if len(gd) > 0 {
gd += ", "
}
gd += driver
if len(d.graphDrivers) > 1 {
gd = fmt.Sprintf("%s (%s)", gd, os)
}
}
logrus.WithFields(logrus.Fields{
"version": dockerversion.Version,
"commit": dockerversion.GitCommit,
"graphdriver(s)": gd,
}).Info("Docker daemon")
return d, nil
}
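NewDaemon dials containerd under a dedicated namespace so dockerd's containers are isolated from other containerd clients. A minimal sketch of the same client usage, assuming the default containerd socket path and the containerd Go client of this era; "moby" stands in here for ContainersNamespace:

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
)

func main() {
    client, err := containerd.New(
        "/run/containerd/containerd.sock",
        containerd.WithDefaultNamespace("moby"),
        containerd.WithTimeout(60*time.Second),
    )
    if err != nil {
        panic(err)
    }
    defer client.Close()

    // Every request must carry a namespace; WithDefaultNamespace covers
    // calls that don't set one explicitly.
    ctx := namespaces.WithNamespace(context.Background(), "moby")
    containers, err := client.Containers(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Println("containers in the moby namespace:", len(containers))
}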
2.1.1.2.1) daemon/daemon.go#Daemon.restore
func (daemon *Daemon) restore() error {
var mapLock sync.Mutex
containers := make(map[string]*container.Container)
logrus.Info("Loading containers: start.")
// ********************************** NOTICE ********************************** //
dir, err := ioutil.ReadDir(daemon.repository)
// ********************************** NOTICE ********************************** //
if err != nil {
return err
}
// parallelLimit is the maximum number of parallel startup jobs that we
// allow (this is the limit used for all startup semaphores). The multiplier
// (128) was chosen after some fairly significant benchmarking -- don't change
// it unless you've tested it significantly (this value is adjusted if
// RLIMIT_NOFILE is small to avoid EMFILE).
parallelLimit := adjustParallelLimit(len(dir), 128*runtime.NumCPU())
// Re-used for all parallel startup jobs.
var group sync.WaitGroup
sem := semaphore.NewWeighted(int64(parallelLimit))
for _, v := range dir {
group.Add(1)
go func(id string) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
// ********************************** NOTICE ********************************** //
container, err := daemon.load(id)
// ********************************** NOTICE ********************************** //
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
return
}
if !system.IsOSSupported(container.OS) {
logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS)
return
}
// Ignore the container if it does not support the current driver being used by the graph
currentDriverForContainerOS := daemon.graphDrivers[container.OS]
if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
return
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())
mapLock.Lock()
containers[container.ID] = container
mapLock.Unlock()
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}(v.Name())
}
group.Wait()
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
mapLock.Lock()
delete(containers, c.ID)
mapLock.Unlock()
return
}
if err := daemon.Register(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
mapLock.Lock()
delete(containers, c.ID)
mapLock.Unlock()
return
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
}
}
}(c)
}
group.Wait()
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
daemon.backportMountSpec(c)
if err := daemon.checkpointAndSave(c); err != nil {
logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
}
daemon.setStateCounter(c)
logrus.WithFields(logrus.Fields{
"container": c.ID,
"running": c.IsRunning(),
"paused": c.IsPaused(),
}).Debug("restoring container")
var (
err error
alive bool
ec uint32
exitedAt time.Time
)
alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
if err != nil && !errdefs.IsNotFound(err) {
logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err)
return
}
if !alive {
ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID)
if err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
return
}
} else if !daemon.configStore.LiveRestoreEnabled {
if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
return
}
}
if c.IsRunning() || c.IsPaused() {
c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
if c.IsPaused() && alive {
s, err := daemon.containerd.Status(context.Background(), c.ID)
if err != nil {
logrus.WithError(err).WithField("container", c.ID).
Errorf("Failed to get container status")
} else {
logrus.WithField("container", c.ID).WithField("state", s).
Info("restored container paused")
switch s {
case libcontainerd.StatusPaused, libcontainerd.StatusPausing:
// nothing to do
case libcontainerd.StatusStopped:
alive = false
case libcontainerd.StatusUnknown:
logrus.WithField("container", c.ID).
Error("Unknown status for container during restore")
default:
// running
c.Lock()
c.Paused = false
daemon.setStateCounter(c)
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
logrus.WithError(err).WithField("container", c.ID).
Error("Failed to update stopped container state")
}
c.Unlock()
}
}
}
if !alive {
c.Lock()
c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
daemon.Cleanup(c)
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err)
}
c.Unlock()
}
// we call Mount and then Unmount to get BaseFs of the container
if err := daemon.Mount(c); err != nil {
// The mount is unlikely to fail. However, if the mount does fail, the
// container should still be allowed to restore here. Some functionality
// (like docker exec -u user) might be missing, but the container can
// still be stopped/restarted/removed.
// See #29365 for related information.
// The error is only logged here.
logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
} else {
if err := daemon.Unmount(c); err != nil {
logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
}
}
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// Get the list of containers we need to restart.
// Do not autostart containers which have endpoints in a
// swarm-scope network yet, since the cluster is not
// initialized yet. We will start them after the cluster
// is initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
c.Lock()
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.RemovalInProgress = false
c.Dead = true
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err)
}
}
c.Unlock()
}(c)
}
group.Wait()
// ********************************** NOTICE ********************************** //
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
// ********************************** NOTICE ********************************** //
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// Now that all the containers are registered, register the links
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
sem.Release(1)
group.Done()
}(c)
}
group.Wait()
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
_ = sem.Acquire(context.Background(), 1)
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
sem.Release(1)
group.Done()
}(c, notifier)
}
group.Wait()
for id := range removeContainers {
group.Add(1)
go func(cid string) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("Failed to remove container %s: %s", cid, err)
}
sem.Release(1)
group.Done()
}(id)
}
group.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// if the container has a restart policy, do not
// prepare the mountpoints since that has already been done on restarting.
// This is to speed up daemon start when a restarted container
// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
sem.Release(1)
group.Done()
}(c)
}
group.Wait()
logrus.Info("Loading containers: done.")
return nil
}
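restore() bounds its fan-out with a weighted semaphore layered on a WaitGroup: the WaitGroup tracks completion while the semaphore caps how many goroutines actually run at once. A minimal sketch of the same pattern, with a fixed limit of 4 in place of the adjusted 128*NumCPU:

package main

import (
    "context"
    "fmt"
    "sync"

    "golang.org/x/sync/semaphore"
)

func main() {
    ids := []string{"a", "b", "c", "d", "e", "f"}

    var group sync.WaitGroup
    sem := semaphore.NewWeighted(4) // at most 4 jobs in flight

    for _, id := range ids {
        group.Add(1)
        go func(id string) {
            defer group.Done()
            _ = sem.Acquire(context.Background(), 1)
            defer sem.Release(1)
            fmt.Println("loading container", id)
        }(id)
    }
    group.Wait()
}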
2.1.1.2.1.1) daemon/daemon_unix.go#Daemon.initNetworkController
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
// ********************************** NOTICE ********************************** //
netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
// ********************************** NOTICE ********************************** //
if err != nil {
return nil, err
}
// ********************************** NOTICE ********************************** //
controller, err := libnetwork.New(netOptions...)
// ********************************** NOTICE ********************************** //
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
if len(activeSandboxes) > 0 {
logrus.Info("There are old running containers, the network config will not take affect")
return controller, nil
}
// Initialize default network on "null"
if n, _ := controller.NetworkByName("none"); n == nil {
if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
}
}
// Initialize default network on "host"
if n, _ := controller.NetworkByName("host"); n == nil {
if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
}
}
// Clear stale bridge network
if n, err := controller.NetworkByName("bridge"); err == nil {
if err = n.Delete(); err != nil {
return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
}
if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled {
removeDefaultBridgeInterface()
}
}
if !config.DisableBridge {
// Initialize default driver "bridge"
// ********************************** NOTICE ********************************** //
if err := initBridgeDriver(controller, config); err != nil {
// ********************************** NOTICE ********************************** //
return nil, err
}
} else {
removeDefaultBridgeInterface()
}
return controller, nil
}
2.1.1.2.1.1.1) daemon/daemon.go#Daemon.networkOptions
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
// both default to the "bridge" network
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if len(dconfig.NetworkConfig.DefaultAddressPools.Value()) > 0 {
options = append(options, nwconfig.OptionDefaultAddressPoolConfig(dconfig.NetworkConfig.DefaultAddressPools.Value()))
}
if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
if pg != nil {
options = append(options, nwconfig.OptionPluginGetter(pg))
}
options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))
return options, nil
}
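networkOptions builds up a slice of nwconfig.Option values, Go's functional-options idiom: each option is a closure that mutates a config struct when the constructor applies it. A minimal sketch with hypothetical Config/Option types (not libnetwork's actual definitions):

package main

import "fmt"

type Config struct {
    DataDir      string
    Experimental bool
}

// Option mutates a Config; constructors apply options in order.
type Option func(*Config)

func OptionDataDir(dir string) Option {
    return func(c *Config) { c.DataDir = dir }
}

func OptionExperimental(on bool) Option {
    return func(c *Config) { c.Experimental = on }
}

func New(opts ...Option) *Config {
    c := &Config{}
    for _, o := range opts {
        o(c)
    }
    return c
}

func main() {
    cfg := New(OptionDataDir("/var/lib/docker"), OptionExperimental(true))
    fmt.Printf("%+v\n", *cfg)
}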
2.1.1.2.1.1.2) libnetwork/controller.go#New
// New creates a new instance of network controller.
func New(cfgOptions ...config.Option) (NetworkController, error) {
c := &controller{
id: stringid.GenerateRandomID(),
cfg: config.ParseConfigOptions(cfgOptions...),
sandboxes: sandboxTable{},
svcRecords: make(map[string]svcInfo),
serviceBindings: make(map[serviceKey]*service),
agentInitDone: make(chan struct{}),
networkLocker: locker.New(),
DiagnosticServer: diagnostic.New(),
}
c.DiagnosticServer.Init()
if err := c.initStores(); err != nil {
return nil, err
}
drvRegistry, err := drvregistry.New(c.getStore(datastore.LocalScope), c.getStore(datastore.GlobalScope), c.RegisterDriver, nil, c.cfg.PluginGetter)
if err != nil {
return nil, err
}
for _, i := range getInitializers(c.cfg.Daemon.Experimental) {
var dcfg map[string]interface{}
// External plugins don't need config passed through daemon. They can
// bootstrap themselves
if i.ntype != "remote" {
dcfg = c.makeDriverConfig(i.ntype)
}
if err := drvRegistry.AddDriver(i.ntype, i.fn, dcfg); err != nil {
return nil, err
}
}
if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope), c.cfg.Daemon.DefaultAddressPool); err != nil {
return nil, err
}
c.drvRegistry = drvRegistry
if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
// Failing to initialize discovery is a bad situation to be in,
// but it must not cause creation of the Controller to fail.
logrus.Errorf("Failed to Initialize Discovery : %v", err)
}
}
c.WalkNetworks(populateSpecial)
// Reserve pools first before doing cleanup. Otherwise the
// cleanups of endpoint/network and sandbox below will
// generate many unnecessary warnings
c.reservePools()
// Cleanup resources
c.sandboxCleanup(c.cfg.ActiveSandboxes)
c.cleanupLocalEndpoints()
c.networkCleanup()
if err := c.startExternalKeyListener(); err != nil {
return nil, err
}
return c, nil
}
2.1.1.2.1.1.3) daemon/daemon_unix.go#initBridgeDriver
func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
bridgeName := bridge.DefaultBridgeName
if config.BridgeConfig.Iface != "" {
bridgeName = config.BridgeConfig.Iface
}
netOption := map[string]string{
bridge.BridgeName: bridgeName,
bridge.DefaultBridge: strconv.FormatBool(true),
netlabel.DriverMTU: strconv.Itoa(config.Mtu),
bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
}
// --ip processing
if config.BridgeConfig.DefaultIP != nil {
netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
}
var (
ipamV4Conf *libnetwork.IpamConf
ipamV6Conf *libnetwork.IpamConf
)
ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
if err != nil {
return errors.Wrap(err, "list bridge addresses failed")
}
nw := nwList[0]
if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return errors.Wrap(err, "parse CIDR failed")
}
// Iterate through in case there are multiple addresses for the bridge
for _, entry := range nwList {
if fCIDR.Contains(entry.IP) {
nw = entry
break
}
}
}
ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
if hip.IsGlobalUnicast() {
ipamV4Conf.Gateway = nw.IP.String()
}
if config.BridgeConfig.IP != "" {
ipamV4Conf.PreferredPool = config.BridgeConfig.IP
ip, _, err := net.ParseCIDR(config.BridgeConfig.IP)
if err != nil {
return err
}
ipamV4Conf.Gateway = ip.String()
} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
}
if config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return err
}
ipamV4Conf.SubPool = fCIDR.String()
}
if config.BridgeConfig.DefaultGatewayIPv4 != nil {
ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
}
var deferIPv6Alloc bool
if config.BridgeConfig.FixedCIDRv6 != "" {
_, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
if err != nil {
return err
}
// In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
// at least 48 host bits, we need to guarantee the current behavior where the containers'
// IPv6 addresses will be constructed based on the containers' interface MAC address.
// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
// on this network until after the driver has created the endpoint and returned the
// constructed address. Libnetwork will then reserve this address with the ipam driver.
ones, _ := fCIDRv6.Mask.Size()
deferIPv6Alloc = ones <= 80
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.PreferredPool = fCIDRv6.String()
// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
// address belongs to the same network, we need to inform libnetwork about it, so
// that it can be reserved with IPAM and it will not be given away to somebody else
for _, nw6 := range nw6List {
if fCIDRv6.Contains(nw6.IP) {
ipamV6Conf.Gateway = nw6.IP.String()
break
}
}
}
if config.BridgeConfig.DefaultGatewayIPv6 != nil {
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
}
v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
if ipamV6Conf != nil {
v6Conf = append(v6Conf, ipamV6Conf)
}
// Initialize default network on "bridge" with the same name
// ********************************** NOTICE ********************************** //
_, err = controller.NewNetwork("bridge", "bridge", "",
libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(netOption),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
// ********************************** NOTICE ********************************** //
if err != nil {
return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
}
return nil
}
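Most of initBridgeDriver is CIDR bookkeeping around --bip and --fixed-cidr. A minimal sketch of the underlying standard-library calls, with hypothetical example addresses:

package main

import (
    "fmt"
    "net"
)

func main() {
    // --bip style value: the gateway IP and the pool in one CIDR.
    gw, pool, err := net.ParseCIDR("172.17.0.1/16")
    if err != nil {
        panic(err)
    }
    fmt.Println("gateway:", gw, "preferred pool:", pool)

    // --fixed-cidr style value: a sub-pool that must fall inside the pool.
    _, sub, err := net.ParseCIDR("172.17.128.0/17")
    if err != nil {
        panic(err)
    }
    fmt.Println("sub-pool inside pool:", pool.Contains(sub.IP))
}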
2.1.1.2.1.1.3.1) libnetwork/controller.go#NewNetwork
// NewNetwork creates a new network of the specified network type. The options
// are network specific and modeled in a generic way.
func (c *controller) NewNetwork(networkType, name string, id string, options ...NetworkOption) (Network, error) {
if id != "" {
c.networkLocker.Lock(id)
defer c.networkLocker.Unlock(id)
if _, err := c.NetworkByID(id); err == nil {
return nil, NetworkNameError(id)
}
}
if !config.IsValidName(name) {
return nil, ErrInvalidName(name)
}
if id == "" {
id = stringid.GenerateRandomID()
}
defaultIpam := defaultIpamForNetworkType(networkType)
// Construct the network object
network := &network{
name: name,
networkType: networkType,
generic: map[string]interface{}{netlabel.GenericData: make(map[string]string)},
ipamType: defaultIpam,
id: id,
created: time.Now(),
ctrlr: c,
persist: true,
drvOnce: &sync.Once{},
loadBalancerMode: loadBalancerModeDefault,
}
network.processOptions(options...)
if err := network.validateConfiguration(); err != nil {
return nil, err
}
var (
cap *driverapi.Capability
err error
)
// Reset network types, force local scope and skip allocation and
// plumbing for configuration networks. Reset of the config-only
// network drivers is needed so that this special network is not
// usable by old engine versions.
if network.configOnly {
network.scope = datastore.LocalScope
network.networkType = "null"
goto addToStore
}
_, cap, err = network.resolveDriver(network.networkType, true)
if err != nil {
return nil, err
}
if network.scope == datastore.LocalScope && cap.DataScope == datastore.GlobalScope {
return nil, types.ForbiddenErrorf("cannot downgrade network scope for %s networks", networkType)
}
if network.ingress && cap.DataScope != datastore.GlobalScope {
return nil, types.ForbiddenErrorf("Ingress network can only be global scope network")
}
// At this point the network scope is still unknown if not set by user
if (cap.DataScope == datastore.GlobalScope || network.scope == datastore.SwarmScope) &&
!c.isDistributedControl() && !network.dynamic {
if c.isManager() {
// For non-distributed controlled environment, globalscoped non-dynamic networks are redirected to Manager
return nil, ManagerRedirectError(name)
}
return nil, types.ForbiddenErrorf("Cannot create a multi-host network from a worker node. Please create the network from a manager node.")
}
if network.scope == datastore.SwarmScope && c.isDistributedControl() {
return nil, types.ForbiddenErrorf("cannot create a swarm scoped network when swarm is not active")
}
// Make sure we have a driver available for this network type
// before we allocate anything.
if _, err := network.driver(true); err != nil {
return nil, err
}
// From this point on, we need the network specific configuration,
// which may come from a configuration-only network
if network.configFrom != "" {
t, err := c.getConfigNetwork(network.configFrom)
if err != nil {
return nil, types.NotFoundErrorf("configuration network %q does not exist", network.configFrom)
}
if err := t.applyConfigurationTo(network); err != nil {
return nil, types.InternalErrorf("Failed to apply configuration: %v", err)
}
defer func() {
if err == nil {
if err := t.getEpCnt().IncEndpointCnt(); err != nil {
logrus.Warnf("Failed to update reference count for configuration network %q on creation of network %q: %v",
t.Name(), network.Name(), err)
}
}
}()
}
err = network.ipamAllocate()
if err != nil {
return nil, err
}
defer func() {
if err != nil {
network.ipamRelease()
}
}()
err = c.addNetwork(network)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
if e := network.deleteNetwork(); e != nil {
logrus.Warnf("couldn't roll back driver network on network %s creation failure: %v", network.name, err)
}
}
}()
// XXX If the driver type is "overlay" check the options for DSR
// being set. If so, set the network's load balancing mode to DSR.
// This should really be done in a network option, but due to
// time pressure to get this in without adding changes to moby,
// swarm and CLI, it is being implemented as a driver-specific
// option. Unfortunately, drivers can't influence the core
// "libnetwork.network" data type. Hence we need this hack code
// to implement in this manner.
if gval, ok := network.generic[netlabel.GenericData]; ok && network.networkType == "overlay" {
optMap := gval.(map[string]string)
if _, ok := optMap[overlayDSROptionString]; ok {
network.loadBalancerMode = loadBalancerModeDSR
}
}
addToStore:
// First store the endpoint count, then the network, to avoid ending up
// with a datastore containing a network but no epCnt in case of an
// ungraceful shutdown during this function call.
epCnt := &endpointCnt{n: network}
if err = c.updateToStore(epCnt); err != nil {
return nil, err
}
defer func() {
if err != nil {
if e := c.deleteFromStore(epCnt); e != nil {
logrus.Warnf("could not rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e)
}
}
}()
network.epCnt = epCnt
if err = c.updateToStore(network); err != nil {
return nil, err
}
defer func() {
if err != nil {
if e := c.deleteFromStore(network); e != nil {
logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", network, err, e)
}
}
}()
if network.configOnly {
return network, nil
}
joinCluster(network)
defer func() {
if err != nil {
network.cancelDriverWatches()
if e := network.leaveCluster(); e != nil {
logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", network.name, err, e)
}
}
}()
if network.hasLoadBalancerEndpoint() {
if err = network.createLoadBalancerSandbox(); err != nil {
return nil, err
}
}
if !c.isDistributedControl() {
c.Lock()
arrangeIngressFilterRule()
c.Unlock()
}
c.arrangeUserFilterRule()
return network, nil
}
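NewNetwork's error handling is a stack of deferred rollbacks keyed off the named return value err: each successful step pushes an undo that fires only if a later step fails, and the defers unwind in LIFO order. A minimal sketch with hypothetical step functions:

package main

import "fmt"

func createNetwork() (err error) {
    if err = allocateIPAM(); err != nil {
        return err
    }
    defer func() {
        if err != nil {
            releaseIPAM() // undo step 1 if anything below fails
        }
    }()

    if err = addToStore(); err != nil {
        return err
    }
    defer func() {
        if err != nil {
            deleteFromStore() // undo step 2; runs before the IPAM undo
        }
    }()

    // If this last step fails, both rollbacks fire in LIFO order.
    return joinCluster()
}

func allocateIPAM() error { return nil }
func releaseIPAM()        { fmt.Println("released IPAM") }
func addToStore() error   { return nil }
func deleteFromStore()    { fmt.Println("removed from store") }
func joinCluster() error  { return fmt.Errorf("cluster join failed") }

func main() {
    fmt.Println("createNetwork:", createNetwork())
}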
2.1.1.3) cmd/dockerd/daemon.go#initRouter (configures routes for the Server)
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{}
// ********************************** NOTICE ********************************** //
// Configure the routers
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder),
image.NewRouter(opts.daemon.ImageService()),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit, opts.features),
volume.NewRouter(opts.daemon.VolumesService()),
build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon.ImageService()),
}
// ********************************** NOTICE ********************************** //
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
experimental.Enable()
}
}
}
}
// ********************************** NOTICE ********************************** //
opts.api.InitRouter(routers...)
// ********************************** NOTICE ********************************** //
}
2.1.1.3.1) api/server/router/container/container.go#NewRouter
// NewRouter initializes a new container router
func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
r := &containerRouter{
backend: b,
decoder: decoder,
}
r.initRoutes()
return r
}
// initRoutes initializes the routes in container router
func (r *containerRouter) initRoutes() {
r.routes = []router.Route{
// HEAD
router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive),
// GET
router.NewGetRoute("/containers/json", r.getContainersJSON),
router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport),
router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs),
router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats),
router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
// POST
router.NewPostRoute("/containers/create", r.postContainersCreate),
router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill),
router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause),
router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause),
router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart),
router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart),
router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop),
router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
router.NewPostRoute("/containers/prune", r.postContainersPrune),
router.NewPostRoute("/commit", r.postCommit),
// PUT
router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
// DELETE
router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers),
}
}
2.1.1.3.2) api/server/server.go#Server.InitRouter
// InitRouter initializes the list of routers for the server.
// This method also enables the Go profiler.
func (s *Server) InitRouter(routers ...router.Router) {
s.routers = append(s.routers, routers...)
// ********************************** NOTICE ********************************** //
m := s.createMux()
// ********************************** NOTICE ********************************** //
s.routerSwapper = &routerSwapper{
router: m,
}
}
// createMux initializes the main router the server uses.
func (s *Server) createMux() *mux.Router {
m := mux.NewRouter()
logrus.Debug("Registering routers")
for _, apiRouter := range s.routers {
for _, r := range apiRouter.Routes() {
// ********************************** NOTICE ********************************** //
f := s.makeHTTPHandler(r.Handler())
logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
// ********************************** NOTICE ********************************** //
m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
// ********************************** NOTICE ********************************** //
m.Path(r.Path()).Methods(r.Method()).Handler(f)
}
}
debugRouter := debug.NewRouter()
s.routers = append(s.routers, debugRouter)
for _, r := range debugRouter.Routes() {
f := s.makeHTTPHandler(r.Handler())
m.Path("/debug" + r.Path()).Handler(f)
}
notFoundHandler := httputils.MakeErrorHandler(pageNotFoundError{})
m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
m.NotFoundHandler = notFoundHandler
m.MethodNotAllowedHandler = notFoundHandler
return m
}
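createMux registers every route twice, once under the version prefix and once bare, which is why the two m.Path calls appear above. A minimal gorilla/mux sketch of the same double registration; the versionMatcher pattern below matches the shape of dockerd's, but treat the exact regexp as an assumption:

package main

import (
    "fmt"
    "log"
    "net/http"

    "github.com/gorilla/mux"
)

func main() {
    m := mux.NewRouter()
    versionMatcher := "/v{version:[0-9.]+}"

    ping := func(w http.ResponseWriter, r *http.Request) {
        // mux.Vars exposes the {version} segment; empty on the bare route.
        fmt.Fprintln(w, "OK, API version:", mux.Vars(r)["version"])
    }

    // Register twice, mirroring the two m.Path(...) calls in createMux.
    m.Path(versionMatcher + "/_ping").Methods(http.MethodGet).HandlerFunc(ping)
    m.Path("/_ping").Methods(http.MethodGet).HandlerFunc(ping)

    log.Fatal(http.ListenAndServe(":8080", m))
}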