cmd/dockerd/docker.go

  1. func main() {
  2. // ********************************** NOTICE ********************************** //
  3. if reexec.Init() {
  4. // ********************************** NOTICE ********************************** //
  5. return
  6. }
  7. // initial log formatting; this setting is updated after the daemon configuration is loaded.
  8. logrus.SetFormatter(&logrus.TextFormatter{
  9. TimestampFormat: jsonmessage.RFC3339NanoFixed,
  10. FullTimestamp: true,
  11. })
  12. // Set terminal emulation based on platform as required.
  13. _, stdout, stderr := term.StdStreams()
  14. // @jhowardmsft - maybe there is a historic reason why on non-Windows, stderr is used
  15. // here. However, on Windows it makes no sense and there is no need.
  16. if runtime.GOOS == "windows" {
  17. logrus.SetOutput(stdout)
  18. } else {
  19. logrus.SetOutput(stderr)
  20. }
  21. onError := func(err error) {
  22. fmt.Fprintf(stderr, "%s\n", err)
  23. os.Exit(1)
  24. }
  25. // ********************************** NOTICE ********************************** //
  26. cmd, err := newDaemonCommand()
  27. // ********************************** NOTICE ********************************** //
  28. if err != nil {
  29. onError(err)
  30. }
  31. cmd.SetOutput(stdout)
  32. if err := cmd.Execute(); err != nil {
  33. onError(err)
  34. }
  35. }
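
For context, the initial logging setup boils down to a formatter plus an output stream. A minimal standalone equivalent (the timestamp layout below is written out literally; it matches the fixed-width value that jsonmessage.RFC3339NanoFixed holds):

  package main

  import (
      "os"

      "github.com/sirupsen/logrus"
  )

  func main() {
      logrus.SetFormatter(&logrus.TextFormatter{
          // Fixed-width RFC3339Nano so log lines stay column-aligned.
          TimestampFormat: "2006-01-02T15:04:05.000000000Z07:00",
          FullTimestamp:   true,
      })
      logrus.SetOutput(os.Stderr) // stdout on Windows, per the platform switch above
      logrus.Info("logging configured")
  }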

1) pkg/reexec/reexec.go#Init

  1. var registeredInitializers = make(map[string]func())
  2. // Register adds an initialization func under the specified name
  3. func Register(name string, initializer func()) {
  4. if _, exists := registeredInitializers[name]; exists {
  5. panic(fmt.Sprintf("reexec func already registered under name %q", name))
  6. }
  7. registeredInitializers[name] = initializer
  8. }
  9. // Init is called as the first part of the exec process and returns true if an
  10. // initialization function was called.
  11. func Init() bool {
  12. initializer, exists := registeredInitializers[os.Args[0]]
  13. if exists {
  14. initializer()
  15. return true
  16. }
  17. return false
  18. }
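
The trick is that Init keys off os.Args[0]: the daemon re-executes its own binary with argv[0] set to a registered name, so the child process runs the matching initializer instead of main's normal path. A minimal, self-contained sketch of the pattern (the registration name "my-init" is hypothetical; reexec.Command sets argv[0] and, on Linux, execs /proc/self/exe):

  package main

  import (
      "fmt"
      "os"

      "github.com/docker/docker/pkg/reexec"
  )

  func init() {
      // Registered before main runs; the name must match the child's argv[0].
      reexec.Register("my-init", func() {
          fmt.Println("running as the re-exec'd child")
      })
  }

  func main() {
      if reexec.Init() {
          return // we were the child; the initializer already ran
      }
      cmd := reexec.Command("my-init") // argv[0] = "my-init"
      if err := cmd.Run(); err != nil {
          fmt.Fprintln(os.Stderr, err)
          os.Exit(1)
      }
  }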

2) cmd/dockerd/docker.go#newDaemonCommand

  1. func newDaemonCommand() (*cobra.Command, error) {
  2. opts := newDaemonOptions(config.New())
  3. cmd := &cobra.Command{
  4. Use: "dockerd [OPTIONS]",
  5. Short: "A self-sufficient runtime for containers.",
  6. SilenceUsage: true,
  7. SilenceErrors: true,
  8. Args: cli.NoArgs,
  9. RunE: func(cmd *cobra.Command, args []string) error {
  10. opts.flags = cmd.Flags()
  11. // ********************************** NOTICE ********************************** //
  12. return runDaemon(opts)
  13. // ********************************** NOTICE ********************************** //
  14. },
  15. DisableFlagsInUseLine: true,
  16. Version: fmt.Sprintf("%s, build %s", dockerversion.Version, dockerversion.GitCommit),
  17. }
  18. cli.SetupRootCommand(cmd)
  19. flags := cmd.Flags()
  20. flags.BoolP("version", "v", false, "Print version information and quit")
  21. defaultDaemonConfigFile, err := getDefaultDaemonConfigFile()
  22. if err != nil {
  23. return nil, err
  24. }
  25. flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "Daemon configuration file")
  26. opts.InstallFlags(flags)
  27. if err := installConfigFlags(opts.daemonConfig, flags); err != nil {
  28. return nil, err
  29. }
  30. installServiceFlags(flags)
  31. return cmd, nil
  32. }
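
For reference, this is the standard cobra root-command shape: RunE returns errors instead of exiting, and SilenceUsage/SilenceErrors leave reporting to the caller, which is why docker.go funnels cmd.Execute() into onError. A toy sketch with made-up names ("exampled" and its --config-file default are illustrative, not dockerd's real wiring):

  package main

  import (
      "fmt"
      "os"

      "github.com/spf13/cobra"
  )

  func newExampleCommand() *cobra.Command {
      var configFile string
      cmd := &cobra.Command{
          Use:           "exampled [OPTIONS]",
          Short:         "A toy daemon command",
          SilenceUsage:  true,
          SilenceErrors: true,
          RunE: func(cmd *cobra.Command, args []string) error {
              fmt.Println("would start daemon with config:", configFile)
              return nil
          },
      }
      cmd.Flags().StringVar(&configFile, "config-file", "/etc/exampled.json", "Daemon configuration file")
      return cmd
  }

  func main() {
      if err := newExampleCommand().Execute(); err != nil {
          fmt.Fprintln(os.Stderr, err) // mirrors onError in docker.go
          os.Exit(1)
      }
  }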

2.1) cmd/dockerd/docker_unix.go#runDaemon

  1. func runDaemon(opts *daemonOptions) error {
  2. daemonCli := NewDaemonCli()
  3. return daemonCli.start(opts)
  4. }
  5. // NewDaemonCli returns a daemon CLI
  6. func NewDaemonCli() *DaemonCli {
  7. return &DaemonCli{}
  8. }

2.1.1) cmd/dockerd/daemon.go#DaemonCli.start

  1. func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
  2. stopc := make(chan bool)
  3. defer close(stopc)
  4. // warn from uuid package when running the daemon
  5. uuid.Loggerf = logrus.Warnf
  6. opts.SetDefaultOptions(opts.flags)
  7. if cli.Config, err = loadDaemonCliConfig(opts); err != nil {
  8. return err
  9. }
  10. if err := configureDaemonLogs(cli.Config); err != nil {
  11. return err
  12. }
  13. cli.configFile = &opts.configFile
  14. cli.flags = opts.flags
  15. if cli.Config.Debug {
  16. debug.Enable()
  17. }
  18. if cli.Config.Experimental {
  19. logrus.Warn("Running experimental build")
  20. if cli.Config.IsRootless() {
  21. logrus.Warn("Running in rootless mode. Cgroups, AppArmor, and CRIU are disabled.")
  22. }
  23. } else {
  24. if cli.Config.IsRootless() {
  25. return fmt.Errorf("rootless mode is supported only when running in experimental mode")
  26. }
  27. }
  28. // return human-friendly error before creating files
  29. if runtime.GOOS == "linux" && os.Geteuid() != 0 {
  30. return fmt.Errorf("dockerd needs to be started with root. To see how to run dockerd in rootless mode with unprivileged user, see the documentation")
  31. }
  32. system.InitLCOW(cli.Config.Experimental)
  33. if err := setDefaultUmask(); err != nil {
  34. return err
  35. }
  36. // Create the daemon root before we create ANY other files (PID, or migrate keys)
  37. // to ensure the appropriate ACL is set (particularly relevant on Windows)
  38. // ********************************** NOTICE ********************************** //
  39. if err := daemon.CreateDaemonRoot(cli.Config); err != nil {
  40. // ********************************** NOTICE ********************************** //
  41. return err
  42. }
  43. if err := system.MkdirAll(cli.Config.ExecRoot, 0700, ""); err != nil {
  44. return err
  45. }
  46. potentiallyUnderRuntimeDir := []string{cli.Config.ExecRoot}
  47. if cli.Pidfile != "" {
  48. pf, err := pidfile.New(cli.Pidfile)
  49. if err != nil {
  50. return errors.Wrap(err, "failed to start daemon")
  51. }
  52. potentiallyUnderRuntimeDir = append(potentiallyUnderRuntimeDir, cli.Pidfile)
  53. defer func() {
  54. if err := pf.Remove(); err != nil {
  55. logrus.Error(err)
  56. }
  57. }()
  58. }
  59. // Set sticky bit if XDG_RUNTIME_DIR is set && the file is actually under XDG_RUNTIME_DIR
  60. if _, err := homedir.StickRuntimeDirContents(potentiallyUnderRuntimeDir); err != nil {
  61. // StickRuntimeDirContents returns nil error if XDG_RUNTIME_DIR is just unset
  62. logrus.WithError(err).Warn("cannot set sticky bit on files under XDG_RUNTIME_DIR")
  63. }
  64. // ********************************** NOTICE ********************************** //
  65. serverConfig, err := newAPIServerConfig(cli)
  66. // ********************************** NOTICE ********************************** //
  67. if err != nil {
  68. return errors.Wrap(err, "failed to create API server")
  69. }
  70. // ********************************** NOTICE ********************************** //
  71. cli.api = apiserver.New(serverConfig)
  72. // ********************************** NOTICE ********************************** //
  73. hosts, err := loadListeners(cli, serverConfig)
  74. if err != nil {
  75. return errors.Wrap(err, "failed to load listeners")
  76. }
  77. ctx, cancel := context.WithCancel(context.Background())
  78. if cli.Config.ContainerdAddr == "" && runtime.GOOS != "windows" {
  79. systemContainerdAddr, ok, err := systemContainerdRunning(cli.Config.IsRootless())
  80. if err != nil {
  81. cancel()
  82. return errors.Wrap(err, "could not determine whether the system containerd is running")
  83. }
  84. if !ok {
  85. opts, err := cli.getContainerdDaemonOpts()
  86. if err != nil {
  87. cancel()
  88. return errors.Wrap(err, "failed to generate containerd options")
  89. }
  90. r, err := supervisor.Start(ctx, filepath.Join(cli.Config.Root, "containerd"), filepath.Join(cli.Config.ExecRoot, "containerd"), opts...)
  91. if err != nil {
  92. cancel()
  93. return errors.Wrap(err, "failed to start containerd")
  94. }
  95. cli.Config.ContainerdAddr = r.Address()
  96. // Try to wait for containerd to shutdown
  97. defer r.WaitTimeout(10 * time.Second)
  98. } else {
  99. cli.Config.ContainerdAddr = systemContainerdAddr
  100. }
  101. }
  102. defer cancel()
  103. signal.Trap(func() {
  104. cli.stop()
  105. <-stopc // wait for daemonCli.start() to return
  106. }, logrus.StandardLogger())
  107. // Notify that the API is active, but before daemon is set up.
  108. preNotifySystem()
  109. pluginStore := plugin.NewStore()
  110. // ********************************** NOTICE ********************************** //
  111. if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
  112. // ********************************** NOTICE ********************************** //
  113. logrus.Fatalf("Error creating middlewares: %v", err)
  114. }
  115. // ********************************** NOTICE ********************************** //
  116. d, err := daemon.NewDaemon(ctx, cli.Config, pluginStore)
  117. // ********************************** NOTICE ********************************** //
  118. if err != nil {
  119. return errors.Wrap(err, "failed to start daemon")
  120. }
  121. d.StoreHosts(hosts)
  122. // validate after NewDaemon has restored enabled plugins. Don't change order.
  123. if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil {
  124. return errors.Wrap(err, "failed to validate authorization plugin")
  125. }
  126. // TODO: move into startMetricsServer()
  127. if cli.Config.MetricsAddress != "" {
  128. if !d.HasExperimental() {
  129. return errors.Wrap(err, "metrics-addr is only supported when experimental is enabled")
  130. }
  131. if err := startMetricsServer(cli.Config.MetricsAddress); err != nil {
  132. return err
  133. }
  134. }
  135. c, err := createAndStartCluster(cli, d)
  136. if err != nil {
  137. logrus.Fatalf("Error starting cluster component: %v", err)
  138. }
  139. // Restart all autostart containers which have a swarm endpoint
  140. // and are not yet running, now that we have successfully
  141. // initialized the cluster.
  142. d.RestartSwarmContainers()
  143. logrus.Info("Daemon has completed initialization")
  144. cli.d = d
  145. routerOptions, err := newRouterOptions(cli.Config, d)
  146. if err != nil {
  147. return err
  148. }
  149. routerOptions.api = cli.api
  150. routerOptions.cluster = c
  151. // ********************************** NOTICE ********************************** //
  152. initRouter(routerOptions)
  153. // ********************************** NOTICE ********************************** //
  154. go d.ProcessClusterNotifications(ctx, c.GetWatchStream())
  155. cli.setupConfigReloadTrap()
  156. // The serve API routine never exits unless an error occurs
  157. // We need to start it as a goroutine and wait on it so
  158. // daemon doesn't exit
  159. serveAPIWait := make(chan error)
  160. go cli.api.Wait(serveAPIWait)
  161. // after the daemon is done setting up we can notify systemd api
  162. notifySystem()
  163. // Daemon is fully initialized and handling API traffic
  164. // Wait for serve API to complete
  165. // ********************************** NOTICE ********************************** //
  166. errAPI := <-serveAPIWait
  167. // ********************************** NOTICE ********************************** //
  168. c.Cleanup()
  169. shutdownDaemon(d)
  170. // Stop notification processing and any background processes
  171. cancel()
  172. if errAPI != nil {
  173. return errors.Wrap(errAPI, "shutting down due to ServeAPI error")
  174. }
  175. return nil
  176. }
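
The tail of start() shows the daemon's lifetime model: the API server runs in a goroutine and reports its exit on serveAPIWait, and the main goroutine blocks on that channel so the process stays alive until the server stops. A generic sketch of the same wait pattern using net/http stand-ins (not the real apiserver types):

  package main

  import (
      "context"
      "errors"
      "fmt"
      "net/http"
      "time"
  )

  func main() {
      serveAPIWait := make(chan error)
      srv := &http.Server{Addr: "127.0.0.1:8080"}
      go func() {
          err := srv.ListenAndServe()
          if errors.Is(err, http.ErrServerClosed) {
              err = nil // clean shutdown is not an error
          }
          serveAPIWait <- err
      }()
      // Stand-in for signal.Trap: stop the server after a short run.
      go func() {
          time.Sleep(2 * time.Second)
          _ = srv.Shutdown(context.Background())
      }()
      if err := <-serveAPIWait; err != nil {
          fmt.Println("shutting down due to ServeAPI error:", err)
      }
  }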

2.1.1.1) api/server/server.go#New (returns a Server)

  1. // New returns a new instance of the server based on the specified configuration.
  2. // It allocates resources which will be needed for ServeAPI(ports, unix-sockets).
  3. func New(cfg *Config) *Server {
  4. return &Server{
  5. cfg: cfg,
  6. }
  7. }
  8. // Server contains instance details for the server
  9. type Server struct {
  10. cfg *Config
  11. servers []*HTTPServer
  12. routers []router.Router
  13. routerSwapper *routerSwapper
  14. middlewares []middleware.Middleware
  15. }
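
Server carries a middleware list that is wrapped around route handlers when the router is set up. The real server wraps its own handler type; as a rough sketch of the same idea with plain net/http handlers:

  package main

  import (
      "fmt"
      "log"
      "net/http"
  )

  type middleware func(http.Handler) http.Handler

  // chain wraps h so that mws[0] runs outermost, mirroring a middlewares slice.
  func chain(h http.Handler, mws []middleware) http.Handler {
      for i := len(mws) - 1; i >= 0; i-- {
          h = mws[i](h)
      }
      return h
  }

  func logging(next http.Handler) http.Handler {
      return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
          fmt.Println(r.Method, r.URL.Path)
          next.ServeHTTP(w, r)
      })
  }

  func main() {
      h := chain(http.NotFoundHandler(), []middleware{logging})
      log.Fatal(http.ListenAndServe("127.0.0.1:8080", h))
  }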

2.1.1.2) daemon/daemon.go#NewDaemon

  1. // NewDaemon sets up everything for the daemon to be able to service
  2. // requests from the webserver.
  3. func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.Store) (daemon *Daemon, err error) {
  4. setDefaultMtu(config)
  5. registryService, err := registry.NewService(config.ServiceOptions)
  6. if err != nil {
  7. return nil, err
  8. }
  9. // Ensure that we have a correct root key limit for launching containers.
  10. if err := ModifyRootKeyLimit(); err != nil {
  11. logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
  12. }
  13. // Ensure we have compatible and valid configuration options
  14. if err := verifyDaemonSettings(config); err != nil {
  15. return nil, err
  16. }
  17. // Do we have a disabled network?
  18. config.DisableBridge = isBridgeNetworkDisabled(config)
  19. // Setup the resolv.conf
  20. setupResolvConf(config)
  21. // Verify the platform is supported as a daemon
  22. if !platformSupported {
  23. return nil, errSystemNotSupported
  24. }
  25. // Validate platform-specific requirements
  26. if err := checkSystem(); err != nil {
  27. return nil, err
  28. }
  29. idMapping, err := setupRemappedRoot(config)
  30. if err != nil {
  31. return nil, err
  32. }
  33. rootIDs := idMapping.RootPair()
  34. if err := setupDaemonProcess(config); err != nil {
  35. return nil, err
  36. }
  37. // set up the tmpDir to use a canonical path
  38. tmp, err := prepareTempDir(config.Root, rootIDs)
  39. if err != nil {
  40. return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
  41. }
  42. realTmp, err := getRealPath(tmp)
  43. if err != nil {
  44. return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
  45. }
  46. if runtime.GOOS == "windows" {
  47. if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
  48. if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
  49. return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
  50. }
  51. }
  52. os.Setenv("TEMP", realTmp)
  53. os.Setenv("TMP", realTmp)
  54. } else {
  55. os.Setenv("TMPDIR", realTmp)
  56. }
  57. // ********************************** NOTICE ********************************** //
  58. d := &Daemon{
  59. configStore: config,
  60. PluginStore: pluginStore,
  61. startupDone: make(chan struct{}),
  62. }
  63. // ********************************** NOTICE ********************************** //
  64. // Ensure the daemon is properly shutdown if there is a failure during
  65. // initialization
  66. defer func() {
  67. if err != nil {
  68. if err := d.Shutdown(); err != nil {
  69. logrus.Error(err)
  70. }
  71. }
  72. }()
  73. if err := d.setGenericResources(config); err != nil {
  74. return nil, err
  75. }
  76. // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
  77. // on Windows to dump Go routine stacks
  78. stackDumpDir := config.Root
  79. if execRoot := config.GetExecRoot(); execRoot != "" {
  80. stackDumpDir = execRoot
  81. }
  82. d.setupDumpStackTrap(stackDumpDir)
  83. if err := d.setupSeccompProfile(); err != nil {
  84. return nil, err
  85. }
  86. // Set the default isolation mode (only applicable on Windows)
  87. if err := d.setDefaultIsolation(); err != nil {
  88. return nil, fmt.Errorf("error setting default isolation mode: %v", err)
  89. }
  90. if err := configureMaxThreads(config); err != nil {
  91. logrus.Warnf("Failed to configure golang's threads limit: %v", err)
  92. }
  93. // ensureDefaultAppArmorProfile does nothing if apparmor is disabled
  94. if err := ensureDefaultAppArmorProfile(); err != nil {
  95. logrus.Errorf(err.Error())
  96. }
  97. daemonRepo := filepath.Join(config.Root, "containers")
  98. if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
  99. return nil, err
  100. }
  101. // Create the directory where we'll store the runtime scripts (i.e. in
  102. // order to support runtimeArgs)
  103. daemonRuntimes := filepath.Join(config.Root, "runtimes")
  104. if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
  105. return nil, err
  106. }
  107. if err := d.loadRuntimes(); err != nil {
  108. return nil, err
  109. }
  110. if runtime.GOOS == "windows" {
  111. if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
  112. return nil, err
  113. }
  114. }
  115. // On Windows we don't support the environment variable, or a user supplied graphdriver
  116. // as Windows has no choice in terms of which graphdrivers to use. It's a case of
  117. // running Windows containers on Windows - windowsfilter, running Linux containers on Windows,
  118. // lcow. Unix platforms however run a single graphdriver for all containers, and it can
  119. // be set through an environment variable, a daemon start parameter, or chosen through
  120. // initialization of the layerstore through driver priority order for example.
  121. d.graphDrivers = make(map[string]string)
  122. layerStores := make(map[string]layer.Store)
  123. if runtime.GOOS == "windows" {
  124. d.graphDrivers[runtime.GOOS] = "windowsfilter"
  125. if system.LCOWSupported() {
  126. d.graphDrivers["linux"] = "lcow"
  127. }
  128. } else {
  129. driverName := os.Getenv("DOCKER_DRIVER")
  130. if driverName == "" {
  131. driverName = config.GraphDriver
  132. } else {
  133. logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
  134. }
  135. d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
  136. }
  137. d.RegistryService = registryService
  138. logger.RegisterPluginGetter(d.PluginStore)
  139. metricsSockPath, err := d.listenMetricsSock()
  140. if err != nil {
  141. return nil, err
  142. }
  143. registerMetricsPluginCallback(d.PluginStore, metricsSockPath)
  144. gopts := []grpc.DialOption{
  145. grpc.WithInsecure(),
  146. grpc.WithBackoffMaxDelay(3 * time.Second),
  147. grpc.WithDialer(dialer.Dialer),
  148. // TODO(stevvooe): We may need to allow configuration of this on the client.
  149. grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
  150. grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
  151. }
  152. if config.ContainerdAddr != "" {
  153. d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(ContainersNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
  154. if err != nil {
  155. return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
  156. }
  157. }
  158. // ********************************** NOTICE ********************************** //
  159. createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
  160. var pluginCli *containerd.Client
  161. // Windows is not currently using containerd, keep the
  162. // client as nil
  163. if config.ContainerdAddr != "" {
  164. pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(pluginexec.PluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
  165. if err != nil {
  166. return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
  167. }
  168. }
  169. return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, m)
  170. }
  171. // ********************************** NOTICE ********************************** //
  172. // Plugin system initialization should happen before restore. Do not change order.
  173. d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
  174. Root: filepath.Join(config.Root, "plugins"),
  175. ExecRoot: getPluginExecRoot(config.Root),
  176. Store: d.PluginStore,
  177. CreateExecutor: createPluginExec,
  178. RegistryService: registryService,
  179. LiveRestoreEnabled: config.LiveRestoreEnabled,
  180. LogPluginEvent: d.LogPluginEvent, // todo: make private
  181. AuthzMiddleware: config.AuthzMiddleware,
  182. })
  183. if err != nil {
  184. return nil, errors.Wrap(err, "couldn't create plugin manager")
  185. }
  186. if err := d.setupDefaultLogConfig(); err != nil {
  187. return nil, err
  188. }
  189. for operatingSystem, gd := range d.graphDrivers {
  190. layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
  191. Root: config.Root,
  192. MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
  193. GraphDriver: gd,
  194. GraphDriverOptions: config.GraphOptions,
  195. IDMapping: idMapping,
  196. PluginGetter: d.PluginStore,
  197. ExperimentalEnabled: config.Experimental,
  198. OS: operatingSystem,
  199. })
  200. if err != nil {
  201. return nil, err
  202. }
  203. }
  204. // As layerstore initialization may set the driver
  205. for os := range d.graphDrivers {
  206. d.graphDrivers[os] = layerStores[os].DriverName()
  207. }
  208. // Configure and validate the kernels security support. Note this is a Linux/FreeBSD
  209. // operation only, so it is safe to pass *just* the runtime OS graphdriver.
  210. if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
  211. return nil, err
  212. }
  213. imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
  214. ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
  215. if err != nil {
  216. return nil, err
  217. }
  218. lgrMap := make(map[string]image.LayerGetReleaser)
  219. for os, ls := range layerStores {
  220. lgrMap[os] = ls
  221. }
  222. // ********************************** NOTICE ********************************** //
  223. imageStore, err := image.NewImageStore(ifs, lgrMap)
  224. // ********************************** NOTICE ********************************** //
  225. if err != nil {
  226. return nil, err
  227. }
  228. // ********************************** NOTICE ********************************** //
  229. d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
  230. // ********************************** NOTICE ********************************** //
  231. if err != nil {
  232. return nil, err
  233. }
  234. trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
  235. if err != nil {
  236. return nil, err
  237. }
  238. trustDir := filepath.Join(config.Root, "trust")
  239. if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
  240. return nil, err
  241. }
  242. // We have a single tag/reference store for the daemon globally. However, it's
  243. // stored under the graphdriver. On host platforms which only support a single
  244. // container OS, but multiple selectable graphdrivers, this means depending on which
  245. // graphdriver is chosen, the global reference store is under there. For
  246. // platforms which support multiple container operating systems, this is slightly
  247. // more problematic as where does the global ref store get located? Fortunately,
  248. // for Windows, which is currently the only daemon supporting multiple container
  249. // operating systems, the list of graphdrivers available isn't user configurable.
  250. // For backwards compatibility, we just put it under the windowsfilter
  251. // directory regardless.
  252. refStoreLocation := filepath.Join(imageRoot, `repositories.json`)
  253. rs, err := refstore.NewReferenceStore(refStoreLocation)
  254. if err != nil {
  255. return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
  256. }
  257. distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
  258. if err != nil {
  259. return nil, err
  260. }
  261. // Discovery is only enabled when the daemon is launched with an address to advertise. When
  262. // initialized, the daemon is registered and we can store the discovery backend as it's read-only
  263. if err := d.initDiscovery(config); err != nil {
  264. return nil, err
  265. }
  266. sysInfo := sysinfo.New(false)
  267. // Check if Devices cgroup is mounted, it is hard requirement for container security,
  268. // on Linux.
  269. if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
  270. return nil, errors.New("Devices cgroup isn't mounted")
  271. }
  272. d.ID = trustKey.PublicKey().KeyID()
  273. d.repository = daemonRepo
  274. d.containers = container.NewMemoryStore()
  275. if d.containersReplica, err = container.NewViewDB(); err != nil {
  276. return nil, err
  277. }
  278. d.execCommands = exec.NewStore()
  279. d.idIndex = truncindex.NewTruncIndex([]string{})
  280. d.statsCollector = d.newStatsCollector(1 * time.Second)
  281. d.EventsService = events.New()
  282. d.root = config.Root
  283. d.idMapping = idMapping
  284. d.seccompEnabled = sysInfo.Seccomp
  285. d.apparmorEnabled = sysInfo.AppArmor
  286. d.linkIndex = newLinkIndex()
  287. // TODO: imageStore, distributionMetadataStore, and ReferenceStore are only
  288. // used above to run migration. They could be initialized in ImageService
  289. // if migration is called from daemon/images. layerStore might move as well.
  290. // ********************************** NOTICE ********************************** //
  291. d.imageService = images.NewImageService(images.ImageServiceConfig{
  292. ContainerStore: d.containers,
  293. DistributionMetadataStore: distributionMetadataStore,
  294. EventsService: d.EventsService,
  295. ImageStore: imageStore,
  296. LayerStores: layerStores,
  297. MaxConcurrentDownloads: *config.MaxConcurrentDownloads,
  298. MaxConcurrentUploads: *config.MaxConcurrentUploads,
  299. ReferenceStore: rs,
  300. RegistryService: registryService,
  301. TrustKey: trustKey,
  302. })
  303. // ********************************** NOTICE ********************************** //
  304. go d.execCommandGC()
  305. // ********************************** NOTICE ********************************** //
  306. d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), ContainersNamespace, d)
  307. // ********************************** NOTICE ********************************** //
  308. if err != nil {
  309. return nil, err
  310. }
  311. // ********************************** NOTICE ********************************** //
  312. if err := d.restore(); err != nil {
  313. // ********************************** NOTICE ********************************** //
  314. return nil, err
  315. }
  316. close(d.startupDone)
  317. // FIXME: this method never returns an error
  318. info, _ := d.SystemInfo()
  319. engineInfo.WithValues(
  320. dockerversion.Version,
  321. dockerversion.GitCommit,
  322. info.Architecture,
  323. info.Driver,
  324. info.KernelVersion,
  325. info.OperatingSystem,
  326. info.OSType,
  327. info.ID,
  328. ).Set(1)
  329. engineCpus.Set(float64(info.NCPU))
  330. engineMemory.Set(float64(info.MemTotal))
  331. gd := ""
  332. for os, driver := range d.graphDrivers {
  333. if len(gd) > 0 {
  334. gd += ", "
  335. }
  336. gd += driver
  337. if len(d.graphDrivers) > 1 {
  338. gd = fmt.Sprintf("%s (%s)", gd, os)
  339. }
  340. }
  341. logrus.WithFields(logrus.Fields{
  342. "version": dockerversion.Version,
  343. "commit": dockerversion.GitCommit,
  344. "graphdriver(s)": gd,
  345. }).Info("Docker daemon")
  346. return d, nil
  347. }
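
One idiom worth noting in NewDaemon: err is a named return value, so the deferred closure near the top can observe whatever error the function ultimately returns and call d.Shutdown() to unwind partial initialization. A self-contained sketch of the idiom (illustrative names):

  package main

  import (
      "errors"
      "fmt"
  )

  type resource struct{ name string }

  func (r *resource) Shutdown() { fmt.Println("rolled back", r.name) }

  func setup(fail bool) (res *resource, err error) {
      res = &resource{name: "daemon-root"}
      defer func() {
          // err is a named return, so this sees the final error value.
          if err != nil {
              res.Shutdown()
              res = nil
          }
      }()
      if fail {
          return res, errors.New("init step failed")
      }
      return res, nil
  }

  func main() {
      _, err := setup(true)
      fmt.Println("setup error:", err)
  }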

2.1.1.2.1) daemon/daemon.go#Daemon.restore

  1. func (daemon *Daemon) restore() error {
  2. var mapLock sync.Mutex
  3. containers := make(map[string]*container.Container)
  4. logrus.Info("Loading containers: start.")
  5. // ********************************** NOTICE ********************************** //
  6. dir, err := ioutil.ReadDir(daemon.repository)
  7. // ********************************** NOTICE ********************************** //
  8. if err != nil {
  9. return err
  10. }
  11. // parallelLimit is the maximum number of parallel startup jobs that we
  12. // allow (this is the limit used for all startup semaphores). The multiplier
  13. // (128) was chosen after some fairly significant benchmarking -- don't change
  14. // it unless you've tested it significantly (this value is adjusted if
  15. // RLIMIT_NOFILE is small to avoid EMFILE).
  16. parallelLimit := adjustParallelLimit(len(dir), 128*runtime.NumCPU())
  17. // Re-used for all parallel startup jobs.
  18. var group sync.WaitGroup
  19. sem := semaphore.NewWeighted(int64(parallelLimit))
  20. for _, v := range dir {
  21. group.Add(1)
  22. go func(id string) {
  23. defer group.Done()
  24. _ = sem.Acquire(context.Background(), 1)
  25. defer sem.Release(1)
  26. // ********************************** NOTICE ********************************** //
  27. container, err := daemon.load(id)
  28. // ********************************** NOTICE ********************************** //
  29. if err != nil {
  30. logrus.Errorf("Failed to load container %v: %v", id, err)
  31. return
  32. }
  33. if !system.IsOSSupported(container.OS) {
  34. logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS)
  35. return
  36. }
  37. // Ignore the container if it does not support the current driver being used by the graph
  38. currentDriverForContainerOS := daemon.graphDrivers[container.OS]
  39. if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
  40. rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS)
  41. if err != nil {
  42. logrus.Errorf("Failed to load container mount %v: %v", id, err)
  43. return
  44. }
  45. container.RWLayer = rwlayer
  46. logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())
  47. mapLock.Lock()
  48. containers[container.ID] = container
  49. mapLock.Unlock()
  50. } else {
  51. logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
  52. }
  53. }(v.Name())
  54. }
  55. group.Wait()
  56. removeContainers := make(map[string]*container.Container)
  57. restartContainers := make(map[*container.Container]chan struct{})
  58. activeSandboxes := make(map[string]interface{})
  59. for _, c := range containers {
  60. group.Add(1)
  61. go func(c *container.Container) {
  62. defer group.Done()
  63. _ = sem.Acquire(context.Background(), 1)
  64. defer sem.Release(1)
  65. if err := daemon.registerName(c); err != nil {
  66. logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
  67. mapLock.Lock()
  68. delete(containers, c.ID)
  69. mapLock.Unlock()
  70. return
  71. }
  72. if err := daemon.Register(c); err != nil {
  73. logrus.Errorf("Failed to register container %s: %s", c.ID, err)
  74. mapLock.Lock()
  75. delete(containers, c.ID)
  76. mapLock.Unlock()
  77. return
  78. }
  79. // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
  80. // We should rewrite it to use the daemon defaults.
  81. // Fixes https://github.com/docker/docker/issues/22536
  82. if c.HostConfig.LogConfig.Type == "" {
  83. if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
  84. logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
  85. }
  86. }
  87. }(c)
  88. }
  89. group.Wait()
  90. for _, c := range containers {
  91. group.Add(1)
  92. go func(c *container.Container) {
  93. defer group.Done()
  94. _ = sem.Acquire(context.Background(), 1)
  95. defer sem.Release(1)
  96. daemon.backportMountSpec(c)
  97. if err := daemon.checkpointAndSave(c); err != nil {
  98. logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
  99. }
  100. daemon.setStateCounter(c)
  101. logrus.WithFields(logrus.Fields{
  102. "container": c.ID,
  103. "running": c.IsRunning(),
  104. "paused": c.IsPaused(),
  105. }).Debug("restoring container")
  106. var (
  107. err error
  108. alive bool
  109. ec uint32
  110. exitedAt time.Time
  111. )
  112. alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
  113. if err != nil && !errdefs.IsNotFound(err) {
  114. logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err)
  115. return
  116. }
  117. if !alive {
  118. ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID)
  119. if err != nil && !errdefs.IsNotFound(err) {
  120. logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
  121. return
  122. }
  123. } else if !daemon.configStore.LiveRestoreEnabled {
  124. if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
  125. logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
  126. return
  127. }
  128. }
  129. if c.IsRunning() || c.IsPaused() {
  130. c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
  131. if c.IsPaused() && alive {
  132. s, err := daemon.containerd.Status(context.Background(), c.ID)
  133. if err != nil {
  134. logrus.WithError(err).WithField("container", c.ID).
  135. Errorf("Failed to get container status")
  136. } else {
  137. logrus.WithField("container", c.ID).WithField("state", s).
  138. Info("restored container paused")
  139. switch s {
  140. case libcontainerd.StatusPaused, libcontainerd.StatusPausing:
  141. // nothing to do
  142. case libcontainerd.StatusStopped:
  143. alive = false
  144. case libcontainerd.StatusUnknown:
  145. logrus.WithField("container", c.ID).
  146. Error("Unknown status for container during restore")
  147. default:
  148. // running
  149. c.Lock()
  150. c.Paused = false
  151. daemon.setStateCounter(c)
  152. if err := c.CheckpointTo(daemon.containersReplica); err != nil {
  153. logrus.WithError(err).WithField("container", c.ID).
  154. Error("Failed to update stopped container state")
  155. }
  156. c.Unlock()
  157. }
  158. }
  159. }
  160. if !alive {
  161. c.Lock()
  162. c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
  163. daemon.Cleanup(c)
  164. if err := c.CheckpointTo(daemon.containersReplica); err != nil {
  165. logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err)
  166. }
  167. c.Unlock()
  168. }
  169. // we call Mount and then Unmount to get BaseFs of the container
  170. if err := daemon.Mount(c); err != nil {
  171. // The mount is unlikely to fail. However, in case mount fails
  172. // the container should be allowed to restore here. Some functionalities
  173. // (like docker exec -u user) might be missing but container is able to be
  174. // stopped/restarted/removed.
  175. // See #29365 for related information.
  176. // The error is only logged here.
  177. logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
  178. } else {
  179. if err := daemon.Unmount(c); err != nil {
  180. logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
  181. }
  182. }
  183. c.ResetRestartManager(false)
  184. if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
  185. options, err := daemon.buildSandboxOptions(c)
  186. if err != nil {
  187. logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
  188. }
  189. mapLock.Lock()
  190. activeSandboxes[c.NetworkSettings.SandboxID] = options
  191. mapLock.Unlock()
  192. }
  193. }
  194. // get list of containers we need to restart
  195. // Do not autostart containers which
  196. // have endpoints in a swarm scope
  197. // network yet since the cluster is
  198. // not initialized yet. We will start
  199. // them after the cluster is
  200. // initialized.
  201. if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
  202. mapLock.Lock()
  203. restartContainers[c] = make(chan struct{})
  204. mapLock.Unlock()
  205. } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
  206. mapLock.Lock()
  207. removeContainers[c.ID] = c
  208. mapLock.Unlock()
  209. }
  210. c.Lock()
  211. if c.RemovalInProgress {
  212. // We probably crashed in the middle of a removal, reset
  213. // the flag.
  214. //
  215. // We DO NOT remove the container here as we do not
  216. // know if the user had requested for either the
  217. // associated volumes, network links or both to also
  218. // be removed. So we put the container in the "dead"
  219. // state and leave further processing up to them.
  220. logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
  221. c.RemovalInProgress = false
  222. c.Dead = true
  223. if err := c.CheckpointTo(daemon.containersReplica); err != nil {
  224. logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err)
  225. }
  226. }
  227. c.Unlock()
  228. }(c)
  229. }
  230. group.Wait()
  231. // ********************************** NOTICE ********************************** //
  232. daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
  233. // ********************************** NOTICE ********************************** //
  234. if err != nil {
  235. return fmt.Errorf("Error initializing network controller: %v", err)
  236. }
  237. // Now that all the containers are registered, register the links
  238. for _, c := range containers {
  239. group.Add(1)
  240. go func(c *container.Container) {
  241. _ = sem.Acquire(context.Background(), 1)
  242. if err := daemon.registerLinks(c, c.HostConfig); err != nil {
  243. logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
  244. }
  245. sem.Release(1)
  246. group.Done()
  247. }(c)
  248. }
  249. group.Wait()
  250. for c, notifier := range restartContainers {
  251. group.Add(1)
  252. go func(c *container.Container, chNotify chan struct{}) {
  253. _ = sem.Acquire(context.Background(), 1)
  254. logrus.Debugf("Starting container %s", c.ID)
  255. // ignore errors here as this is a best effort to wait for children to be
  256. // running before we try to start the container
  257. children := daemon.children(c)
  258. timeout := time.After(5 * time.Second)
  259. for _, child := range children {
  260. if notifier, exists := restartContainers[child]; exists {
  261. select {
  262. case <-notifier:
  263. case <-timeout:
  264. }
  265. }
  266. }
  267. // Make sure networks are available before starting
  268. daemon.waitForNetworks(c)
  269. if err := daemon.containerStart(c, "", "", true); err != nil {
  270. logrus.Errorf("Failed to start container %s: %s", c.ID, err)
  271. }
  272. close(chNotify)
  273. sem.Release(1)
  274. group.Done()
  275. }(c, notifier)
  276. }
  277. group.Wait()
  278. for id := range removeContainers {
  279. group.Add(1)
  280. go func(cid string) {
  281. _ = sem.Acquire(context.Background(), 1)
  282. if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
  283. logrus.Errorf("Failed to remove container %s: %s", cid, err)
  284. }
  285. sem.Release(1)
  286. group.Done()
  287. }(id)
  288. }
  289. group.Wait()
  290. // any containers that were started above would already have had this done,
  291. // however we need to now prepare the mountpoints for the rest of the containers as well.
  292. // This shouldn't cause any issue running on the containers that already had this run.
  293. // This must be run after any containers with a restart policy so that containerized plugins
  294. // can have a chance to be running before we try to initialize them.
  295. for _, c := range containers {
  296. // if the container has restart policy, do not
  297. // prepare the mountpoints since it has been done on restarting.
  298. // This is to speed up the daemon start when a restart container
  299. // has a volume and the volume driver is not available.
  300. if _, ok := restartContainers[c]; ok {
  301. continue
  302. } else if _, ok := removeContainers[c.ID]; ok {
  303. // container is automatically removed, skip it.
  304. continue
  305. }
  306. group.Add(1)
  307. go func(c *container.Container) {
  308. _ = sem.Acquire(context.Background(), 1)
  309. if err := daemon.prepareMountPoints(c); err != nil {
  310. logrus.Error(err)
  311. }
  312. sem.Release(1)
  313. group.Done()
  314. }(c)
  315. }
  316. group.Wait()
  317. logrus.Info("Loading containers: done.")
  318. return nil
  319. }
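
Every phase of restore() spawns one goroutine per container but caps real concurrency with a weighted semaphore (the source sizes it via adjustParallelLimit with the 128 * NumCPU multiplier described in the comment above). A stripped-down sketch of the pattern (the items and the smaller limit here are illustrative):

  package main

  import (
      "context"
      "fmt"
      "runtime"
      "sync"

      "golang.org/x/sync/semaphore"
  )

  func main() {
      containers := []string{"c1", "c2", "c3", "c4"}
      sem := semaphore.NewWeighted(int64(2 * runtime.NumCPU()))
      var group sync.WaitGroup
      for _, id := range containers {
          group.Add(1)
          go func(id string) {
              defer group.Done()
              _ = sem.Acquire(context.Background(), 1)
              defer sem.Release(1)
              fmt.Println("loading", id) // stands in for daemon.load(id)
          }(id)
      }
      group.Wait()
  }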

—2.1.1.2.1.1) daemon/daemon_unix.go#Daemon.initNetworkController

  1. func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
  2. // ********************************** NOTICE ********************************** //
  3. netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
  4. // ********************************** NOTICE ********************************** //
  5. if err != nil {
  6. return nil, err
  7. }
  8. // ********************************** NOTICE ********************************** //
  9. controller, err := libnetwork.New(netOptions...)
  10. // ********************************** NOTICE ********************************** //
  11. if err != nil {
  12. return nil, fmt.Errorf("error obtaining controller instance: %v", err)
  13. }
  14. if len(activeSandboxes) > 0 {
  15. logrus.Info("There are old running containers, the network config will not take effect")
  16. return controller, nil
  17. }
  18. // Initialize default network on "null"
  19. if n, _ := controller.NetworkByName("none"); n == nil {
  20. if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
  21. return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
  22. }
  23. }
  24. // Initialize default network on "host"
  25. if n, _ := controller.NetworkByName("host"); n == nil {
  26. if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
  27. return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
  28. }
  29. }
  30. // Clear stale bridge network
  31. if n, err := controller.NetworkByName("bridge"); err == nil {
  32. if err = n.Delete(); err != nil {
  33. return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
  34. }
  35. if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled {
  36. removeDefaultBridgeInterface()
  37. }
  38. }
  39. if !config.DisableBridge {
  40. // Initialize default driver "bridge"
  41. // ********************************** NOTICE ********************************** //
  42. if err := initBridgeDriver(controller, config); err != nil {
  43. // ********************************** NOTICE ********************************** //
  44. return nil, err
  45. }
  46. } else {
  47. removeDefaultBridgeInterface()
  48. }
  49. return controller, nil
  50. }
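
The default "none" and "host" networks are created only if a lookup by name comes up empty. A toy in-memory sketch of that check-and-create flow (not libnetwork's API):

  package main

  import "fmt"

  type controller struct{ networks map[string]bool }

  func (c *controller) networkByName(name string) bool { return c.networks[name] }

  func (c *controller) newNetwork(driver, name string) {
      c.networks[name] = true
      fmt.Printf("created %q network with driver %q\n", name, driver)
  }

  func main() {
      c := &controller{networks: map[string]bool{}}
      defaults := []struct{ driver, name string }{{"null", "none"}, {"host", "host"}}
      for _, n := range defaults {
          if !c.networkByName(n.name) { // only create when missing
              c.newNetwork(n.driver, n.name)
          }
      }
  }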

——2.1.1.2.1.1.1) daemon/daemon.go#Daemon.networkOptions

  1. func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
  2. options := []nwconfig.Option{}
  3. if dconfig == nil {
  4. return options, nil
  5. }
  6. options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
  7. options = append(options, nwconfig.OptionDataDir(dconfig.Root))
  8. options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
  9. // bridge
  10. dd := runconfig.DefaultDaemonNetworkMode()
  11. // bridge
  12. dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
  13. options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
  14. options = append(options, nwconfig.OptionDefaultNetwork(dn))
  15. if strings.TrimSpace(dconfig.ClusterStore) != "" {
  16. kv := strings.Split(dconfig.ClusterStore, "://")
  17. if len(kv) != 2 {
  18. return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
  19. }
  20. options = append(options, nwconfig.OptionKVProvider(kv[0]))
  21. options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
  22. }
  23. if len(dconfig.ClusterOpts) > 0 {
  24. options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
  25. }
  26. if daemon.discoveryWatcher != nil {
  27. options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
  28. }
  29. if dconfig.ClusterAdvertise != "" {
  30. options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
  31. }
  32. options = append(options, nwconfig.OptionLabels(dconfig.Labels))
  33. options = append(options, driverOptions(dconfig)...)
  34. if len(dconfig.NetworkConfig.DefaultAddressPools.Value()) > 0 {
  35. options = append(options, nwconfig.OptionDefaultAddressPoolConfig(dconfig.NetworkConfig.DefaultAddressPools.Value()))
  36. }
  37. if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
  38. options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
  39. }
  40. if pg != nil {
  41. options = append(options, nwconfig.OptionPluginGetter(pg))
  42. }
  43. options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))
  44. return options, nil
  45. }
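
networkOptions builds a slice of functional options that libnetwork's constructor later applies in order. A generic sketch of the pattern with made-up option names (libnetwork's nwconfig options follow the same shape):

  package main

  import "fmt"

  type netConfig struct {
      dataDir       string
      defaultDriver string
  }

  // option mutates a config; nwconfig.Option plays this role in libnetwork.
  type option func(*netConfig)

  func optionDataDir(dir string) option {
      return func(c *netConfig) { c.dataDir = dir }
  }

  func optionDefaultDriver(name string) option {
      return func(c *netConfig) { c.defaultDriver = name }
  }

  func newConfig(opts ...option) *netConfig {
      c := &netConfig{}
      for _, o := range opts {
          o(c) // applied in the order they were appended
      }
      return c
  }

  func main() {
      c := newConfig(optionDataDir("/var/lib/example"), optionDefaultDriver("bridge"))
      fmt.Printf("%+v\n", *c)
  }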

——2.1.1.2.1.1.2) libnetwork/controller.go#New

  1. // New creates a new instance of network controller.
  2. func New(cfgOptions ...config.Option) (NetworkController, error) {
  3. c := &controller{
  4. id: stringid.GenerateRandomID(),
  5. cfg: config.ParseConfigOptions(cfgOptions...),
  6. sandboxes: sandboxTable{},
  7. svcRecords: make(map[string]svcInfo),
  8. serviceBindings: make(map[serviceKey]*service),
  9. agentInitDone: make(chan struct{}),
  10. networkLocker: locker.New(),
  11. DiagnosticServer: diagnostic.New(),
  12. }
  13. c.DiagnosticServer.Init()
  14. if err := c.initStores(); err != nil {
  15. return nil, err
  16. }
  17. drvRegistry, err := drvregistry.New(c.getStore(datastore.LocalScope), c.getStore(datastore.GlobalScope), c.RegisterDriver, nil, c.cfg.PluginGetter)
  18. if err != nil {
  19. return nil, err
  20. }
  21. for _, i := range getInitializers(c.cfg.Daemon.Experimental) {
  22. var dcfg map[string]interface{}
  23. // External plugins don't need config passed through daemon. They can
  24. // bootstrap themselves
  25. if i.ntype != "remote" {
  26. dcfg = c.makeDriverConfig(i.ntype)
  27. }
  28. if err := drvRegistry.AddDriver(i.ntype, i.fn, dcfg); err != nil {
  29. return nil, err
  30. }
  31. }
  32. if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope), c.cfg.Daemon.DefaultAddressPool); err != nil {
  33. return nil, err
  34. }
  35. c.drvRegistry = drvRegistry
  36. if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
  37. if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
  38. // Failing to initialize discovery is a bad situation to be in.
  39. // But it cannot fail creating the Controller
  40. logrus.Errorf("Failed to Initialize Discovery : %v", err)
  41. }
  42. }
  43. c.WalkNetworks(populateSpecial)
  44. // Reserve pools first before doing cleanup. Otherwise the
  45. // cleanups of endpoint/network and sandbox below will
  46. // generate many unnecessary warnings
  47. c.reservePools()
  48. // Cleanup resources
  49. c.sandboxCleanup(c.cfg.ActiveSandboxes)
  50. c.cleanupLocalEndpoints()
  51. c.networkCleanup()
  52. if err := c.startExternalKeyListener(); err != nil {
  53. return nil, err
  54. }
  55. return c, nil
  56. }

——2.1.1.2.1.1.3) daemon/daemon_unix.go#initBridgeDriver

  1. func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
  2. bridgeName := bridge.DefaultBridgeName
  3. if config.BridgeConfig.Iface != "" {
  4. bridgeName = config.BridgeConfig.Iface
  5. }
  6. netOption := map[string]string{
  7. bridge.BridgeName: bridgeName,
  8. bridge.DefaultBridge: strconv.FormatBool(true),
  9. netlabel.DriverMTU: strconv.Itoa(config.Mtu),
  10. bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
  11. bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
  12. }
  13. // --ip processing
  14. if config.BridgeConfig.DefaultIP != nil {
  15. netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
  16. }
  17. var (
  18. ipamV4Conf *libnetwork.IpamConf
  19. ipamV6Conf *libnetwork.IpamConf
  20. )
  21. ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
  22. nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
  23. if err != nil {
  24. return errors.Wrap(err, "list bridge addresses failed")
  25. }
  26. nw := nwList[0]
  27. if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
  28. _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
  29. if err != nil {
  30. return errors.Wrap(err, "parse CIDR failed")
  31. }
  32. // Iterate through in case there are multiple addresses for the bridge
  33. for _, entry := range nwList {
  34. if fCIDR.Contains(entry.IP) {
  35. nw = entry
  36. break
  37. }
  38. }
  39. }
  40. ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
  41. hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
  42. if hip.IsGlobalUnicast() {
  43. ipamV4Conf.Gateway = nw.IP.String()
  44. }
  45. if config.BridgeConfig.IP != "" {
  46. ipamV4Conf.PreferredPool = config.BridgeConfig.IP
  47. ip, _, err := net.ParseCIDR(config.BridgeConfig.IP)
  48. if err != nil {
  49. return err
  50. }
  51. ipamV4Conf.Gateway = ip.String()
  52. } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
  53. logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
  54. }
  55. if config.BridgeConfig.FixedCIDR != "" {
  56. _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
  57. if err != nil {
  58. return err
  59. }
  60. ipamV4Conf.SubPool = fCIDR.String()
  61. }
  62. if config.BridgeConfig.DefaultGatewayIPv4 != nil {
  63. ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
  64. }
  65. var deferIPv6Alloc bool
  66. if config.BridgeConfig.FixedCIDRv6 != "" {
  67. _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
  68. if err != nil {
  69. return err
  70. }
  71. // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
  72. // at least 48 host bits, we need to guarantee the current behavior where the containers'
  73. // IPv6 addresses will be constructed based on the containers' interface MAC address.
  74. // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
  75. // on this network until after the driver has created the endpoint and returned the
  76. // constructed address. Libnetwork will then reserve this address with the ipam driver.
  77. ones, _ := fCIDRv6.Mask.Size()
  78. deferIPv6Alloc = ones <= 80
  79. if ipamV6Conf == nil {
  80. ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
  81. }
  82. ipamV6Conf.PreferredPool = fCIDRv6.String()
  83. // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
  84. // address belongs to the same network, we need to inform libnetwork about it, so
  85. // that it can be reserved with IPAM and it will not be given away to somebody else
  86. for _, nw6 := range nw6List {
  87. if fCIDRv6.Contains(nw6.IP) {
  88. ipamV6Conf.Gateway = nw6.IP.String()
  89. break
  90. }
  91. }
  92. }
  93. if config.BridgeConfig.DefaultGatewayIPv6 != nil {
  94. if ipamV6Conf == nil {
  95. ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
  96. }
  97. ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
  98. }
  99. v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
  100. v6Conf := []*libnetwork.IpamConf{}
  101. if ipamV6Conf != nil {
  102. v6Conf = append(v6Conf, ipamV6Conf)
  103. }
  104. // Initialize default network on "bridge" with the same name
  105. // ********************************** NOTICE ********************************** //
  106. _, err = controller.NewNetwork("bridge", "bridge", "",
  107. libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
  108. libnetwork.NetworkOptionDriverOpts(netOption),
  109. libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
  110. libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
  111. // ********************************** NOTICE ********************************** //
  112. if err != nil {
  113. return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
  114. }
  115. return nil
  116. }
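
The address election above picks the first bridge address but prefers one inside --fixed-cidr when the bridge has several. A condensed, standalone version of just that selection step (the sample networks are illustrative):

  package main

  import (
      "fmt"
      "net"
  )

  // electAddress picks the first address, but prefers one inside fixedCIDR.
  func electAddress(addrs []*net.IPNet, fixedCIDR string) *net.IPNet {
      nw := addrs[0]
      if len(addrs) > 1 && fixedCIDR != "" {
          _, fCIDR, err := net.ParseCIDR(fixedCIDR)
          if err != nil {
              return nw
          }
          for _, entry := range addrs {
              if fCIDR.Contains(entry.IP) {
                  nw = entry
                  break
              }
          }
      }
      return nw
  }

  func main() {
      _, a, _ := net.ParseCIDR("172.17.0.1/16")
      _, b, _ := net.ParseCIDR("10.10.0.1/24")
      fmt.Println(electAddress([]*net.IPNet{a, b}, "10.10.0.0/16")) // -> 10.10.0.0/24
  }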

———2.1.1.2.1.1.3.1) libnetwork/controller.go#controller.NewNetwork

  1. // NewNetwork creates a new network of the specified network type. The options
  2. // are network specific and modeled in a generic way.
  3. func (c *controller) NewNetwork(networkType, name string, id string, options ...NetworkOption) (Network, error) {
  4. if id != "" {
  5. c.networkLocker.Lock(id)
  6. defer c.networkLocker.Unlock(id)
  7. if _, err := c.NetworkByID(id); err == nil {
  8. return nil, NetworkNameError(id)
  9. }
  10. }
  11. if !config.IsValidName(name) {
  12. return nil, ErrInvalidName(name)
  13. }
  14. if id == "" {
  15. id = stringid.GenerateRandomID()
  16. }
  17. defaultIpam := defaultIpamForNetworkType(networkType)
  18. // Construct the network object
  19. network := &network{
  20. name: name,
  21. networkType: networkType,
  22. generic: map[string]interface{}{netlabel.GenericData: make(map[string]string)},
  23. ipamType: defaultIpam,
  24. id: id,
  25. created: time.Now(),
  26. ctrlr: c,
  27. persist: true,
  28. drvOnce: &sync.Once{},
  29. loadBalancerMode: loadBalancerModeDefault,
  30. }
  31. network.processOptions(options...)
  32. if err := network.validateConfiguration(); err != nil {
  33. return nil, err
  34. }
  35. var (
  36. cap *driverapi.Capability
  37. err error
  38. )
  39. // Reset network types, force local scope and skip allocation and
  40. // plumbing for configuration networks. Reset of the config-only
  41. // network drivers is needed so that this special network is not
  42. // usable by old engine versions.
  43. if network.configOnly {
  44. network.scope = datastore.LocalScope
  45. network.networkType = "null"
  46. goto addToStore
  47. }
  48. _, cap, err = network.resolveDriver(network.networkType, true)
  49. if err != nil {
  50. return nil, err
  51. }
  52. if network.scope == datastore.LocalScope && cap.DataScope == datastore.GlobalScope {
  53. return nil, types.ForbiddenErrorf("cannot downgrade network scope for %s networks", networkType)
  54. }
  55. if network.ingress && cap.DataScope != datastore.GlobalScope {
  56. return nil, types.ForbiddenErrorf("Ingress network can only be global scope network")
  57. }
  58. // At this point the network scope is still unknown if not set by user
  59. if (cap.DataScope == datastore.GlobalScope || network.scope == datastore.SwarmScope) &&
  60. !c.isDistributedControl() && !network.dynamic {
  61. if c.isManager() {
  62. // For non-distributed controlled environment, globalscoped non-dynamic networks are redirected to Manager
  63. return nil, ManagerRedirectError(name)
  64. }
  65. return nil, types.ForbiddenErrorf("Cannot create a multi-host network from a worker node. Please create the network from a manager node.")
  66. }
  67. if network.scope == datastore.SwarmScope && c.isDistributedControl() {
  68. return nil, types.ForbiddenErrorf("cannot create a swarm scoped network when swarm is not active")
  69. }
  70. // Make sure we have a driver available for this network type
  71. // before we allocate anything.
  72. if _, err := network.driver(true); err != nil {
  73. return nil, err
  74. }
  75. // From this point on, we need the network specific configuration,
  76. // which may come from a configuration-only network
  77. if network.configFrom != "" {
  78. t, err := c.getConfigNetwork(network.configFrom)
  79. if err != nil {
  80. return nil, types.NotFoundErrorf("configuration network %q does not exist", network.configFrom)
  81. }
  82. if err := t.applyConfigurationTo(network); err != nil {
  83. return nil, types.InternalErrorf("Failed to apply configuration: %v", err)
  84. }
  85. defer func() {
  86. if err == nil {
  87. if err := t.getEpCnt().IncEndpointCnt(); err != nil {
  88. logrus.Warnf("Failed to update reference count for configuration network %q on creation of network %q: %v",
  89. t.Name(), network.Name(), err)
  90. }
  91. }
  92. }()
  93. }
  94. err = network.ipamAllocate()
  95. if err != nil {
  96. return nil, err
  97. }
  98. defer func() {
  99. if err != nil {
  100. network.ipamRelease()
  101. }
  102. }()
  103. err = c.addNetwork(network)
  104. if err != nil {
  105. return nil, err
  106. }
  107. defer func() {
  108. if err != nil {
  109. if e := network.deleteNetwork(); e != nil {
  110. logrus.Warnf("couldn't roll back driver network on network %s creation failure: %v", network.name, err)
  111. }
  112. }
  113. }()
  114. // XXX If the driver type is "overlay" check the options for DSR
  115. // being set. If so, set the network's load balancing mode to DSR.
  116. // This should really be done in a network option, but due to
  117. // time pressure to get this in without adding changes to moby,
  118. // swarm and CLI, it is being implemented as a driver-specific
  119. // option. Unfortunately, drivers can't influence the core
  120. // "libnetwork.network" data type. Hence we need this hack code
  121. // to implement in this manner.
  122. if gval, ok := network.generic[netlabel.GenericData]; ok && network.networkType == "overlay" {
  123. optMap := gval.(map[string]string)
  124. if _, ok := optMap[overlayDSROptionString]; ok {
  125. network.loadBalancerMode = loadBalancerModeDSR
  126. }
  127. }
  128. addToStore:
  129. // First store the endpoint count, then the network, to avoid
  130. // ending up with a datastore containing a network but no epCnt
  131. // in case of an ungraceful shutdown during this function call.
  132. epCnt := &endpointCnt{n: network}
  133. if err = c.updateToStore(epCnt); err != nil {
  134. return nil, err
  135. }
  136. defer func() {
  137. if err != nil {
  138. if e := c.deleteFromStore(epCnt); e != nil {
  139. logrus.Warnf("could not rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e)
  140. }
  141. }
  142. }()
  143. network.epCnt = epCnt
  144. if err = c.updateToStore(network); err != nil {
  145. return nil, err
  146. }
  147. defer func() {
  148. if err != nil {
  149. if e := c.deleteFromStore(network); e != nil {
  150. logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", network, err, e)
  151. }
  152. }
  153. }()
  154. if network.configOnly {
  155. return network, nil
  156. }
  157. joinCluster(network)
  158. defer func() {
  159. if err != nil {
  160. network.cancelDriverWatches()
  161. if e := network.leaveCluster(); e != nil {
  162. logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", network.name, err, e)
  163. }
  164. }
  165. }()
  166. if network.hasLoadBalancerEndpoint() {
  167. if err = network.createLoadBalancerSandbox(); err != nil {
  168. return nil, err
  169. }
  170. }
  171. if !c.isDistributedControl() {
  172. c.Lock()
  173. arrangeIngressFilterRule()
  174. c.Unlock()
  175. }
  176. c.arrangeUserFilterRule()
  177. return network, nil
  178. }
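
The control flow above is worth pausing on. Config-only networks take the `goto addToStore` shortcut: they are forced to local scope with the "null" driver and are persisted without any driver or IPAM plumbing. For regular networks, every allocation step (IPAM, driver network, epCnt record, network record, cluster membership) arms a deferred rollback that fires only if the function-scoped err is non-nil when NewNetwork returns, unwinding the earlier steps in reverse order. A minimal sketch of that idiom, using hypothetical allocateA/releaseA/allocateB helpers rather than Docker code:

    package main

    import "fmt"

    // allocateA / releaseA / allocateB are stand-ins for paired steps
    // such as ipamAllocate / ipamRelease / addNetwork in NewNetwork.
    func allocateA() error { return nil }
    func releaseA()        { fmt.Println("rolled back A") }
    func allocateB() error { return fmt.Errorf("B failed") }

    // createResource uses a named return value so the deferred closure
    // can observe whether a later step failed.
    func createResource() (res string, err error) {
        if err = allocateA(); err != nil {
            return "", err
        }
        defer func() {
            if err != nil {
                releaseA() // undo step A only if a later step failed
            }
        }()
        if err = allocateB(); err != nil {
            return "", err // err is non-nil, so the deferred releaseA fires
        }
        return "resource", nil
    }

    func main() {
        if _, err := createResource(); err != nil {
            fmt.Println("create failed:", err)
        }
    }

One consequence of this pattern: the inner steps must assign to the shared err with `=` rather than declare a shadowed copy with `:=`, otherwise the deferred closures would never see the failure.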

2.1.1.3) cmd/dockerd/daemon.go#initRouter (sets up the routers for the Server)

  1. func initRouter(opts routerOptions) {
  2. decoder := runconfig.ContainerDecoder{}
  3. // ********************************** NOTICE ********************************** //
  4. // Configure the routers
  5. routers := []router.Router{
  6. // we need to add the checkpoint router before the container router or the DELETE gets masked
  7. checkpointrouter.NewRouter(opts.daemon, decoder),
  8. container.NewRouter(opts.daemon, decoder),
  9. image.NewRouter(opts.daemon.ImageService()),
  10. systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache, opts.buildkit, opts.features),
  11. volume.NewRouter(opts.daemon.VolumesService()),
  12. build.NewRouter(opts.buildBackend, opts.daemon, opts.features),
  13. sessionrouter.NewRouter(opts.sessionManager),
  14. swarmrouter.NewRouter(opts.cluster),
  15. pluginrouter.NewRouter(opts.daemon.PluginManager()),
  16. distributionrouter.NewRouter(opts.daemon.ImageService()),
  17. }
  18. // ********************************** NOTICE ********************************** //
  19. if opts.daemon.NetworkControllerEnabled() {
  20. routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
  21. }
  22. if opts.daemon.HasExperimental() {
  23. for _, r := range routers {
  24. for _, route := range r.Routes() {
  25. if experimental, ok := route.(router.ExperimentalRoute); ok {
  26. experimental.Enable()
  27. }
  28. }
  29. }
  30. }
  31. // ********************************** NOTICE ********************************** //
  32. opts.api.InitRouter(routers...)
  33. // ********************************** NOTICE ********************************** //
  34. }
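
Every entry appended to routers here satisfies the same small router.Router interface; judging from how Server.createMux consumes them in 2.1.1.3.2 (r.Routes(), then Handler()/Method()/Path() per route), the interface looks roughly like the sketch below. This is a paraphrase of api/server/router/router.go, not a verbatim copy:

    package router

    import (
        "context"
        "net/http"
    )

    // APIFunc mirrors httputils.APIFunc: an HTTP handler that also
    // receives a context and the decoded path variables.
    type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error

    // Router is what each sub-router (container, image, volume, ...)
    // implements; InitRouter only needs Routes().
    type Router interface {
        Routes() []Route
    }

    // Route describes one API endpoint; createMux reads all three
    // fields when wiring up the mux.
    type Route interface {
        Handler() APIFunc
        Method() string
        Path() string
    }

Because the server depends only on this interface, adding a new API area (as done conditionally for network above) is just a matter of appending another Router to the slice.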

2.1.1.3.1) api/server/router/container/container.go#NewRouter

  1. // NewRouter initializes a new container router
  2. func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router {
  3. r := &containerRouter{
  4. backend: b,
  5. decoder: decoder,
  6. }
  7. r.initRoutes()
  8. return r
  9. }
  1. // initRoutes initializes the routes in container router
  2. func (r *containerRouter) initRoutes() {
  3. r.routes = []router.Route{
  4. // HEAD
  5. router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive),
  6. // GET
  7. router.NewGetRoute("/containers/json", r.getContainersJSON),
  8. router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport),
  9. router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges),
  10. router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName),
  11. router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop),
  12. router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs),
  13. router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats),
  14. router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach),
  15. router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID),
  16. router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive),
  17. // POST
  18. router.NewPostRoute("/containers/create", r.postContainersCreate),
  19. router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill),
  20. router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause),
  21. router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause),
  22. router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart),
  23. router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart),
  24. router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop),
  25. router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait),
  26. router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize),
  27. router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach),
  28. router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12
  29. router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate),
  30. router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart),
  31. router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize),
  32. router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename),
  33. router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate),
  34. router.NewPostRoute("/containers/prune", r.postContainersPrune),
  35. router.NewPostRoute("/commit", r.postCommit),
  36. // PUT
  37. router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive),
  38. // DELETE
  39. router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers),
  40. }
  41. }
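
The `{name:.*}` fragments in these paths are gorilla/mux patterns: `name` is the variable and `.*` is the regex it must satisfy, deliberately permissive so the capture works for any container name or ID. A self-contained sketch (not Docker code; the handler body is made up) of how such a pattern is matched and extracted:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/mux"
    )

    func main() {
        m := mux.NewRouter()
        // Mirrors the shape of the getContainersByName registration above.
        m.Path("/containers/{name:.*}/json").Methods("GET").
            HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
                name := mux.Vars(r)["name"] // extracted path variable
                fmt.Fprintf(w, "inspect %s\n", name)
            })
        // GET /containers/abc123/json -> "inspect abc123"
        http.ListenAndServe(":8080", m)
    }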

2.1.1.3.2) api/server/server.go#Server.InitRouter

  1. // InitRouter initializes the list of routers for the server.
  2. // This method also enables the Go profiler.
  3. func (s *Server) InitRouter(routers ...router.Router) {
  4. s.routers = append(s.routers, routers...)
  5. // ********************************** NOTICE ********************************** //
  6. m := s.createMux()
  7. // ********************************** NOTICE ********************************** //
  8. s.routerSwapper = &routerSwapper{
  9. router: m,
  10. }
  11. }
  12. // createMux initializes the main router the server uses.
  13. func (s *Server) createMux() *mux.Router {
  14. m := mux.NewRouter()
  15. logrus.Debug("Registering routers")
  16. for _, apiRouter := range s.routers {
  17. for _, r := range apiRouter.Routes() {
  18. // ********************************** NOTICE ********************************** //
  19. f := s.makeHTTPHandler(r.Handler())
  20. logrus.Debugf("Registering %s, %s", r.Method(), r.Path())
  21. // ********************************** NOTICE ********************************** //
  22. m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f)
  23. // ********************************** NOTICE ********************************** //
  24. m.Path(r.Path()).Methods(r.Method()).Handler(f)
  25. }
  26. }
  27. debugRouter := debug.NewRouter()
  28. s.routers = append(s.routers, debugRouter)
  29. for _, r := range debugRouter.Routes() {
  30. f := s.makeHTTPHandler(r.Handler())
  31. m.Path("/debug" + r.Path()).Handler(f)
  32. }
  33. notFoundHandler := httputils.MakeErrorHandler(pageNotFoundError{})
  34. m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler)
  35. m.NotFoundHandler = notFoundHandler
  36. m.MethodNotAllowedHandler = notFoundHandler
  37. return m
  38. }
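
Note that createMux mounts every handler twice: once under versionMatcher and once at the bare path, which is why both /v1.39/containers/json and /containers/json reach the same route. versionMatcher is defined elsewhere in api/server/server.go (as the mux pattern "/v{version:[0-9.]+}", to the best of this walkthrough's knowledge); a standalone sketch of the effect:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/mux"
    )

    // Assumed to match the constant in api/server/server.go.
    const versionMatcher = "/v{version:[0-9.]+}"

    func main() {
        m := mux.NewRouter()
        h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // For the unversioned path, "version" is simply absent.
            fmt.Fprintf(w, "version=%q\n", mux.Vars(r)["version"])
        })
        // Same handler, registered twice, exactly as createMux does.
        m.Path(versionMatcher + "/containers/json").Methods("GET").Handler(h)
        m.Path("/containers/json").Methods("GET").Handler(h)
        http.ListenAndServe(":8080", m)
    }

When the version prefix is absent, the daemon's version middleware (applied when the handler is wrapped by makeHTTPHandler; not shown in this listing) falls back to the default API version.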