From a4f5e3024919b0bbfe294e0a4e65b7b6e09c487e Mon Sep 17 00:00:00 2001 From: Misty Stanley-Jones Date: Thu, 25 Jan 2018 17:37:23 -0800 Subject: [PATCH] Various copyedits to reduce future tense, wordiness, and use of 'please' (#5788) * Reword lots of instances of 'will' * Reword lots of instances of won't * Reword lots of instances of we'll * Eradicate you'll * Eradicate 'be able to' type of phrases * Eradicate 'unable to' type of phrases * Eradicate 'has / have to' type of phrases * Eradicate 'note that' type of phrases * Eradicate 'in order to' type of phrases * Redirect to official Chef and Puppet docs * Eradicate gratuitous 'please' * Reduce use of e.g. * Reduce use of i.e. * Reduce use of N.B. * Get rid of 'sexagesimal' and correct some errors --- CONTRIBUTING.md | 2 +- README.md | 6 +- _includes/content/compose-extfields-sub.md | 2 +- _includes/content/compose-var-sub.md | 10 +- .../content/ssh/ssh-add-keys-to-agent.md | 2 +- _includes/content/ssh/ssh-find-keys.md | 4 +- _includes/docker_schedule_matrix.md | 33 +-- _includes/ee-linux-install-reuse.md | 10 +- _includes/install-script.md | 12 +- _includes/why_d4a.md | 16 +- apidocs/cloud-api-source/README.md | 2 +- .../source/includes/_errors.md | 6 +- .../source/includes/container.md | 2 +- .../source/includes/registry.md | 2 +- .../source/includes/repository.md | 6 +- .../source/includes/service.md | 8 +- .../cloud-api-source/source/includes/stack.md | 6 +- apidocs/cloud-api-source/source/index.md | 8 +- compose/aspnet-mssql-compose.md | 30 +- compose/bundles.md | 2 +- compose/completion.md | 13 +- compose/compose-file/compose-file-v1.md | 55 ++-- compose/compose-file/compose-file-v2.md | 115 ++++---- compose/compose-file/compose-versioning.md | 10 +- compose/compose-file/index.md | 166 +++++------ compose/django.md | 6 +- compose/env-file.md | 9 +- compose/environment-variables.md | 19 +- compose/extends.md | 4 +- compose/faq.md | 4 +- compose/gettingstarted.md | 6 +- compose/index.md | 2 +- 
compose/install.md | 10 +- compose/link-env-deprecated.md | 14 +- compose/networking.md | 16 +- compose/overview.md | 15 +- compose/production.md | 20 +- compose/rails.md | 19 +- compose/reference/build.md | 6 +- compose/reference/bundle.md | 2 +- compose/reference/envvars.md | 2 +- compose/reference/events.md | 2 +- compose/reference/exec.md | 2 +- compose/reference/overview.md | 2 +- compose/reference/pull.md | 2 +- compose/reference/restart.md | 4 +- compose/reference/rm.md | 6 +- compose/reference/run.md | 2 +- compose/reference/scale.md | 2 +- compose/startup-order.md | 10 +- compose/swarm.md | 18 +- compose/wordpress.md | 10 +- cs-engine/1.12/index.md | 8 +- cs-engine/1.12/upgrade.md | 2 +- cs-engine/1.13/index.md | 6 +- cs-engine/1.13/upgrade.md | 2 +- .../dtr/2.0/configure/config-general.md | 2 +- .../dtr/2.0/configure/config-security.md | 8 +- .../dtr/2.0/configure/config-storage.md | 4 +- datacenter/dtr/2.0/high-availability/index.md | 2 +- datacenter/dtr/2.0/install/license.md | 2 +- .../dtr/2.0/install/upgrade/upgrade-major.md | 2 +- .../2.0/release-notes/prior-release-notes.md | 9 +- datacenter/dtr/2.0/repos-and-images/index.md | 2 +- .../dtr/2.0/repos-and-images/push-an-image.md | 4 +- .../user-management/create-and-manage-orgs.md | 7 +- .../create-and-manage-teams.md | 2 +- .../2.1/guides/configure/configure-storage.md | 22 +- datacenter/dtr/2.1/guides/configure/index.md | 4 +- .../dtr/2.1/guides/high-availability/index.md | 2 +- datacenter/dtr/2.1/guides/install/license.md | 2 +- .../dtr/2.1/guides/repos-and-images/index.md | 2 +- .../guides/repos-and-images/push-an-image.md | 4 +- .../user-management/create-and-manage-orgs.md | 4 +- .../create-and-manage-teams.md | 2 +- datacenter/dtr/2.1/reference/cli/install.md | 2 +- .../dtr/2.1/reference/cli/reconfigure.md | 2 +- .../admin/backups-and-disaster-recovery.md | 2 +- .../admin/configure/create-and-manage-orgs.md | 4 +- .../admin/configure/deploy-caches/chaining.md | 8 +- 
.../admin/configure/deploy-caches/index.md | 2 +- .../admin/configure/deploy-caches/tls.md | 6 +- .../admin/configure/external-storage/s3.md | 4 +- .../admin/configure/garbage-collection.md | 4 +- .../configure/license-your-installation.md | 2 +- .../admin/configure/use-a-load-balancer.md | 5 +- .../use-your-own-tls-certificates.md | 4 +- .../manage-users/create-and-manage-teams.md | 2 +- .../troubleshoot-batch-jobs.md | 6 +- datacenter/dtr/2.2/guides/admin/upgrade.md | 4 +- .../configure-your-notary-client.md | 6 +- .../dtr/2.2/guides/user/access-dtr/index.md | 2 +- .../guides/user/create-and-manage-webhooks.md | 18 +- .../2.2/guides/user/manage-images/index.md | 4 +- .../manage-images/pull-and-push-images.md | 2 +- .../sign-images/delegate-image-signing.md | 6 +- .../user/manage-images/sign-images/index.md | 12 +- .../admin/backups-and-disaster-recovery.md | 2 +- .../admin/configure/deploy-caches/chaining.md | 2 +- .../admin/configure/deploy-caches/index.md | 2 +- .../admin/configure/deploy-caches/tls.md | 8 +- .../admin/configure/enable-single-sign-on.md | 6 +- .../admin/configure/external-storage/s3.md | 8 +- .../admin/configure/garbage-collection.md | 2 +- .../configure/license-your-installation.md | 2 +- .../admin/configure/use-a-load-balancer.md | 2 +- .../guides/admin/configure/use-a-web-proxy.md | 2 +- .../use-your-own-tls-certificates.md | 4 +- .../dtr/2.3/guides/admin/install/index.md | 2 +- .../manage-users/create-and-manage-orgs.md | 6 +- .../manage-users/create-and-manage-teams.md | 2 +- .../troubleshoot-batch-jobs.md | 6 +- datacenter/dtr/2.3/guides/admin/upgrade.md | 4 +- datacenter/dtr/2.3/guides/release-notes.md | 2 +- .../configure-your-notary-client.md | 6 +- .../dtr/2.3/guides/user/access-dtr/index.md | 2 +- .../guides/user/create-and-manage-webhooks.md | 2 +- .../guides/user/create-promotion-policies.md | 2 +- .../2.3/guides/user/manage-images/index.md | 4 +- .../prevent-tags-from-being-overwritten.md | 2 +- 
.../manage-images/pull-and-push-images.md | 4 +- .../scan-images-for-vulnerabilities.md | 2 +- .../sign-images/delegate-image-signing.md | 6 +- .../user/manage-images/sign-images/index.md | 12 +- .../admin/backups-and-disaster-recovery.md | 2 +- .../admin/configure/deploy-caches/chaining.md | 2 +- .../admin/configure/deploy-caches/index.md | 2 +- .../admin/configure/deploy-caches/tls.md | 10 +- .../admin/configure/enable-single-sign-on.md | 6 +- .../admin/configure/external-storage/s3.md | 10 +- .../admin/configure/garbage-collection.md | 2 +- .../configure/license-your-installation.md | 2 +- .../admin/configure/use-a-load-balancer.md | 2 +- .../guides/admin/configure/use-a-web-proxy.md | 2 +- .../use-your-own-tls-certificates.md | 4 +- .../dtr/2.4/guides/admin/install/index.md | 2 +- .../manage-users/create-and-manage-orgs.md | 4 +- .../manage-users/create-and-manage-teams.md | 2 +- .../notary-audit-logs.md | 4 +- .../troubleshoot-batch-jobs.md | 6 +- datacenter/dtr/2.4/guides/admin/upgrade.md | 4 +- .../configure-your-notary-client.md | 6 +- .../dtr/2.4/guides/user/access-dtr/index.md | 2 +- .../guides/user/create-and-manage-webhooks.md | 2 +- .../guides/user/create-promotion-policies.md | 2 +- .../2.4/guides/user/manage-images/index.md | 4 +- .../prevent-tags-from-being-overwritten.md | 2 +- .../manage-images/pull-and-push-images.md | 4 +- .../scan-images-for-vulnerabilities.md | 2 +- .../sign-images/delegate-image-signing.md | 6 +- .../user/manage-images/sign-images/index.md | 12 +- .../ucp/1.1/access-ucp/cli-based-access.md | 2 +- .../ucp/1.1/applications/deploy-app-cli.md | 2 +- .../ucp/1.1/configuration/configure-logs.md | 4 +- .../ucp/1.1/configuration/dtr-integration.md | 14 +- .../configuration/multi-host-networking.md | 8 +- .../use-externally-signed-certs.md | 6 +- .../set-up-high-availability.md | 2 +- datacenter/ucp/1.1/install-sandbox-2.md | 22 +- datacenter/ucp/1.1/install-sandbox.md | 28 +- .../1.1/installation/install-production.md | 14 +- 
datacenter/ucp/1.1/installation/upgrade.md | 6 +- datacenter/ucp/1.1/monitor/monitor-ucp.md | 5 +- .../monitor/troubleshoot-configurations.md | 2 +- .../ucp/1.1/monitor/troubleshoot-ucp.md | 2 +- datacenter/ucp/1.1/reference/help.md | 8 +- datacenter/ucp/1.1/reference/index.md | 8 +- datacenter/ucp/1.1/reference/install.md | 6 +- datacenter/ucp/1.1/reference/join.md | 2 +- datacenter/ucp/1.1/reference/regen-certs.md | 2 +- datacenter/ucp/1.1/release_notes.md | 6 +- .../authentication-and-authorization.md | 6 +- .../create-and-manage-teams.md | 2 +- .../create-and-manage-users.md | 4 +- .../1.1/user-management/permission-levels.md | 8 +- .../2.0/guides/access-ucp/cli-based-access.md | 2 +- .../2.0/guides/applications/deploy-app-cli.md | 2 +- .../guides/configuration/configure-logs.md | 4 +- .../ucp/2.0/guides/configuration/index.md | 10 +- .../configuration/integrate-with-dtr.md | 2 +- .../content-trust/continuous-integration.md | 29 +- .../ucp/2.0/guides/content-trust/index.md | 12 +- .../backups-and-disaster-recovery.md | 2 +- .../installation/plan-production-install.md | 2 +- .../guides/installation/scale-your-cluster.md | 4 +- .../installation/system-requirements.md | 2 +- .../ucp/2.0/guides/installation/uninstall.md | 9 +- .../ucp/2.0/guides/installation/upgrade.md | 10 +- datacenter/ucp/2.0/guides/monitor/index.md | 2 +- .../monitor/troubleshoot-configurations.md | 2 +- .../ucp/2.0/guides/monitor/troubleshoot.md | 2 +- .../ucp/2.0/guides/user-management/index.md | 6 +- .../user-management/permission-levels.md | 8 +- datacenter/ucp/2.0/reference/cli/install.md | 4 +- .../ucp/2.0/reference/cli/uninstall-ucp.md | 2 +- .../admin/backups-and-disaster-recovery.md | 6 +- .../admin/configure/add-sans-to-cluster.md | 8 +- .../admin/configure/integrate-with-dtr.md | 2 +- .../restrict-services-to-worker-nodes.md | 2 +- .../admin/configure/scale-your-cluster.md | 10 +- .../store-logs-in-an-external-system.md | 4 +- .../use-domain-names-to-access-services.md | 11 +- 
.../configure/use-trusted-images-for-ci.md | 29 +- .../use-your-own-tls-certificates.md | 8 +- .../guides/admin/install/plan-installation.md | 4 +- .../admin/install/system-requirements.md | 2 +- .../ucp/2.1/guides/admin/install/uninstall.md | 13 +- .../2.1/guides/admin/manage-users/index.md | 6 +- .../admin/manage-users/permission-levels.md | 4 +- .../admin/monitor-and-troubleshoot/index.md | 2 +- .../troubleshoot-configurations.md | 4 +- .../troubleshoot-node-messages.md | 4 +- .../troubleshoot-with-logs.md | 4 +- datacenter/ucp/2.1/guides/admin/upgrade.md | 6 +- .../incompatibilities-and-breaking-changes.md | 2 +- .../ucp/2.1/guides/release-notes/index.md | 6 +- .../user/access-ucp/cli-based-access.md | 2 +- .../user/secrets/grant-revoke-access.md | 4 +- .../ucp/2.1/guides/user/secrets/index.md | 24 +- .../guides/user/services/deploy-a-service.md | 6 +- .../guides/user/services/deploy-app-cli.md | 2 +- .../ucp/2.1/guides/user/services/index.md | 2 +- .../use-domain-names-to-access-services.md | 6 +- datacenter/ucp/2.1/reference/cli/restore.md | 4 +- .../ucp/2.1/reference/cli/uninstall-ucp.md | 2 +- .../access-control-design-ee-standard.md | 6 +- .../access-control/access-control-node.md | 12 +- .../access-control/create-and-manage-teams.md | 12 +- .../ucp/2.2/guides/access-control/index.md | 22 +- .../isolate-nodes-between-teams.md | 32 +- .../isolate-volumes-between-teams.md | 8 +- .../admin/backups-and-disaster-recovery.md | 4 +- .../configure/add-labels-to-cluster-nodes.md | 12 +- .../admin/configure/add-sans-to-cluster.md | 4 +- .../admin/configure/integrate-with-dtr.md | 2 +- .../configure/join-windows-worker-nodes.md | 2 +- .../restrict-services-to-worker-nodes.md | 2 +- .../run-only-the-images-you-trust.md | 8 +- .../admin/configure/scale-your-cluster.md | 16 +- .../use-domain-names-to-access-services.md | 11 +- .../use-node-local-network-in-swarm.md | 6 +- .../configure/use-trusted-images-for-ci.md | 22 +- .../use-your-own-tls-certificates.md | 10 +- 
.../ucp/2.2/guides/admin/install/index.md | 4 +- .../guides/admin/install/plan-installation.md | 6 +- .../admin/install/system-requirements.md | 8 +- .../ucp/2.2/guides/admin/install/uninstall.md | 11 +- .../ucp/2.2/guides/admin/install/upgrade.md | 24 +- .../admin/monitor-and-troubleshoot/index.md | 13 +- .../troubleshoot-configurations.md | 14 +- .../troubleshoot-node-messages.md | 4 +- .../troubleshoot-with-logs.md | 6 +- .../user/secrets/grant-revoke-access.md | 4 +- .../ucp/2.2/guides/user/secrets/index.md | 26 +- .../guides/user/services/deploy-a-service.md | 10 +- .../guides/user/services/deploy-app-cli.md | 6 +- .../services/deploy-stack-to-collection.md | 10 +- .../ucp/2.2/guides/user/services/index.md | 10 +- .../use-domain-names-to-access-services.md | 4 +- datacenter/ucp/2.2/reference/cli/install.md | 6 +- datacenter/ucp/2.2/reference/cli/restore.md | 4 +- .../ucp/2.2/reference/cli/uninstall-ucp.md | 2 +- deploy/test.md | 2 +- develop/dev-best-practices.md | 12 +- develop/sdk/examples.md | 4 +- develop/sdk/index.md | 10 +- docker-cloud/apps/auto-destroy.md | 2 +- docker-cloud/apps/auto-redeploy.md | 2 +- docker-cloud/apps/autorestart.md | 8 +- docker-cloud/apps/deploy-tags.md | 6 +- docker-cloud/apps/deploy-to-cloud-btn.md | 7 +- docker-cloud/apps/load-balance-hello-world.md | 51 ++-- docker-cloud/apps/ports.md | 10 +- docker-cloud/apps/service-links.md | 20 +- docker-cloud/apps/service-redeploy.md | 2 +- docker-cloud/apps/stack-yaml-reference.md | 12 +- docker-cloud/apps/stacks.md | 4 +- docker-cloud/apps/volumes.md | 8 +- docker-cloud/builds/advanced.md | 6 +- docker-cloud/builds/automated-build.md | 20 +- docker-cloud/builds/automated-testing.md | 12 +- docker-cloud/builds/image-scan.md | 20 +- docker-cloud/builds/link-source.md | 5 +- docker-cloud/builds/push-images.md | 2 +- docker-cloud/builds/repos.md | 10 +- docker-cloud/cloud-swarm/connect-to-swarm.md | 2 +- .../cloud-swarm/create-cloud-swarm-aws.md | 6 +- 
.../cloud-swarm/create-cloud-swarm-azure.md | 10 +- docker-cloud/cloud-swarm/link-aws-swarm.md | 20 +- docker-cloud/cloud-swarm/link-azure-swarm.md | 10 +- docker-cloud/cloud-swarm/register-swarms.md | 9 +- docker-cloud/cloud-swarm/ssh-key-setup.md | 4 +- docker-cloud/cloud-swarm/using-swarm-mode.md | 6 +- docker-cloud/docker-errors-faq.md | 4 +- docker-cloud/getting-started/connect-infra.md | 2 +- ...ovision_a_data_backend_for_your_service.md | 12 +- .../deploy-app/11_service_stacks.md | 4 +- .../12_data_management_with_volumes.md | 8 +- .../deploy-app/1_introduction.md | 4 +- .../getting-started/deploy-app/2_set_up.md | 8 +- .../deploy-app/3_prepare_the_app.md | 4 +- .../deploy-app/4_push_to_cloud_registry.md | 2 +- .../5_deploy_the_app_as_a_service.md | 4 +- .../6_define_environment_variables.md | 2 +- .../deploy-app/7_scale_the_service.md | 2 +- .../getting-started/deploy-app/8_view_logs.md | 4 +- .../deploy-app/9_load-balance_the_service.md | 4 +- docker-cloud/getting-started/intro_cloud.md | 2 +- .../getting-started/your_first_node.md | 6 +- .../getting-started/your_first_service.md | 10 +- docker-cloud/infrastructure/byoh.md | 6 +- .../infrastructure/cloud-on-aws-faq.md | 10 +- .../infrastructure/cloud-on-packet.net-faq.md | 2 +- .../infrastructure/deployment-strategies.md | 10 +- docker-cloud/infrastructure/docker-upgrade.md | 4 +- docker-cloud/infrastructure/index.md | 2 +- docker-cloud/infrastructure/link-do.md | 6 +- docker-cloud/infrastructure/link-softlayer.md | 2 +- docker-cloud/installing-cli.md | 11 +- docker-cloud/orgs.md | 20 +- docker-cloud/release-notes.md | 2 +- docker-cloud/slack-integration.md | 2 +- docker-for-aws/archive.md | 3 +- docker-for-aws/deploy.md | 24 +- docker-for-aws/faqs.md | 36 ++- docker-for-aws/iam-permissions.md | 2 +- docker-for-aws/index.md | 28 +- docker-for-aws/load-balancer.md | 6 +- docker-for-aws/persistent-data-volumes.md | 10 +- docker-for-aws/release-notes.md | 2 +- docker-for-aws/scaling.md | 18 +- 
docker-for-aws/upgrade.md | 8 +- docker-for-azure/archive.md | 3 +- docker-for-azure/deploy.md | 26 +- docker-for-azure/faqs.md | 10 +- docker-for-azure/index.md | 18 +- docker-for-azure/persistent-data-volumes.md | 6 +- docker-for-azure/release-notes.md | 2 +- docker-for-azure/upgrade.md | 8 +- docker-for-ibm-cloud/administering-swarms.md | 9 +- docker-for-ibm-cloud/deploy.md | 2 +- docker-for-ibm-cloud/faqs.md | 6 +- docker-for-ibm-cloud/ibm-registry.md | 2 +- docker-for-ibm-cloud/index.md | 3 +- .../persistent-data-volumes.md | 2 +- docker-for-ibm-cloud/quickstart.md | 10 +- docker-for-ibm-cloud/why.md | 6 +- docker-for-mac/docker-toolbox.md | 14 +- docker-for-mac/faqs.md | 20 +- docker-for-mac/index.md | 42 +-- docker-for-mac/install.md | 16 +- docker-for-mac/networking.md | 20 +- docker-for-mac/osxfs-caching.md | 56 ++-- docker-for-mac/osxfs.md | 57 ++-- docker-for-mac/release-notes.md | 30 +- docker-for-mac/troubleshoot.md | 59 ++-- docker-for-windows/faqs.md | 20 +- docker-for-windows/index.md | 54 ++-- docker-for-windows/install.md | 25 +- docker-for-windows/release-notes.md | 82 +++--- docker-for-windows/troubleshoot.md | 91 +++--- docker-hub/bitbucket.md | 11 +- docker-hub/builds.md | 8 +- docker-hub/github.md | 37 ++- docker-hub/index.md | 2 +- docker-hub/official_repos.md | 4 +- docker-hub/orgs.md | 2 +- docker-hub/repos.md | 37 ++- docker-hub/webhooks.md | 2 +- docker-id/index.md | 2 +- docker-store/byol.md | 50 ++-- docker-store/customer_faq.md | 6 +- docker-store/index.md | 6 +- docker-store/publish.md | 58 ++-- docker-store/publisher_faq.md | 34 +-- docker-store/trustchain.md | 2 +- edge/engine/reference/commandline/README.md | 2 +- edge/index.md | 2 +- engine/admin/ambassador_pattern_linking.md | 2 +- engine/admin/ansible.md | 61 +--- engine/admin/b2d_volume_resize.md | 10 +- engine/admin/chef.md | 65 +---- engine/admin/dsc.md | 2 +- engine/admin/index.md | 14 +- engine/admin/live-restore.md | 6 +- engine/admin/logging/awslogs.md | 12 +- 
engine/admin/logging/etwlogs.md | 6 +- engine/admin/logging/gcplogs.md | 8 +- engine/admin/logging/gelf.md | 4 +- engine/admin/logging/journald.md | 2 +- engine/admin/logging/json-file.md | 6 +- engine/admin/logging/logentries.md | 5 +- engine/admin/logging/overview.md | 4 +- engine/admin/logging/splunk.md | 4 +- engine/admin/logging/syslog.md | 4 +- engine/admin/multi-service_container.md | 8 +- engine/admin/prometheus.md | 2 +- engine/admin/puppet.md | 90 +----- engine/admin/resource_constraints.md | 30 +- engine/admin/runmetrics.md | 150 +++++----- .../admin/start-containers-automatically.md | 6 +- engine/admin/systemd.md | 4 +- engine/admin/volumes/bind-mounts.md | 23 +- engine/admin/volumes/index.md | 8 +- engine/admin/volumes/tmpfs.md | 6 +- engine/admin/volumes/volumes.md | 27 +- engine/examples/apt-cacher-ng.md | 9 +- engine/examples/couchdb_data_volumes.md | 2 +- engine/examples/dotnetcore.md | 2 +- engine/examples/postgresql_service.md | 2 +- engine/examples/running_riak_service.md | 4 +- engine/examples/running_ssh_service.md | 4 +- engine/faq.md | 21 +- engine/index.md | 12 +- engine/installation/index.md | 6 +- engine/installation/linux/docker-ce/centos.md | 10 +- engine/installation/linux/docker-ce/debian.md | 6 +- engine/installation/linux/docker-ce/fedora.md | 10 +- engine/installation/linux/docker-ce/ubuntu.md | 10 +- engine/installation/linux/docker-ee/oracle.md | 4 +- engine/installation/linux/docker-ee/suse.md | 20 +- engine/installation/linux/docker-ee/ubuntu.md | 20 +- .../installation/linux/linux-postinstall.md | 20 +- engine/installation/windows/docker-ee.md | 8 +- engine/migration.md | 20 +- engine/reference/commandline/README.md | 2 +- engine/security/antivirus.md | 4 +- engine/security/certificates.md | 31 +- engine/security/https.md | 33 +-- engine/security/https/README.md | 4 +- engine/security/security.md | 67 ++--- engine/security/trust/content_trust.md | 11 +- engine/security/trust/deploying_notary.md | 2 +- 
engine/security/trust/trust_automation.md | 4 +- engine/security/trust/trust_delegation.md | 39 ++- engine/security/trust/trust_key_mng.md | 3 +- engine/security/trust/trust_sandbox.md | 63 ++-- engine/security/userns-remap.md | 24 +- engine/static_files/README.md | 2 +- engine/swarm/admin_guide.md | 42 ++- engine/swarm/configs.md | 20 +- engine/swarm/how-swarm-mode-works/nodes.md | 8 +- engine/swarm/how-swarm-mode-works/pki.md | 18 +- engine/swarm/how-swarm-mode-works/services.md | 14 +- engine/swarm/index.md | 2 +- engine/swarm/ingress.md | 10 +- engine/swarm/join-nodes.md | 2 +- engine/swarm/manage-nodes.md | 6 +- engine/swarm/networking.md | 12 +- engine/swarm/raft.md | 4 +- engine/swarm/secrets.md | 55 ++-- engine/swarm/services.md | 82 +++--- engine/swarm/stack-deploy.md | 10 +- engine/swarm/swarm-tutorial/create-swarm.md | 2 +- engine/swarm/swarm-tutorial/index.md | 31 +- engine/swarm/swarm_manager_locking.md | 12 +- engine/tutorials/networkingcontainers.md | 15 +- engine/userguide/eng-image/baseimages.md | 2 +- .../eng-image/dockerfile_best-practices.md | 60 ++-- .../userguide/eng-image/multistage-build.md | 4 +- engine/userguide/networking/configure-dns.md | 24 +- .../networking/default_network/binding.md | 12 +- .../default_network/configure-dns.md | 20 +- .../container-communication.md | 30 +- .../default_network/custom-docker0.md | 13 +- .../networking/default_network/dockerlinks.md | 40 +-- .../networking/default_network/ipv6.md | 58 ++-- .../networking/get-started-macvlan.md | 24 +- engine/userguide/networking/index.md | 30 +- .../networking/overlay-security-model.md | 2 +- .../networking/overlay-standalone-swarm.md | 16 +- .../networking/work-with-networks.md | 24 +- engine/userguide/storagedriver/aufs-driver.md | 8 +- .../userguide/storagedriver/btrfs-driver.md | 6 +- .../storagedriver/device-mapper-driver.md | 40 +-- .../storagedriver/imagesandcontainers.md | 26 +- .../storagedriver/overlayfs-driver.md | 16 +- 
.../userguide/storagedriver/selectadriver.md | 13 +- engine/userguide/storagedriver/vfs-driver.md | 2 +- engine/userguide/storagedriver/zfs-driver.md | 2 +- enterprise/17.06/index.md | 10 +- enterprise/backup.md | 2 +- enterprise/telemetry.md | 4 +- enterprise/upgrade.md | 12 +- get-started/index.md | 11 +- get-started/part2.md | 46 +-- get-started/part3.md | 38 +-- get-started/part4.md | 40 +-- get-started/part5.md | 24 +- get-started/part6.md | 24 +- hackathon/index.md | 6 +- index.md | 196 +------------ kitematic/minecraft-server.md | 8 +- kitematic/nginx-web-server.md | 9 +- kitematic/rethinkdb-dev-database.md | 11 +- kitematic/userguide.md | 48 +-- machine/AVAILABLE_DRIVER_PLUGINS.md | 2 +- machine/DRIVER_SPEC.md | 41 ++- machine/completion.md | 10 +- machine/concepts.md | 32 +- machine/drivers/aws.md | 12 +- machine/drivers/azure.md | 10 +- machine/drivers/digital-ocean.md | 14 +- machine/drivers/exoscale.md | 13 +- machine/drivers/gce.md | 14 +- machine/drivers/generic.md | 24 +- machine/drivers/hyper-v.md | 10 +- machine/drivers/openstack.md | 22 +- machine/drivers/os-base.md | 2 +- machine/drivers/rackspace.md | 4 +- machine/drivers/soft-layer.md | 6 +- machine/drivers/virtualbox.md | 20 +- machine/drivers/vm-cloud.md | 6 +- machine/drivers/vsphere.md | 9 +- machine/examples/aws.md | 4 +- machine/examples/index.md | 2 +- machine/examples/ocean.md | 21 +- machine/get-started-cloud.md | 10 +- machine/get-started.md | 46 +-- machine/install-machine.md | 4 +- machine/overview.md | 2 +- machine/reference/create.md | 30 +- machine/reference/env.md | 14 +- machine/reference/inspect.md | 4 +- machine/reference/ls.md | 11 +- machine/reference/mount.md | 4 +- machine/reference/provision.md | 4 +- machine/reference/rm.md | 2 +- machine/reference/scp.md | 14 +- machine/reference/ssh.md | 16 +- machine/reference/upgrade.md | 6 +- notary/advanced_usage.md | 112 ++++--- notary/getting_started.md | 46 +-- notary/reference/client-config.md | 28 +- 
notary/reference/common-configs.md | 4 +- notary/reference/server-config.md | 47 +-- notary/reference/signer-config.md | 29 +- notary/running_a_service.md | 58 ++-- notary/service_architecture.md | 276 +++++++++--------- opensource/index.md | 6 +- opensource/ways.md | 11 +- registry/compatibility.md | 44 +-- registry/deploying.md | 32 +- registry/garbage-collection.md | 22 +- registry/insecure.md | 4 +- registry/notifications.md | 24 +- registry/recipes/apache.md | 4 +- registry/recipes/index.md | 9 +- registry/recipes/mirror.md | 41 ++- registry/recipes/nginx.md | 16 +- registry/storage-drivers/azure.md | 2 +- registry/storage-drivers/gcs.md | 4 +- registry/storage-drivers/index.md | 2 +- registry/storage-drivers/s3.md | 28 +- registry/storage-drivers/swift.md | 4 +- release-notes/docker-ce.md | 4 +- release-notes/docker-compose.md | 20 +- release-notes/docker-engine.md | 12 +- release-notes/docker-machine.md | 6 +- swarm/configure-tls.md | 94 +++--- swarm/discovery.md | 18 +- swarm/install-manual.md | 63 ++-- swarm/install-w-machine.md | 2 +- swarm/multi-manager-setup.md | 14 +- swarm/overview.md | 16 +- swarm/plan-for-production.md | 110 +++---- swarm/provision-with-machine.md | 18 +- swarm/reference/create.md | 2 +- swarm/reference/manage.md | 2 +- swarm/scheduler/filter.md | 16 +- swarm/scheduler/strategy.md | 2 +- swarm/secure-swarm-tls.md | 8 +- swarm/swarm-api.md | 6 +- swarm/swarm_at_scale/about.md | 2 +- swarm/swarm_at_scale/deploy-app.md | 28 +- swarm/swarm_at_scale/deploy-infra.md | 24 +- swarm/swarm_at_scale/index.md | 2 +- swarm/swarm_at_scale/troubleshoot.md | 83 +++--- test.md | 47 ++- thank-you-subscribing-docker-weekly.md | 2 +- toolbox/faqs/troubleshoot.md | 9 +- toolbox/toolbox_install_mac.md | 11 +- toolbox/toolbox_install_windows.md | 9 +- 583 files changed, 3732 insertions(+), 4114 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bb446215428..3306ff5e8a0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -124,5 +124,5 @@ 
know. If you have questions about how to write for Docker's documentation, have a look at the [style guide](https://docs.docker.com/opensource/doc-style/). The style guide provides guidance about grammar, syntax, formatting, styling, language, or -tone. If something isn't clear in the guide, please submit an issue to let us +tone. If something isn't clear in the guide, submit an issue to let us know or submit a pull request to help us improve it. diff --git a/README.md b/README.md index e50caede2e4..75d82320798 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ We really want your feedback, and we've made it easy. You can edit, rate, or file an issue at the bottom of every page on [https://docs.docker.com/](https://docs.docker.com/). -**Please only file issues about the documentation in this repository.** One way +**Only file issues about the documentation in this repository.** One way to think about this is that you should file a bug here if your issue is that you don't see something that should be in the docs, or you see something incorrect or confusing in the docs. @@ -21,7 +21,7 @@ or confusing in the docs. ask in [https://forums.docker.com](https://forums.docker.com) instead. - If you have an idea for a new feature or behavior change in a specific aspect - of Docker, or have found a bug in part of Docker, please file that issue in + of Docker, or have found a bug in part of Docker, file that issue in the project's code repository. ## Contributing @@ -158,7 +158,7 @@ You have three options: bundle install ``` - >**Note**: You may have to install some packages manually. + >**Note**: You may need to install some packages manually. f. Change the directory to `docker.github.io`. 
diff --git a/_includes/content/compose-extfields-sub.md b/_includes/content/compose-extfields-sub.md index fd4dfb2db88..0508dca7dff 100644 --- a/_includes/content/compose-extfields-sub.md +++ b/_includes/content/compose-extfields-sub.md @@ -13,7 +13,7 @@ x-custom: name: "custom" ``` -The contents of those fields will be ignored by Compose, but they can be +The contents of those fields are ignored by Compose, but they can be inserted in your resource definitions using [YAML anchors](http://www.yaml.org/spec/1.2/spec.html#id2765878). For example, if you want several of your services to use the same logging configuration: diff --git a/_includes/content/compose-var-sub.md b/_includes/content/compose-var-sub.md index 4d16254bdc5..33a5ccb15f3 100644 --- a/_includes/content/compose-var-sub.md +++ b/_includes/content/compose-var-sub.md @@ -16,8 +16,8 @@ string. In the example above, if `POSTGRES_VERSION` is not set, the value for the `image` option is `postgres:`. You can set default values for environment variables using a -[`.env` file](../env-file.md), which Compose will automatically look for. Values -set in the shell environment will override those set in the `.env` file. +[`.env` file](../env-file.md), which Compose automatically looks for. Values +set in the shell environment override those set in the `.env` file. > **Important**: The `.env file` feature only works when you use the > `docker-compose up` command and does not work with `docker stack deploy`. @@ -27,9 +27,9 @@ Both `$VARIABLE` and `${VARIABLE}` syntax are supported. Additionally when using the [2.1 file format](compose-versioning.md#version-21), it is possible to provide inline default values using typical shell syntax: -- `${VARIABLE:-default}` will evaluate to `default` if `VARIABLE` is unset or +- `${VARIABLE:-default}` evaluates to `default` if `VARIABLE` is unset or empty in the environment. 
-- `${VARIABLE-default}` will evaluate to `default` only if `VARIABLE` is unset +- `${VARIABLE-default}` evaluates to `default` only if `VARIABLE` is unset in the environment. Other extended shell-style features, such as `${VARIABLE/foo/bar}`, are not @@ -45,6 +45,6 @@ Compose. command: "$$VAR_NOT_INTERPOLATED_BY_COMPOSE" If you forget and use a single dollar sign (`$`), Compose interprets the value -as an environment variable and will warn you: +as an environment variable and warns you: The VAR_NOT_INTERPOLATED_BY_COMPOSE is not set. Substituting an empty string. diff --git a/_includes/content/ssh/ssh-add-keys-to-agent.md b/_includes/content/ssh/ssh-add-keys-to-agent.md index 8e923cdcbda..82f26bea8a6 100644 --- a/_includes/content/ssh/ssh-add-keys-to-agent.md +++ b/_includes/content/ssh/ssh-add-keys-to-agent.md @@ -8,7 +8,7 @@

{% capture mac-content-add %} -1. Start the `ssh-agent` in the background using the command `eval "$(ssh-agent -s)"`. You will get the agent process ID in return. +1. Start the `ssh-agent` in the background using the command `eval "$(ssh-agent -s)"`. You get the agent process ID in return. ```none eval "$(ssh-agent -s)" diff --git a/_includes/content/ssh/ssh-find-keys.md b/_includes/content/ssh/ssh-find-keys.md index 63fece8c027..b347f8f81ea 100644 --- a/_includes/content/ssh/ssh-find-keys.md +++ b/_includes/content/ssh/ssh-find-keys.md @@ -53,7 +53,7 @@ $ ls -al ~/.ssh ``` - This will list files in your `.ssh` directory. + This lists files in your `.ssh` directory. 2. Check to see if you already have SSH keys you can use. @@ -91,7 +91,7 @@ $ ls -al ~/.ssh ``` - This will list files in your `.ssh` directory. + This lists files in your `.ssh` directory. 2. Check to see if you already have a SSH keys you can use. diff --git a/_includes/docker_schedule_matrix.md b/_includes/docker_schedule_matrix.md index d6e14c1a09b..14d004c3766 100644 --- a/_includes/docker_schedule_matrix.md +++ b/_includes/docker_schedule_matrix.md @@ -1,23 +1,18 @@ {% capture green-check %}![yes](/engine/installation/images/green-check.svg){: style="height: 14px; display: inline-block"}{% endcapture %} -{% capture superscript-link %}[1](#edge-footnote){: style="vertical-align: super; font-size: smaller;" }{% endcapture %} {: style="width: 75%" } -| Month | Docker CE Edge | Docker CE Stable | -|:----------|:----------------------------------------|:------------------| -| January | {{ green-check }} | | -| February | {{ green-check }} | | -| March | {{ green-check }}{{ superscript-link }} | {{ green-check }} | -| April | {{ green-check }} | | -| May | {{ green-check }} | | -| June | {{ green-check }}{{ superscript-link }} | {{ green-check }} | -| July | {{ green-check }} | | -| August | {{ green-check }} | | -| September | {{ green-check }}{{ superscript-link }} | {{ green-check }} | -| October | 
{{ green-check }} | | -| November | {{ green-check }} | | -| December | {{ green-check }}{{ superscript-link }} | {{ green-check }} | +| Month | Docker CE Edge | Docker CE Stable | +|:----------|:------------------|:------------------| +| January | {{ green-check }} | | +| February | {{ green-check }} | | +| March | {{ green-check }} | {{ green-check }} | +| April | {{ green-check }} | | +| May | {{ green-check }} | | +| June | {{ green-check }} | {{ green-check }} | +| July | {{ green-check }} | | +| August | {{ green-check }} | | +| September | {{ green-check }} | {{ green-check }} | +| October | {{ green-check }} | | +| November | {{ green-check }} | | +| December | {{ green-check }} | {{ green-check }} | -`1`: On Linux distributions, these releases will only appear in the `stable` - channels, not the `edge` channels. For that reason, on Linux distributions, - you need to enable both channels. -{: id="edge-footnote" } diff --git a/_includes/ee-linux-install-reuse.md b/_includes/ee-linux-install-reuse.md index e4b1a047024..cad69e4d993 100644 --- a/_includes/ee-linux-install-reuse.md +++ b/_includes/ee-linux-install-reuse.md @@ -59,7 +59,7 @@ You can install Docker EE in different ways, depending on your needs: 2. Temporarily store the Docker EE repository URL you noted down in the [prerequisites](#prerequisites) in an environment variable. - This will not persist when the current session ends. + This does not persist when the current session ends. ```bash $ export DOCKERURL='' @@ -139,8 +139,8 @@ You can install Docker EE in different ways, depending on your needs: ``` If this is the first time you are installing a package from a recently added - repository, you will be prompted to accept the GPG key, and - the key's fingerprint will be shown. Verify that the fingerprint matches + repository, you are prompted to accept the GPG key, and + the key's fingerprint is shown. Verify that the fingerprint matches `{{ gpg-fingerprint }}` and if so, accept the key. 2. 
On production systems, you should install a specific version of Docker EE @@ -155,7 +155,7 @@ You can install Docker EE in different ways, depending on your needs: ``` The contents of the list depend upon which repositories you have enabled, - and will be specific to your version of {{ linux-dist-long }} + and are specific to your version of {{ linux-dist-long }} (indicated by the `.el7` suffix on the version, in this example). Choose a specific version to install. The second column is the version string. You can use the entire version string, but **you need to include at least to the @@ -223,7 +223,7 @@ To upgrade Docker EE: If you cannot use the official Docker repository to install Docker EE, you can download the `.{{ package-format | downcase }}` file for your release and -install it manually. You will need to download a new file each time you want to +install it manually. You need to download a new file each time you want to upgrade Docker EE. {% if linux-dist == "rhel" %} diff --git a/_includes/install-script.md b/_includes/install-script.md index 3d860f3bfc5..ff995af99e0 100644 --- a/_includes/install-script.md +++ b/_includes/install-script.md @@ -11,7 +11,7 @@ non-interactively. The source code for the scripts is in the environments**, and you should understand the potential risks before you use them: -- The scripts require `root` or `sudo` privileges in order to run. Therefore, +- The scripts require `root` or `sudo` privileges to run. Therefore, you should carefully examine and audit the scripts before running them. - The scripts attempt to detect your Linux distribution and version and configure your package management system for you. In addition, the scripts do @@ -22,7 +22,7 @@ them: manager without asking for confirmation. This may install a large number of packages, depending on the current configuration of your host machine.
- The script does not provide options to specify which version of Docker to install, - and will install the latest version that is released in the "edge" channel. + and installs the latest version that is released in the "edge" channel. - Do not use the convenience script if Docker has already been installed on the host machine using another mechanism. @@ -48,9 +48,9 @@ adding your user to the "docker" group with something like: sudo usermod -aG docker your-user -Remember that you will have to log out and back in for this to take effect! +Remember to log out and back in for this to take effect! -WARNING: Adding a user to the "docker" group will grant the ability to run +WARNING: Adding a user to the "docker" group grants the ability to run containers which can be used to obtain root privileges on the docker host. Refer to https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface @@ -59,8 +59,8 @@ WARNING: Adding a user to the "docker" group will grant the ability to run Docker CE is installed. It starts automatically on `DEB`-based distributions. On `RPM`-based distributions, you need to start it manually using the appropriate -`systemctl` or `service` command. As the message indicates, non-root users are -not able to run Docker commands by default. +`systemctl` or `service` command. As the message indicates, non-root users can't +run Docker commands by default. #### Upgrade Docker after using the convenience script diff --git a/_includes/why_d4a.md b/_includes/why_d4a.md index dfc6bfc3beb..336f5b6ef5b 100644 --- a/_includes/why_d4a.md +++ b/_includes/why_d4a.md @@ -3,7 +3,7 @@ project was created and is being actively developed to ensure that Docker users can enjoy a fantastic out-of-the-box experience on {{cloudprovider}}. It is now generally available and can now be used by everyone. 
-As an informed user, you might be curious to know what this project has to offer +As an informed user, you might be curious to know what this project offers you for running your development, staging, or production workloads. ## Native to Docker @@ -14,12 +14,12 @@ operational complexity and adding unneeded additional APIs to the Docker stack. Docker for {{cloudprovider}} allows you to interact with Docker directly (including native Docker orchestration), instead of distracting you with the need to navigate extra layers on top of Docker. You can focus instead on the -thing that matters most: running your workloads. This will help you and your +thing that matters most: running your workloads. This helps you and your team to deliver more value to the business faster, to speak one common "language", and to have fewer details to keep in your head at once. -The skills that you and your team have already learned, and will continue to -learn, using Docker on the desktop or elsewhere will automatically carry over to +The skills that you and your team have already learned, and continue to +learn, using Docker on the desktop or elsewhere automatically carry over to using Docker on {{cloudprovider}}. The added consistency across clouds also helps to ensure that a migration or multi-cloud strategy is easier to accomplish in the future if desired. @@ -65,12 +65,12 @@ processes. In Docker for {{cloudprovider}}, your cluster is resilient to a variety of such issues by default. Log rotation native to the host is configured for you automatically, so chatty -logs won't use up all of your disk space. Likewise, the "system prune" option +logs don't use up all of your disk space. Likewise, the "system prune" option allows you to ensure unused Docker resources such as old images are cleaned up automatically. 
The lifecycle of nodes is managed using auto-scaling groups or similar constructs, so that if a node enters an unhealthy state for unforeseen -reasons, the node will be taken out of load balancer rotation and/or replaced -automatically and all of its container tasks will be rescheduled. +reasons, the node is taken out of load balancer rotation and/or replaced +automatically and all of its container tasks are rescheduled. These self-cleaning and self-healing properties are enabled by default and don't need configuration, so you can breathe easier as the risk of downtime is @@ -91,7 +91,7 @@ communicating the current state of your infrastructure and the issues you are seeing to the upstream. In Docker for {{cloudprovider}}, you receive new tools to communicate any issues you experience quickly and securely to Docker employees. The Docker for {{cloudprovider}} shell includes a `docker-diagnose` -script which, at your request, will transmit detailed diagnostic information to +script which, at your request, transmits detailed diagnostic information to Docker support staff to reduce the traditional "please-post-the-output-of-this-command" back and forth frequently encountered in bug reports. diff --git a/apidocs/cloud-api-source/README.md b/apidocs/cloud-api-source/README.md index 658f86b1e41..c636ecdb97c 100644 --- a/apidocs/cloud-api-source/README.md +++ b/apidocs/cloud-api-source/README.md @@ -3,7 +3,7 @@ dockercloud/api-docs [![Deploy to Docker Cloud](https://files.cloud.docker.com/images/deploy-to-dockercloud.svg)](https://cloud.docker.com/stack/deploy/) -If you find a typo or mismatch between the API and this documentation, please send us a pull request! +If you find a typo or mismatch between the API and this documentation, send us a pull request! 
## Usage diff --git a/apidocs/cloud-api-source/source/includes/_errors.md b/apidocs/cloud-api-source/source/includes/_errors.md index 12bb1e69252..52615c126d3 100644 --- a/apidocs/cloud-api-source/source/includes/_errors.md +++ b/apidocs/cloud-api-source/source/includes/_errors.md @@ -17,12 +17,12 @@ Error Code | Meaning 400 | Bad Request -- There's a problem in the content of your request. Retrying the same request will fail. 401 | Unauthorized -- Your API key is wrong or your account has been deactivated. 402 | Payment Required -- You need to provide billing information to perform this request. -403 | Forbidden -- Quota limit exceeded. Please contact support to request a quota increase. +403 | Forbidden -- Quota limit exceeded. Contact support to request a quota increase. 404 | Not Found -- The requested object cannot be found. 405 | Method Not Allowed -- The endpoint requested does not implement the method sent. 409 | Conflict -- The object cannot be created or updated because another object exists with the same unique fields 415 | Unsupported Media Type -- Make sure you are using `Accept` and `Content-Type` headers as `application/json` and that the data your are `POST`-ing or `PATCH`-ing is in valid JSON format. 429 | Too Many Requests -- You are being throttled because of too many requests in a short period of time. 500 | Internal Server Error -- There was a server error while processing your request. Try again later, or contact support. -503 | Service Unavailable -- We're temporarily offline for maintenance. Please try again later. -504 | Gateway Timeout -- Our API servers are at full capacity. Please try again later. \ No newline at end of file +503 | Service Unavailable -- We're temporarily offline for maintenance. Try again later. +504 | Gateway Timeout -- Our API servers are at full capacity. Try again later. 
\ No newline at end of file diff --git a/apidocs/cloud-api-source/source/includes/container.md b/apidocs/cloud-api-source/source/includes/container.md index 3cbb5903577..29fa9ca6b68 100644 --- a/apidocs/cloud-api-source/source/includes/container.md +++ b/apidocs/cloud-api-source/source/includes/container.md @@ -331,7 +331,7 @@ protocol | The protocol of the port, either `tcp` or `udp` inner_port | The published port number inside the container outer_port | The published port number in the node public network interface port_name | Name of the service associated to this port -uri_protocol | The protocol to be used in the endpoint for this port (i.e. `http`) +uri_protocol | The protocol to be used in the endpoint for this port, such as `http` endpoint_uri | The URI of the endpoint for this port published | Whether the port has been published in the host public network interface or not. Non-published ports can only be accessed via links. diff --git a/apidocs/cloud-api-source/source/includes/registry.md b/apidocs/cloud-api-source/source/includes/registry.md index 9c1d8462778..b84c304ff05 100644 --- a/apidocs/cloud-api-source/source/includes/registry.md +++ b/apidocs/cloud-api-source/source/includes/registry.md @@ -26,7 +26,7 @@ Attribute | Description --------- | ----------- resource_uri | A unique API endpoint that represents the registry name | Human-readable name of the registry -host | FQDN of the registry, i.e. 
`registry-1.docker.io` +host | FQDN of the registry, such as `registry-1.docker.io` is_docker_registry | Whether this registry is run by Docker is_ssl | Whether this registry has SSL activated or not port | The port number where the registry is listening to diff --git a/apidocs/cloud-api-source/source/includes/repository.md b/apidocs/cloud-api-source/source/includes/repository.md index 5aaa29ba320..cf96964ffe2 100644 --- a/apidocs/cloud-api-source/source/includes/repository.md +++ b/apidocs/cloud-api-source/source/includes/repository.md @@ -22,7 +22,7 @@ This is a [namespaced endpoint](#namespaced-endpoints). Attribute | Description --------- | ----------- resource_uri | A unique API endpoint that represents the repository -name | Name of the repository, i.e. `my.registry.com/myrepo` +name | Name of the repository, such as `my.registry.com/myrepo` in_use | If the image is being used by any of your services registry | Resource URI of the registry where this image is hosted @@ -123,7 +123,7 @@ Available in Docker Cloud's **REST API** Parameter | Description --------- | ----------- -name | Name of the repository, i.e. 'my.registry.com/myrepo' +name | Name of the repository, such as 'my.registry.com/myrepo' username | Username to authenticate with the third party registry password | Password to authenticate with the third party registry @@ -258,7 +258,7 @@ repository.Remove() docker-cloud repository rm registry.local/user1/image1 ``` -Removes the external repository from Docker Cloud. It won't remove the repository from the third party registry where it's stored. +Removes the external repository from Docker Cloud. It doesn't remove the repository from the third party registry where it's stored. 
### Endpoint Type diff --git a/apidocs/cloud-api-source/source/includes/service.md b/apidocs/cloud-api-source/source/includes/service.md index bbc2fd9bdf7..28f9e6761a3 100644 --- a/apidocs/cloud-api-source/source/includes/service.md +++ b/apidocs/cloud-api-source/source/includes/service.md @@ -299,7 +299,7 @@ Strategy | Description -------- | ----------- EMPTIEST_NODE | It will deploy containers to the node with the lower total amount of running containers (default). HIGH_AVAILABILITY | It will deploy containers to the node with the lower amount of running containers of the same service. -EVERY_NODE | It will deploy one container on every node. The service won't be able to scale manually. New containers will be deployed to new nodes automatically. +EVERY_NODE | It will deploy one container on every node. The service can't scale manually. New containers will be deployed to new nodes automatically. ### Network Modes @@ -408,15 +408,15 @@ Available in Docker Cloud's **REST API** Parameter | Description --------- | ----------- -image | (required) The image used to deploy this service in docker format, i.e. `tutum/hello-world` -name | (optional) A human-readable name for the service, i.e. `my-hello-world-app` (default: `image` without namespace) +image | (required) The image used to deploy this service in docker format, such as `tutum/hello-world` +name | (optional) A human-readable name for the service, such as `my-hello-world-app` (default: `image` without namespace) target_num_containers | (optional) The number of containers to run for this service initially (default: 1) run_command | (optional) The command used to start the containers of this service, overriding the value specified in the image, i.e. `/run.sh` (default: `null`) entrypoint | (optional) The command prefix used to start the containers of this service, overriding the value specified in the image, i.e. 
`/usr/sbin/sshd` (default: `null`) container_ports | (optional) An array of objects with port information to be published in the containers for this service, which will be added to the image port information, i.e. `[{"protocol": "tcp", "inner_port": 80, "outer_port": 80}]` (default: `[]`) (See table `Service Port attributes` below) container_envvars | (optional) An array of objects with environment variables to be added in the service containers on launch (overriding any image-defined environment variables), i.e. `[{"key": "DB_PASSWORD", "value": "mypass"}]` (default: `[]`) (See table `Service Environment Variable attributes` below) linked_to_service | (optional) An array of service resource URIs to link this service to, including the link name, i.e. `[{"to_service": "/api/app/v1/service/80ff1635-2d56-478d-a97f-9b59c720e513/", "name": "db"}]` (default: `[]`) (See table `Related services attributes` below) -bindings | (optional) An array of bindings this service has to mount, i.e. `[{"volumes_from": "/api/app/v1/service/80ff1635-2d56-478d-a97f-9b59c720e513/", "rewritable": true}]` (default: `[]`) (See table `Related bindings attributes` below) +bindings | (optional) An array of bindings this service mounts, i.e. `[{"volumes_from": "/api/app/v1/service/80ff1635-2d56-478d-a97f-9b59c720e513/", "rewritable": true}]` (default: `[]`) (See table `Related bindings attributes` below) autorestart | (optional) Whether the containers for this service should be restarted if they stop, i.e. `ALWAYS` (default: `OFF`, possible values: `OFF`, `ON_FAILURE`, `ALWAYS`) (see [Crash recovery](/docker-cloud/apps/autorestart/) for more information) autodestroy | (optional) Whether the containers should be terminated if they stop, i.e. `OFF` (default: `OFF`, possible values: `OFF`, `ON_SUCCESS`, `ALWAYS`) (see [Autodestroy](/docker-cloud/apps/auto-destroy/) for more information) sequential_deployment | (optional) Whether the containers should be launched and scaled in sequence, i.e. 
`true` (default: `false`) (see [Service scaling](/docker-cloud/apps/service-scaling/) for more information) diff --git a/apidocs/cloud-api-source/source/includes/stack.md b/apidocs/cloud-api-source/source/includes/stack.md index 07660d3c75e..b25708a7824 100644 --- a/apidocs/cloud-api-source/source/includes/stack.md +++ b/apidocs/cloud-api-source/source/includes/stack.md @@ -157,7 +157,9 @@ Content-Type: application/json docker-cloud stack create --name hello-world -f docker-compose.yml ``` -Creates a new stack without starting it. Note that the JSON syntax is abstracted by both, the Docker Cloud CLI and our UI, in order to use [Stack YAML files](/docker-cloud/apps/stack-yaml-reference/). +Creates a new stack without starting it. The JSON syntax is abstracted to use +[Stack YAML files](/docker-cloud/apps/stack-yaml-reference/) in both +the Docker Cloud CLI and our UI. ### Endpoint Type @@ -171,7 +173,7 @@ Available in Docker Cloud's **REST API** Parameter | Description --------- | ----------- -name | (required) A human-readable name for the stack, i.e. `my-hello-world-stack` +name | (required) A human-readable name for the stack, such as `my-hello-world-stack` nickname | (optional) A user-friendly name for the stack (`name` by default) services | (optional) List of services belonging to the stack. Each service accepts the same parameters as a [Create new service](#create-a-new-service) operation (default: `[]`) plus the ability to refer "links" and "volumes-from" by the name of another service in the stack (see example).
diff --git a/apidocs/cloud-api-source/source/index.md b/apidocs/cloud-api-source/source/index.md index 792d3fe1561..857c6a0dca6 100644 --- a/apidocs/cloud-api-source/source/index.md +++ b/apidocs/cloud-api-source/source/index.md @@ -35,7 +35,13 @@ Docker Cloud currently offers a **HTTP REST API** and a **Websocket Stream API** # Authentication -In order to be able to make requests to the Docker Cloud API, you should first obtain an ApiKey for your account. For this, log into Docker Cloud, click on the menu on the upper right corner of the screen, select **Account info** and then select **API keys**. +To make requests to the Docker Cloud API, you need an ApiKey for your account. +To get one: + +1. Log into Docker Cloud. +2. Click on the menu on the upper right corner of the screen. +3. Select **Account info**. +4. Select **API keys**. ## REST API diff --git a/compose/aspnet-mssql-compose.md b/compose/aspnet-mssql-compose.md index 545c1e2bcac..f74ae285ada 100644 --- a/compose/aspnet-mssql-compose.md +++ b/compose/aspnet-mssql-compose.md @@ -13,10 +13,10 @@ You just need to have [Docker Engine](https://docs.docker.com/engine/installatio and [Docker Compose](https://docs.docker.com/compose/install/) installed on your platform of choice: Linux, Mac or Windows. -For this sample, we will create a sample .NET Core Web Application using the -`aspnetcore-build` Docker image. After that, we will create a `Dockerfile`, +For this sample, we create a sample .NET Core Web Application using the +`aspnetcore-build` Docker image. After that, we create a `Dockerfile`, configure this app to use our SQL Server database, and then create a -`docker-compose.yml` that will define the behavior of all of these components. +`docker-compose.yml` that defines the behavior of all of these components. > **Note**: This sample is made for Docker Engine on Linux. For Windows > Containers, visit @@ -24,10 +24,10 @@ configure this app to use our SQL Server database, and then create a 1. 
Create a new directory for your application. - This directory will be the context of your docker-compose project. For + This directory is the context of your docker-compose project. For [Docker for Windows](https://docs.docker.com/docker-for-windows/#/shared-drives) and [Docker for Mac](https://docs.docker.com/docker-for-mac/#/file-sharing), you - have to set up file sharing for the volume that you need to map. + need to set up file sharing for the volume that you need to map. 1. Within your directory, use the `aspnetcore-build` Docker image to generate a sample web application within the container under the `/app` directory and @@ -53,17 +53,17 @@ configure this app to use our SQL Server database, and then create a CMD /bin/bash ./entrypoint.sh ``` - This file defines how to build the web app image. It will use the + This file defines how to build the web app image. It uses the [microsoft/aspnetcore-build](https://hub.docker.com/r/microsoft/aspnetcore-build/), map the volume with the generated code, restore the dependencies, build the - project and expose port 80. After that, it will call an `entrypoint` script - that we will create in the next step. + project and expose port 80. After that, it calls an `entrypoint` script + that we create in the next step. 1. The `Dockerfile` makes use of an entrypoint to your webapp Docker image. Create this script in a file called `entrypoint.sh` and paste the contents below. - > **Note**: Make sure to use UNIX line delimiters. The script won't work if + > **Note**: Make sure to use UNIX line delimiters. The script doesn't work if > you use Windows-based delimiters (Carriage return and line feed). ```bash @@ -81,13 +81,13 @@ configure this app to use our SQL Server database, and then create a exec $run_cmd ``` - This script will restore the database after it starts up, and then will run + This script restores the database after it starts up, and then runs the application. 
This allows some time for the SQL Server database image to start up. 1. Create a `docker-compose.yml` file. Write the following in the file, and make sure to replace the password in the `SA_PASSWORD` environment variable - under `db` below. This file will define the way the images will interact as + under `db` below. This file defines the way the images interact as independent services. > **Note**: The SQL Server container requires a secure password to startup: @@ -145,8 +145,8 @@ configure this app to use our SQL Server database, and then create a } [...] ``` - -1. Go to `app.csproj`. You will find a line like: + +1. Go to `app.csproj`. You see a line like: ``` @@ -158,7 +158,7 @@ configure this app to use our SQL Server database, and then create a ``` ``` - + The Sqlite dependency was at version 1.1.2 at the time of this writing. Use the same version for the SQL Server dependency. @@ -183,7 +183,7 @@ configure this app to use our SQL Server database, and then create a $ docker-compose up ``` - Go ahead and try out the website! This sample will use the SQL Server + Go ahead and try out the website! This sample uses the SQL Server database image in the back-end for authentication. Ready! You now have a ASP.NET Core application running against SQL Server in diff --git a/compose/bundles.md b/compose/bundles.md index fa5dfb64e61..d28910ae883 100644 --- a/compose/bundles.md +++ b/compose/bundles.md @@ -145,7 +145,7 @@ A service has the following fields: Image (required) string
- The image that the service will run. Docker images should be referenced + The image that the service runs. Docker images should be referenced with full content hash to fully specify the deployment artifact for the service. Example: postgres@sha256:e0a230a9f5b4e1b8b03bb3e8cf7322b0e42b7838c5c87f4545edb48f5eb8f077 diff --git a/compose/completion.md b/compose/completion.md index 60680758da3..91b3f567502 100644 --- a/compose/completion.md +++ b/compose/completion.md @@ -36,8 +36,7 @@ fi You can source your `~/.bash_profile` or launch a new terminal to utilize completion. -If you're using MacPorts instead of brew, you'll need to slightly modify your steps to the -following: +If you're using MacPorts instead of brew, use the following steps instead: Run `sudo port install bash-completion` to install bash completion. Add the following lines to `~/.bash_profile`: @@ -53,14 +52,14 @@ completion. ### Zsh -Place the completion script in your `/path/to/zsh/completion`, using e.g. `~/.zsh/completion/`: +Place the completion script in your `/path/to/zsh/completion` (typically `~/.zsh/completion/`): ```shell $ mkdir -p ~/.zsh/completion $ curl -L https://raw.githubusercontent.com/docker/compose/{{site.compose_version}}/contrib/completion/zsh/_docker-compose > ~/.zsh/completion/_docker-compose ``` -Include the directory in your `$fpath`, e.g. by adding in `~/.zshrc`: +Include the directory in your `$fpath` by adding in `~/.zshrc`: ```shell fpath=(~/.zsh/completion $fpath) @@ -80,12 +79,12 @@ exec $SHELL -l ## Available completions -Depending on what you typed on the command line so far, it will complete: +Depending on what you typed on the command line so far, it completes: - available docker-compose commands - options that are available for a particular command - - service names that make sense in a given context (e.g. services with running or stopped instances or services based on images vs. services based on Dockerfiles). 
For `docker-compose scale`, completed service names will automatically have "=" appended. - - arguments for selected options, e.g. `docker-compose kill -s` will complete some signals like SIGHUP and SIGUSR1. + - service names that make sense in a given context, such as services with running or stopped instances or services based on images vs. services based on Dockerfiles. For `docker-compose scale`, completed service names automatically have "=" appended. + - arguments for selected options. For example, `docker-compose kill -s` completes some signals like SIGHUP and SIGUSR1. Enjoy working with Compose faster and with less typos! diff --git a/compose/compose-file/compose-file-v1.md b/compose/compose-file/compose-file-v1.md index 2ea14cd5f81..23e428e5f70 100644 --- a/compose/compose-file/compose-file-v1.md +++ b/compose/compose-file/compose-file-v1.md @@ -29,12 +29,12 @@ The default path for a Compose file is `./docker-compose.yml`. >**Tip**: You can use either a `.yml` or `.yaml` extension for this file. They both work. -A service definition contains configuration which will be applied to each +A service definition contains configuration which is applied to each container started for that service, much like passing command-line parameters to `docker run`. -As with `docker run`, options specified in the Dockerfile (e.g., `CMD`, -`EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to +As with `docker run`, options specified in the Dockerfile, such as `CMD`, +`EXPOSE`, `VOLUME`, `ENV`, are respected by default - you don't need to specify them again in `docker-compose.yml`. This section contains a list of all configuration options supported by a service @@ -63,7 +63,7 @@ Attempting to do so results in an error. Alternate Dockerfile. -Compose will use an alternate file to build with. A build path must also be +Compose uses an alternate file to build with. A build path must also be specified. build: . 
@@ -163,10 +163,10 @@ The entrypoint can also be a list, in a manner similar to - memory_limit=-1 - vendor/bin/phpunit -> **Note**: Setting `entrypoint` will both override any default entrypoint set +> **Note**: Setting `entrypoint` both overrides any default entrypoint set > on the service's image with the `ENTRYPOINT` Dockerfile instruction, *and* -> clear out any default command on the image - meaning that if there's a `CMD` -> instruction in the Dockerfile, it will be ignored. +> clears out any default command on the image - meaning that if there's a `CMD` +> instruction in the Dockerfile, it is ignored. ### env_file @@ -186,18 +186,19 @@ these values. - /opt/secrets.env Compose expects each line in an env file to be in `VAR=VAL` format. Lines -beginning with `#` (i.e. comments) are ignored, as are blank lines. +beginning with `#` are processed as comments and are ignored. Blank lines are +also ignored. # Set Rails/Rack environment RACK_ENV=development > **Note**: If your service specifies a [build](#build) option, variables -> defined in environment files will _not_ be automatically visible during the +> defined in environment files are _not_ automatically visible during the > build. The value of `VAL` is used as is and not modified at all. For example if the value is surrounded by quotes (as is often the case of shell variables), the -quotes will be included in the value passed to Compose. +quotes are included in the value passed to Compose. Keep in mind that _the order of files in the list is significant in determining the value assigned to a variable that shows up more than once_. The files in the @@ -228,7 +229,7 @@ and VAR=hello ``` -$VAR will be `hello`. +$VAR is `hello`. 
### environment @@ -250,7 +251,7 @@ machine Compose is running on, which can be helpful for secret or host-specific - SESSION_SECRET > **Note**: If your service specifies a [build](#build) option, variables -> defined in `environment` will _not_ be automatically visible during the +> defined in `environment` are _not_ automatically visible during the > build. ### expose @@ -311,7 +312,7 @@ Add hostname mappings. Use the same values as the docker client `--add-host` par - "somehost:162.242.195.82" - "otherhost:50.31.209.229" -An entry with the ip address and hostname will be created in `/etc/hosts` inside containers for this service, e.g: +An entry with the ip address and hostname is created in `/etc/hosts` inside containers for this service, e.g: 162.242.195.82 somehost 50.31.209.229 otherhost @@ -364,7 +365,7 @@ a link alias (`SERVICE:ALIAS`), or just the service name. - db:database - redis -Containers for the linked service will be reachable at a hostname identical to +Containers for the linked service are reachable at a hostname identical to the alias, or the service name if no alias was specified. Links also express dependency between services in the same way as @@ -411,19 +412,19 @@ id. pid: "host" -Sets the PID mode to the host PID mode. This turns on sharing between -container and the host operating system the PID address space. Containers -launched with this flag will be able to access and manipulate other +Sets the PID mode to the host PID mode. This turns on sharing between +container and the host operating system the PID address space. Containers +launched with this flag can access and manipulate other containers in the bare-metal machine's namespace and vise-versa. ### ports Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container -port (a random host port will be chosen). +port (an ephemeral host port is chosen). 
> **Note**: When mapping ports in the `HOST:CONTAINER` format, you may experience -> erroneous results when using a container port lower than 60, because YAML will -> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason, +> erroneous results when using a container port lower than 60, because YAML +> parses numbers in the format `xx:yy` as a base-60 value. For this reason, > we recommend always explicitly specifying your port mappings as strings. ports: @@ -447,7 +448,7 @@ Override the default labeling scheme for each container. ### stop_signal Sets an alternative signal to stop the container. By default `stop` uses -SIGTERM. Setting an alternative signal using `stop_signal` will cause +SIGTERM. Setting an alternative signal using `stop_signal` causes `stop` to send that signal instead. stop_signal: SIGUSR1 @@ -470,10 +471,10 @@ Mount paths or named volumes, optionally specifying a path on the host machine (`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`). For [version 2 files](compose-versioning#version-2), named volumes need to be specified with the [top-level `volumes` key](compose-file-v2.md#volume-configuration-reference). -When using [version 1](compose-versioning#version-1), the Docker Engine will create the named +When using [version 1](compose-versioning#version-1), the Docker Engine creates the named volume automatically if it doesn't exist. -You can mount a relative path on the host, which will expand relative to +You can mount a relative path on the host, which expands relative to the directory of the Compose configuration file being used. Relative paths should always begin with `.` or `..`. @@ -501,11 +502,11 @@ There are several things to note, depending on which [Compose file version](compose-versioning#versioning) you're using: - For [version 1 files](compose-versioning#version-1), both named volumes and - container volumes will use the specified driver. + container volumes use the specified driver. 
-- No path expansion will be done if you have also specified a `volume_driver`.
+- No path expansion is done if you have also specified a `volume_driver`.
   For example, if you specify a mapping of `./foo:/data`, the `./foo` part
-  will be passed straight to the volume driver without being expanded.
+  is passed straight to the volume driver without being expanded.
 
 See [Docker Volumes](/engine/userguide/dockervolumes.md) and
 [Volume Plugins](/engine/extend/plugins_volume.md) for more information.
@@ -514,7 +515,7 @@ See [Docker Volumes](/engine/userguide/dockervolumes.md) and
 
 Mount all of the volumes from another service or container, optionally
 specifying read-only access (``ro``) or read-write (``rw``). If no access level
-is specified, then read-write will be used.
+is specified, then read-write is used.
 
     volumes_from:
      - service_name
diff --git a/compose/compose-file/compose-file-v2.md b/compose/compose-file/compose-file-v2.md
index 1299f5e2137..9f192e7591a 100644
--- a/compose/compose-file/compose-file-v2.md
+++ b/compose/compose-file/compose-file-v2.md
@@ -30,13 +30,13 @@ The default path for a Compose file is `./docker-compose.yml`.
 
 >**Tip**: You can use either a `.yml` or `.yaml` extension for this
 file. They both work.
 
-A [container](/engine/reference/glossary.md#container) definition contains configuration which will be applied to each
+A [container](/engine/reference/glossary.md#container) definition contains configuration which is applied to each
 container started for that service, much like passing command-line parameters to
 `docker run`. Likewise, network and volume definitions are analogous to
 `docker network create` and `docker volume create`.
-As with `docker run`, options specified in the Dockerfile (e.g., `CMD`,
-`EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to
+As with `docker run`, options specified in the Dockerfile, such as `CMD`,
+`EXPOSE`, `VOLUME`, `ENV`, are respected by default - you don't need to
 specify them again in `docker-compose.yml`.
 
 You can use environment variables in configuration values with a Bash-like
@@ -126,7 +126,7 @@ with the `webapp` and optional `tag` specified in `image`:
     build: ./dir
     image: webapp:tag
 
-This will result in an image named `webapp` and tagged `tag`, built from `./dir`.
+This results in an image named `webapp` and tagged `tag`, built from `./dir`.
 
 #### context
 
@@ -139,7 +139,7 @@ When the value supplied is a relative path, it is interpreted as relative to the
 location of the Compose file. This directory is also the build context that is
 sent to the Docker daemon.
 
-Compose will build and tag it with a generated name, and use that image thereafter.
+Compose builds and tags it with a generated name, and uses that image thereafter.
 
     build:
       context: ./dir
@@ -148,7 +148,7 @@ Compose will build and tag it with a generated name, and use that image thereaft
 
 Alternate Dockerfile.
 
-Compose will use an alternate file to build with. A build path must also be
+Compose uses an alternate file to build with. A build path must also be
 specified.
 
     build:
@@ -203,7 +203,7 @@ Add hostname mappings at build-time. Use the same values as the docker client `-
     - "somehost:162.242.195.82"
     - "otherhost:50.31.209.229"
 
-An entry with the ip address and hostname will be created in `/etc/hosts` inside containers for this build, e.g:
+An entry with the ip address and hostname is created in `/etc/hosts` inside containers for this build, e.g:
 
     162.242.195.82  somehost
     50.31.209.229   otherhost
 
@@ -237,7 +237,7 @@ those used by other software.
 > Added in [version 2.2](compose-versioning.md#version-22) file format
 
-Set the network containers will connect to for the `RUN` instructions during
+Set the network containers connect to for the `RUN` instructions during
 build.
 
     build:
@@ -332,11 +332,11 @@ client create option.
 
 Express dependency between services, which has two effects:
 
-- `docker-compose up` will start services in dependency order. In the following
-  example, `db` and `redis` will be started before `web`.
+- `docker-compose up` starts services in dependency order. In the following
+  example, `db` and `redis` are started before `web`.
 
-- `docker-compose up SERVICE` will automatically include `SERVICE`'s
-  dependencies. In the following example, `docker-compose up web` will also
+- `docker-compose up SERVICE` automatically includes `SERVICE`'s
+  dependencies. In the following example, `docker-compose up web` also
   create and start `db` and `redis`.
 
 Simple example:
@@ -353,7 +353,7 @@ Simple example:
       db:
         image: postgres
 
-> **Note**: `depends_on` will not wait for `db` and `redis` to be "ready" before
+> **Note**: `depends_on` does not wait for `db` and `redis` to be "ready" before
 > starting `web` - only until they have been started. If you need to wait
 > for a service to be ready, see [Controlling startup order](/compose/startup-order.md)
 > for more on this problem and strategies for solving it.
@@ -361,8 +361,8 @@ Simple example:
 > [Added in version 2.1 file format](compose-versioning.md#version-21).
 
 A healthcheck indicates that you want a dependency to wait
-for another container to be "healthy" (i.e. its healthcheck advertises a
-successful state) before starting.
+for another container to be "healthy" (as indicated by a successful state from
+the healthcheck) before starting.
Example: @@ -382,7 +382,7 @@ Example: healthcheck: test: "exit 0" -In the above example, Compose will wait for the `redis` service to be started +In the above example, Compose waits for the `redis` service to be started (legacy behavior) and the `db` service to be healthy before starting `web`. See the [healthcheck section](#healthcheck) for complementary @@ -440,10 +440,10 @@ The entrypoint can also be a list, in a manner similar to - memory_limit=-1 - vendor/bin/phpunit -> **Note**: Setting `entrypoint` will both override any default entrypoint set +> **Note**: Setting `entrypoint` both overrides any default entrypoint set > on the service's image with the `ENTRYPOINT` Dockerfile instruction, *and* -> clear out any default command on the image - meaning that if there's a `CMD` -> instruction in the Dockerfile, it will be ignored. +> clears out any default command on the image - meaning that if there's a `CMD` +> instruction in the Dockerfile, it is ignored. ### env_file @@ -464,19 +464,20 @@ empty or undefined. - /opt/secrets.env Compose expects each line in an env file to be in `VAR=VAL` format. Lines -beginning with `#` (i.e. comments) are ignored, as are blank lines. +beginning with `#` are processed as comments and are ignored. Blank lines are +also ignored. # Set Rails/Rack environment RACK_ENV=development > **Note**: If your service specifies a [build](#build) option, variables -> defined in environment files will _not_ be automatically visible during the +> defined in environment files are _not_ automatically visible during the > build. Use the [args](#args) sub-option of `build` to define build-time > environment variables. The value of `VAL` is used as is and not modified at all. For example if the value is surrounded by quotes (as is often the case of shell variables), the -quotes will be included in the value passed to Compose. +quotes are included in the value passed to Compose. 
Keep in mind that _the order of files in the list is significant in determining the value assigned to a variable that shows up more than once_. The files in the @@ -507,7 +508,7 @@ and VAR=hello ``` -$VAR will be `hello`. +$VAR is `hello`. ### environment @@ -529,7 +530,7 @@ machine Compose is running on, which can be helpful for secret or host-specific - SESSION_SECRET > **Note**: If your service specifies a [build](#build) option, variables -> defined in `environment` will _not_ be automatically visible during the +> defined in `environment` are _not_ automatically visible during the > build. Use the [args](#args) sub-option of `build` to define build-time > environment variables. @@ -595,7 +596,7 @@ Add hostname mappings. Use the same values as the docker client `--add-host` par - "somehost:162.242.195.82" - "otherhost:50.31.209.229" -An entry with the ip address and hostname will be created in `/etc/hosts` inside containers for this service, e.g: +An entry with the ip address and hostname is created in `/etc/hosts` inside containers for this service, e.g: 162.242.195.82 somehost 50.31.209.229 otherhost @@ -603,7 +604,7 @@ An entry with the ip address and hostname will be created in `/etc/hosts` inside ### group_add Specify additional groups (by name or number) which the user inside the -container will be a member of. Groups must exist in both the container and the +container should be a member of. Groups must exist in both the container and the host system to be added. An example of where this is useful is when multiple containers (running as different users) need to all read or write the same file on the host system. That file can be owned by a group shared by all the @@ -622,7 +623,7 @@ services: - mail ``` -Running `id` inside the created container will show that the user belongs to +Running `id` inside the created container shows that the user belongs to the `mail` group, which would not have been the case if `group_add` were not used. 
@@ -741,7 +742,7 @@ a link alias (`"SERVICE:ALIAS"`), or just the service name. - "db:database" - "redis" -Containers for the linked service will be reachable at a hostname identical to +Containers for the linked service are reachable at a hostname identical to the alias, or the service name if no alias was specified. Links also express dependency between services in the same way as @@ -772,7 +773,7 @@ The default value is json-file. driver: "none" > **Note**: Only the `json-file` and `journald` drivers make the logs available directly from -> `docker-compose up` and `docker-compose logs`. Using any other driver will not +> `docker-compose up` and `docker-compose logs`. Using any other driver does not > print any logs. Specify logging options for the logging driver with the ``options`` key, as with the ``--log-opt`` option for `docker run`. @@ -815,7 +816,7 @@ Aliases (alternative hostnames) for this service on the network. Other container Since `aliases` is network-scoped, the same service can have different aliases on different networks. -> **Note**: A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name will resolve to is not guaranteed. +> **Note**: A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name resolves to is not guaranteed. The general format is shown here. @@ -922,12 +923,12 @@ Example usage: pid: "service:foobar" If set to one of the following forms: `container:`, -`service:`, the service will share the PID address space of the +`service:`, the service shares the PID address space of the designated container or service. -If set to "host", the service's PID mode will be the host PID mode. This turns +If set to "host", the service's PID mode is the host PID mode. This turns on sharing between container and the host operating system the PID address -space. 
Containers launched with this flag will be able to access and manipulate +space. Containers launched with this flag can access and manipulate other containers in the bare-metal machine's namespace and vise-versa. > **Note**: the `service:` and `container:` forms require @@ -945,11 +946,11 @@ Tunes a container's PIDs limit. Set to `-1` for unlimited PIDs. ### ports Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container -port (a random host port will be chosen). +port (an ephemeral host port is chosen). > **Note**: When mapping ports in the `HOST:CONTAINER` format, you may experience -> erroneous results when using a container port lower than 60, because YAML will -> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason, +> erroneous results when using a container port lower than 60, because YAML +> parses numbers in the format `xx:yy` as a base-60 value. For this reason, > we recommend always explicitly specifying your port mappings as strings. ports: @@ -968,7 +969,7 @@ port (a random host port will be chosen). > [Added in version 2.2 file format](compose-versioning.md#version-22) Specify the default number of containers to deploy for this service. Whenever -you run `docker-compose up`, Compose will create or remove containers to match +you run `docker-compose up`, Compose creates or removes containers to match the specified number. This value can be overridden using the [`--scale`](/compose/reference/up.md) flag. @@ -1001,7 +1002,7 @@ SIGKILL. ### stop_signal Sets an alternative signal to stop the container. By default `stop` uses -SIGTERM. Setting an alternative signal using `stop_signal` will cause +SIGTERM. Setting an alternative signal using `stop_signal` causes `stop` to send that signal instead. stop_signal: SIGUSR1 @@ -1057,7 +1058,7 @@ more information. Mount host folders or named volumes. Named volumes need to be specified with the [top-level `volumes` key](#volume-configuration-reference). 
-You can mount a relative path on the host, which will expand relative to +You can mount a relative path on the host, which expands relative to the directory of the Compose configuration file being used. Relative paths should always begin with `.` or `..`. @@ -1065,7 +1066,7 @@ should always begin with `.` or `..`. The short syntax uses the generic `[SOURCE:]TARGET[:MODE]` format, where `SOURCE` can be either a host path or volume name. `TARGET` is the container -path where the volume will be mounted. Standard modes are `ro` for read-only +path where the volume is mounted. Standard modes are `ro` for read-only and `rw` for read-write (default). volumes: @@ -1095,7 +1096,7 @@ expressed in the short form. - `source`: the source of the mount, a path on the host for a bind mount, or the name of a volume defined in the [top-level `volumes` key](#volume-configuration-reference). Not applicable for a tmpfs mount. -- `target`: the path in the container where the volume will be mounted +- `target`: the path in the container where the volume is mounted - `read_only`: flag to set the volume as read-only - `bind`: configure additional bind options - `propagation`: the propagation mode used for the bind @@ -1129,8 +1130,8 @@ volumes: ``` > **Note:** When creating bind mounts, using the long syntax requires the -> referenced folder to be created beforehand. Using the short syntax will -> create the folder on the fly if it doesn't exist. +> referenced folder to be created beforehand. Using the short syntax +> creates the folder on the fly if it doesn't exist. > See the [bind mounts documentation](/engine/admin/volumes/bind-mounts.md/#differences-between--v-and---mount-behavior) > for more information. @@ -1142,7 +1143,7 @@ service. 
    volume_driver: mydriver
 
 > **Note:** In [version 2 files](compose-versioning.md#version-2), this
-> option will only apply to anonymous volumes (those specified in the image,
+> option only applies to anonymous volumes (those specified in the image,
 > or specified under `volumes` without an explicit named volume or host path).
 > To configure the driver for a named volume, use the `driver` key under the
 > entry in the [top-level `volumes` option](#volume-configuration-reference).
@@ -1155,7 +1156,7 @@ See [Docker Volumes](/engine/userguide/dockervolumes.md) and
 
 Mount all of the volumes from another service or container, optionally
 specifying read-only access (``ro``) or read-write (``rw``). If no access level is specified,
-then read-write will be used.
+then read-write is used.
 
     volumes_from:
      - service_name
@@ -1178,7 +1179,7 @@ then read-write will be used.
 
 ### restart
 
-`no` is the default restart policy, and it will not restart a container under any circumstance. When `always` is specified, the container always restarts. The `on-failure` policy restarts a container if the exit code indicates an on-failure error.
+`no` is the default restart policy, and it does not restart a container under any circumstance. When `always` is specified, the container always restarts. The `on-failure` policy restarts a container if the exit code indicates an on-failure error.
 
     - restart: no
     - restart: always
@@ -1253,7 +1254,7 @@ that looks like this:
     1gb
 
 The supported units are `b`, `k`, `m` and `g`, and their alternative notation `kb`,
-`mb` and `gb`. Please note that decimal values are not supported at this time.
+`mb` and `gb`. Decimal values are not supported at this time.
## Volume configuration reference @@ -1283,15 +1284,15 @@ up: volumes: data-volume: -An entry under the top-level `volumes` key can be empty, in which case it will -use the default driver configured by the Engine (in most cases, this is the +An entry under the top-level `volumes` key can be empty, in which case it +uses the default driver configured by the Engine (in most cases, this is the `local` driver). Optionally, you can configure it with the following keys: ### driver Specify which volume driver should be used for this volume. Defaults to whatever driver the Docker Engine has been configured to use, which in most cases is -`local`. If the driver is not available, the Engine will return an error when +`local`. If the driver is not available, the Engine returns an error when `docker-compose up` tries to create the volume. driver: foobar @@ -1309,14 +1310,14 @@ documentation for more information. Optional. ### external If set to `true`, specifies that this volume has been created outside of -Compose. `docker-compose up` will not attempt to create it, and will raise +Compose. `docker-compose up` does not attempt to create it, and raises an error if it doesn't exist. `external` cannot be used in conjunction with other volume configuration keys (`driver`, `driver_opts`). In the example below, instead of attempting to create a volume called -`[projectname]_data`, Compose will look for an existing volume simply +`[projectname]_data`, Compose looks for an existing volume simply called `data` and mount it into the `db` service's containers. version: '2' @@ -1394,10 +1395,10 @@ explanation of Compose's use of Docker networking features, see the Specify which driver should be used for this network. The default driver depends on how the Docker Engine you're using is configured, -but in most instances it will be `bridge` on a single host and `overlay` on a +but in most instances it is `bridge` on a single host and `overlay` on a Swarm. 
-The Docker Engine will return an error if the driver is not available.
+The Docker Engine returns an error if the driver is not available.
 
     driver: overlay
 
@@ -1478,15 +1479,15 @@ conflicting with those used by other software.
 
 ### external
 
 If set to `true`, specifies that this network has been created outside of
-Compose. `docker-compose up` will not attempt to create it, and will raise
+Compose. `docker-compose up` does not attempt to create it, and raises
 an error if it doesn't exist.
 
 `external` cannot be used in conjunction with other network configuration keys
 (`driver`, `driver_opts`, `group_add`, `ipam`, `internal`).
 
 In the example below, `proxy` is the gateway to the outside world. Instead of
-attempting to create a network called `[projectname]_outside`, Compose will
-look for an existing network simply called `outside` and connect the `proxy`
+attempting to create a network called `[projectname]_outside`, Compose
+looks for an existing network simply called `outside` and connects the `proxy`
 service's containers to it.
 
     version: '2'
diff --git a/compose/compose-file/compose-versioning.md b/compose/compose-file/compose-versioning.md
index 8cddea9aa55..a423b09d7d2 100644
--- a/compose/compose-file/compose-versioning.md
+++ b/compose/compose-file/compose-versioning.md
@@ -28,7 +28,7 @@ There are several versions of the Compose file format – 1, 2, 2.x, and 3.x
 >
 > We recommend keeping up-to-date with newer releases as much as possible.
 However, if you are using an older version of Docker and want to determine which
-Compose release is compatible, please refer to the [Compose release
+Compose release is compatible, refer to the [Compose release
 notes](https://github.com/docker/compose/releases/). Each set of release notes
 gives details on which versions of Docker Engine are supported, along
 with compatible Compose file format versions.
(See also, the discussion in @@ -87,7 +87,7 @@ Version 1 files cannot declare named Compose does not take advantage of [networking](/compose/networking.md) when you use version 1: every container is placed on the default `bridge` network and is -reachable from every other container at its IP address. You will need to use +reachable from every other container at its IP address. You need to use [links](compose-file-v1.md#links) to enable discovery between containers. Example: @@ -238,7 +238,7 @@ several more. - Removed: `volume_driver`, `volumes_from`, `cpu_shares`, `cpu_quota`, `cpuset`, `mem_limit`, `memswap_limit`, `extends`, `group_add`. See the [upgrading](#upgrading) guide for how to migrate away from these. -(For more information on `extends`, please see [Extending services](/compose/extends.md#extending-services).) +(For more information on `extends`, see [Extending services](/compose/extends.md#extending-services).) - Added: [deploy](/compose/compose-file/index.md#deploy) @@ -306,11 +306,11 @@ several options have been removed: - `cpu_shares`, `cpu_quota`, `cpuset`, `mem_limit`, `memswap_limit`: These have been replaced by the [resources](/compose/compose-file/index.md#resources) key under - `deploy`. Note that `deploy` configuration only takes effect when using + `deploy`. `deploy` configuration only takes effect when using `docker stack deploy`, and is ignored by `docker-compose`. - `extends`: This option has been removed for `version: "3.x"` -Compose files. (For more information, please see [Extending services](/compose/extends.md#extending-services).) +Compose files. (For more information, see [Extending services](/compose/extends.md#extending-services).) - `group_add`: This option has been removed for `version: "3.x"` Compose files. - `pids_limit`: This option has not been introduced in `version: "3.x"` Compose files. 
 - `link_local_ips` in `networks`: This option has not been introduced in
diff --git a/compose/compose-file/index.md b/compose/compose-file/index.md
index 29c6811f000..32fae0c0bee 100644
--- a/compose/compose-file/index.md
+++ b/compose/compose-file/index.md
@@ -158,13 +158,13 @@ The default path for a Compose file is `./docker-compose.yml`.
 
 >**Tip**: You can use either a `.yml` or `.yaml` extension for this
 file. They both work.
 
-A service definition contains configuration which will be applied to each
+A service definition contains configuration that is applied to each
 container started for that service, much like passing command-line parameters to
 `docker container create`. Likewise, network and volume definitions are
 analogous to `docker network create` and `docker volume create`.
 
-As with `docker container create`, options specified in the Dockerfile (e.g., `CMD`,
-`EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to
+As with `docker container create`, options specified in the Dockerfile, such as `CMD`,
+`EXPOSE`, `VOLUME`, `ENV`, are respected by default - you don't need to
 specify them again in `docker-compose.yml`.
 
 You can use environment variables in configuration values with a Bash-like
@@ -208,7 +208,7 @@ with the `webapp` and optional `tag` specified in `image`:
     build: ./dir
     image: webapp:tag
 
-This will result in an image named `webapp` and tagged `tag`, built from `./dir`.
+This results in an image named `webapp` and tagged `tag`, built from `./dir`.
 
 > **Note**: This option is ignored when
 > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md)
@@ -222,7 +222,7 @@ When the value supplied is a relative path, it is interpreted as relative to the
 location of the Compose file. This directory is also the build context that is
 sent to the Docker daemon.
 
-Compose will build and tag it with a generated name, and use that image
+Compose builds and tags it with a generated name, and uses that image
 thereafter.
build: @@ -232,7 +232,7 @@ thereafter. Alternate Dockerfile. -Compose will use an alternate file to build with. A build path must also be +Compose uses an alternate file to build with. A build path must also be specified. build: @@ -281,7 +281,7 @@ at build time is the value in the environment where Compose is running. > **Note:** This option is new in v3.2 -A list of images that the engine will use for cache resolution. +A list of images that the engine uses for cache resolution. build: context: . @@ -365,7 +365,7 @@ configuration. Two different syntax variants are supported. > **Note**: The config must already exist or be > [defined in the top-level `configs` configuration](#configs-configuration-reference) -> of this stack file, or stack deployment will fail. +> of this stack file, or stack deployment fails. For more information on configs, see [configs](/engine/swarm/configs.md). @@ -410,12 +410,12 @@ The long syntax provides more granularity in how the config is created within the service's task containers. - `source`: The name of the config as it exists in Docker. -- `target`: The path and name of the file that will be mounted in the service's +- `target`: The path and name of the file to be mounted in the service's task containers. Defaults to `/` if not specified. -- `uid` and `gid`: The numeric UID or GID which will own the mounted config file +- `uid` and `gid`: The numeric UID or GID that owns the mounted config file within in the service's task containers. Both default to `0` on Linux if not specified. Not supported on Windows. -- `mode`: The permissions for the file that will be mounted within the service's +- `mode`: The permissions for the file that is mounted within the service's task containers, in octal notation. For instance, `0444` represents world-readable. The default is `0444`. 
Configs cannot be writable because they are mounted in a temporary filesystem, so if you set the writable @@ -533,8 +533,8 @@ Specify a service discovery method for external clients connecting to a swarm. > **[Version 3.3](compose-versioning.md#version-3) only.** -* `endpoint_mode: vip` - Docker assigns the service a virtual IP (VIP), -which acts as the “front end” for clients to reach the service on a +* `endpoint_mode: vip` - Docker assigns the service a virtual IP (VIP) +that acts as the “front end” for clients to reach the service on a network. Docker routes requests between the client and available worker nodes for the service, without client knowledge of how many nodes are participating in the service or their IP addresses or ports. @@ -593,7 +593,7 @@ mode topics. #### labels -Specify labels for the service. These labels will *only* be set on the service, +Specify labels for the service. These labels are *only* set on the service, and *not* on any containers for the service. version: "3" @@ -703,7 +703,7 @@ services or containers in a swarm. on non swarm deployments, use [Compose file format version 2 CPU, memory, and other resource options](compose-file-v2.md#cpu-and-other-resources). -If you have further questions, please refer to the discussion on the GitHub +If you have further questions, refer to the discussion on the GitHub issue [docker/compose/4513](https://github.com/docker/compose/issues/4513){: target="_blank" class="_"}. {: .important} @@ -755,7 +755,7 @@ updates. (default: `pause`). - `monitor`: Duration after each task update to monitor for failure `(ns|us|ms|s|m|h)` (default 0s). - `max_failure_ratio`: Failure rate to tolerate during an update. -- `order`: Order of operations during updates. One of `stop-first` (old task is stopped before starting new one), or `start-first` (new task is started first, and the running tasks will briefly overlap) (default `stop-first`) **Note**: Only supported for v3.4 and higher. 
+- `order`: Order of operations during updates. One of `stop-first` (old task is stopped before starting new one), or `start-first` (new task is started first, and the running tasks briefly overlap) (default `stop-first`) **Note**: Only supported for v3.4 and higher. > **Note**: `order` is only supported for v3.4 and higher of the compose file format. @@ -793,10 +793,10 @@ The following sub-options (supported for `docker compose up` and `docker compose - [sysctls](#sysctls) - [userns_mode](#userns_mode) ->**Tip:** See the section on [how to configure volumes for services, swarms, and docker-stack.yml -files](#volumes-for-services-swarms-and-stack-files). Volumes _are_ supported -but in order to work with swarms and services, they must be configured properly, +files](#volumes-for-services-swarms-and-stack-files). Volumes _are_ supported +but to work with swarms and services, they must be configured as named volumes or associated with services that are constrained to nodes with access to the requisite volumes. @@ -814,14 +814,15 @@ client create option. ### depends_on -Express dependency between services, which has two effects: +Express dependency between services. Service dependencies cause the following +behaviors: -- `docker-compose up` will start services in dependency order. In the following - example, `db` and `redis` will be started before `web`. +- `docker-compose up` starts services in dependency order. In the following + example, `db` and `redis` are started before `web`. -- `docker-compose up SERVICE` will automatically include `SERVICE`'s - dependencies. In the following example, `docker-compose up web` will also - create and start `db` and `redis`. +- `docker-compose up SERVICE` automatically includes `SERVICE`'s + dependencies. In the following example, `docker-compose up web` also + creates and starts `db` and `redis`. 
Simple example: @@ -839,7 +840,7 @@ Simple example: > There are several things to be aware of when using `depends_on`: > -> - `depends_on` will not wait for `db` and `redis` to be "ready" before +> - `depends_on` does not wait for `db` and `redis` to be "ready" before > starting `web` - only until they have been started. If you need to wait > for a service to be ready, see [Controlling startup order](/compose/startup-order.md) > for more on this problem and strategies for solving it. @@ -901,10 +902,10 @@ The entrypoint can also be a list, in a manner similar to - memory_limit=-1 - vendor/bin/phpunit -> **Note**: Setting `entrypoint` will both override any default entrypoint set +> **Note**: Setting `entrypoint` both overrides any default entrypoint set > on the service's image with the `ENTRYPOINT` Dockerfile instruction, *and* -> clear out any default command on the image - meaning that if there's a `CMD` -> instruction in the Dockerfile, it will be ignored. +> clears out any default command on the image - meaning that if there's a `CMD` +> instruction in the Dockerfile, it is ignored. ### env_file @@ -925,19 +926,20 @@ empty or undefined. - /opt/secrets.env Compose expects each line in an env file to be in `VAR=VAL` format. Lines -beginning with `#` (i.e. comments) are ignored, as are blank lines. +beginning with `#` are treated as comments and are ignored. Blank lines are +also ignored. # Set Rails/Rack environment RACK_ENV=development > **Note**: If your service specifies a [build](#build) option, variables -> defined in environment files will _not_ be automatically visible during the +> defined in environment files are _not_ automatically visible during the > build. Use the [args](#args) sub-option of `build` to define build-time > environment variables. The value of `VAL` is used as is and not modified at all. 
For example if the value is surrounded by quotes (as is often the case of shell variables), the -quotes will be included in the value passed to Compose. +quotes are included in the value passed to Compose. Keep in mind that _the order of files in the list is significant in determining the value assigned to a variable that shows up more than once_. The files in the @@ -968,7 +970,7 @@ and VAR=hello ``` -$VAR will be `hello`. +$VAR is `hello`. ### environment @@ -990,7 +992,7 @@ machine Compose is running on, which can be helpful for secret or host-specific - SESSION_SECRET > **Note**: If your service specifies a [build](#build) option, variables -> defined in `environment` will _not_ be automatically visible during the +> defined in `environment` are _not_ automatically visible during the > build. Use the [args](#args) sub-option of `build` to define build-time > environment variables. @@ -1018,7 +1020,7 @@ specifying both the container name and the link alias (`CONTAINER:ALIAS`). > **Notes:** > > If you're using the [version 2 or above file format](compose-versioning.md#version-2), the externally-created containers -must be connected to at least one of the same networks as the service which is +must be connected to at least one of the same networks as the service that is linking to them. [Links](compose-file-v2#links) are a legacy option. We recommend using [networks](#networks) instead. > @@ -1033,7 +1035,7 @@ Add hostname mappings. Use the same values as the docker client `--add-host` par - "somehost:162.242.195.82" - "otherhost:50.31.209.229" -An entry with the ip address and hostname will be created in `/etc/hosts` inside containers for this service, e.g: +An entry with the ip address and hostname is created in `/etc/hosts` inside containers for this service, e.g: 162.242.195.82 somehost 50.31.209.229 otherhost @@ -1136,7 +1138,7 @@ a link alias (`SERVICE:ALIAS`), or just the service name. 
- db:database - redis -Containers for the linked service will be reachable at a hostname identical to +Containers for the linked service are reachable at a hostname identical to the alias, or the service name if no alias was specified. Links are not required to enable services to communicate - by default, @@ -1149,7 +1151,7 @@ Links also express dependency between services in the same way as > **Notes** > > * If you define both links and [networks](#networks), services with -> links between them must share at least one network in common in order to +> links between them must share at least one network in common to > communicate. > > * This option is ignored when @@ -1177,7 +1179,7 @@ The default value is json-file. > **Note**: Only the `json-file` and `journald` drivers make the logs available directly from `docker-compose up` and `docker-compose logs`. -Using any other driver will not print any logs. +Using any other driver does not print any logs. Specify logging options for the logging driver with the ``options`` key, as with the ``--log-opt`` option for `docker run`. @@ -1254,7 +1256,7 @@ Aliases (alternative hostnames) for this service on the network. Other container Since `aliases` is network-scoped, the same service can have different aliases on different networks. -> **Note**: A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name will resolve to is not guaranteed. +> **Note**: A network-wide alias can be shared by multiple containers, and even by multiple services. If it is, then exactly which container the name resolves to is not guaranteed. The general format is shown here. @@ -1346,7 +1348,7 @@ networks: Sets the PID mode to the host PID mode. This turns on sharing between container and the host operating system the PID address space. 
Containers -launched with this flag will be able to access and manipulate other +launched with this flag can access and manipulate other containers in the bare-metal machine's namespace and vise-versa. ### ports @@ -1356,11 +1358,11 @@ Expose ports. #### Short syntax Either specify both ports (`HOST:CONTAINER`), or just the container -port (a random host port will be chosen). +port (an ephemeral host port is chosen). > **Note**: When mapping ports in the `HOST:CONTAINER` format, you may experience -> erroneous results when using a container port lower than 60, because YAML will -> parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason, +> erroneous results when using a container port lower than 60, because YAML +> parses numbers in the format `xx:yy` as a base-60 value. For this reason, > we recommend always explicitly specifying your port mappings as strings. ports: @@ -1382,7 +1384,7 @@ expressed in the short form. - `published`: the publicly exposed port - `protocol`: the port protocol (`tcp` or `udp`) - `mode`: `host` for publishing a host port on each node, or `ingress` for a swarm - mode port which will be load balanced. + mode port to be load balanced. ```none ports: @@ -1402,7 +1404,7 @@ configuration. Two different syntax variants are supported. > **Note**: The secret must already exist or be > [defined in the top-level `secrets` configuration](#secrets-configuration-reference) -> of this stack file, or stack deployment will fail. +> of this stack file, or stack deployment fails. For more information on secrets, see [secrets](/engine/swarm/secrets.md). @@ -1444,15 +1446,15 @@ The long syntax provides more granularity in how the secret is created within the service's task containers. - `source`: The name of the secret as it exists in Docker. -- `target`: The name of the file that will be mounted in `/run/secrets/` in the +- `target`: The name of the file to be mounted in `/run/secrets/` in the service's task containers. 
Defaults to `source` if not specified. -- `uid` and `gid`: The numeric UID or GID which will own the file within +- `uid` and `gid`: The numeric UID or GID that owns the file within `/run/secrets/` in the service's task containers. Both default to `0` if not specified. -- `mode`: The permissions for the file that will be mounted in `/run/secrets/` +- `mode`: The permissions for the file to be mounted in `/run/secrets/` in the service's task containers, in octal notation. For instance, `0444` - represents world-readable. The default in Docker 1.13.1 is `0000`, but is - be `0444` in the future. Secrets cannot be writable because they are mounted + represents world-readable. The default in Docker 1.13.1 is `0000`, but is + `0444` in newer versions. Secrets cannot be writable because they are mounted in a temporary filesystem, so if you set the writable bit, it is ignored. The executable bit can be set. If you aren't familiar with UNIX file permission modes, you may find this @@ -1515,7 +1517,7 @@ SIGKILL. ### stop_signal Sets an alternative signal to stop the container. By default `stop` uses -SIGTERM. Setting an alternative signal using `stop_signal` will cause +SIGTERM. Setting an alternative signal using `stop_signal` causes `stop` to send that signal instead. stop_signal: SIGUSR1 @@ -1624,7 +1626,7 @@ volumes: Optionally specify a path on the host machine (`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`). -You can mount a relative path on the host, which will expand relative to +You can mount a relative path on the host, which expands relative to the directory of the Compose configuration file being used. Relative paths should always begin with `.` or `..`. @@ -1654,7 +1656,7 @@ expressed in the short form. - `source`: the source of the mount, a path on the host for a bind mount, or the name of a volume defined in the [top-level `volumes` key](#volume-configuration-reference). Not applicable for a tmpfs mount. 
-- `target`: the path in the container where the volume will be mounted +- `target`: the path in the container where the volume is mounted - `read_only`: flag to set the volume as read-only - `bind`: configure additional bind options - `propagation`: the propagation mode used for the bind @@ -1694,7 +1696,7 @@ volumes: When working with services, swarms, and `docker-stack.yml` files, keep in mind that the tasks (containers) backing a service can be deployed on any node in a -swarm, which may be a different node each time the service is updated. +swarm, and this may be a different node each time the service is updated. In the absence of having named volumes with specified sources, Docker creates an anonymous volume for each task backing a service. Anonymous volumes do not @@ -1708,7 +1710,7 @@ volume present. As an example, the `docker-stack.yml` file for the [votingapp sample in Docker Labs](https://github.com/docker/labs/blob/master/beginner/chapters/votingapp.md) defines a service called `db` that runs a `postgres` database. It is -configured as a named volume in order to persist the data on the swarm, +configured as a named volume to persist the data on the swarm, _and_ is constrained to run only on `manager` nodes. Here is the relevant snip-it from that file: ```none @@ -1764,7 +1766,7 @@ volume mounts (shared filesystems)](/docker-for-mac/osxfs-caching.md). ### restart -`no` is the default restart policy, and it will not restart a container under +`no` is the default restart policy, and it does not restart a container under any circumstance. When `always` is specified, the container always restarts. The `on-failure` policy restarts a container if the exit code indicates an on-failure error. @@ -1773,7 +1775,7 @@ on-failure error. restart: always restart: on-failure restart: unless-stopped - + > **Note**: This option is ignored when > [deploying a stack in swarm mode](/engine/reference/commandline/stack_deploy.md) > with a (version 3) Compose file. 
Use [restart_policy](#restart_policy) instead. @@ -1828,7 +1830,7 @@ that looks like this: 1gb The supported units are `b`, `k`, `m` and `g`, and their alternative notation `kb`, -`mb` and `gb`. Please note that decimal values are not supported at this time. +`mb` and `gb`. Decimal values are not supported at this time. ## Volume configuration reference @@ -1862,15 +1864,15 @@ up: volumes: data-volume: -An entry under the top-level `volumes` key can be empty, in which case it will -use the default driver configured by the Engine (in most cases, this is the +An entry under the top-level `volumes` key can be empty, in which case it +uses the default driver configured by the Engine (in most cases, this is the `local` driver). Optionally, you can configure it with the following keys: ### driver Specify which volume driver should be used for this volume. Defaults to whatever driver the Docker Engine has been configured to use, which in most cases is -`local`. If the driver is not available, the Engine will return an error when +`local`. If the driver is not available, the Engine returns an error when `docker-compose up` tries to create the volume. driver: foobar @@ -1888,14 +1890,14 @@ documentation for more information. Optional. ### external If set to `true`, specifies that this volume has been created outside of -Compose. `docker-compose up` will not attempt to create it, and will raise +Compose. `docker-compose up` does not attempt to create it, and raises an error if it doesn't exist. `external` cannot be used in conjunction with other volume configuration keys (`driver`, `driver_opts`). In the example below, instead of attempting to create a volume called -`[projectname]_data`, Compose will look for an existing volume simply +`[projectname]_data`, Compose looks for an existing volume simply called `data` and mount it into the `db` service's containers. 
version: '2' @@ -1923,7 +1925,7 @@ refer to it within the Compose file: > External volumes are always created with docker stack deploy > -External volumes that do not exist _will be created_ if you use [docker stack +External volumes that do not exist _are created_ if you use [docker stack deploy](#deploy) to launch the app in [swarm mode](/engine/swarm/index.md) (instead of [docker compose up](/compose/reference/up.md)). In swarm mode, a volume is automatically created when it is defined by a service. As service @@ -1956,7 +1958,7 @@ conflicting with those used by other software. > [Added in version 3.4 file format](compose-versioning.md#version-34) Set a custom name for this volume. The name field can be used to reference -networks which contain special characters. The name is used as is +volumes that contain special characters. The name is used as is and will **not** be scoped with the stack name. version: '3.4' @@ -1989,10 +1991,10 @@ Networks](https://github.com/docker/labs/blob/master/networking/README.md) Specify which driver should be used for this network. The default driver depends on how the Docker Engine you're using is configured, -but in most instances it will be `bridge` on a single host and `overlay` on a +but in most instances it is `bridge` on a single host and `overlay` on a Swarm. -The Docker Engine will return an error if the driver is not available. +The Docker Engine returns an error if the driver is not available. driver: overlay @@ -2072,7 +2074,7 @@ documentation for more information. Optional. Only used when the `driver` is set to `overlay`. If set to `true`, then standalone containers can attach to this network, in addition to services. If a standalone container attaches to an overlay network, it can communicate with -services and standalone containers which are also attached to the overlay +services and standalone containers that are also attached to the overlay network from other Docker daemons. 
```yaml @@ -2139,15 +2141,15 @@ conflicting with those used by other software. ### external If set to `true`, specifies that this network has been created outside of -Compose. `docker-compose up` will not attempt to create it, and will raise +Compose. `docker-compose up` does not attempt to create it, and raises an error if it doesn't exist. `external` cannot be used in conjunction with other network configuration keys (`driver`, `driver_opts`, `ipam`, `internal`). In the example below, `proxy` is the gateway to the outside world. Instead of -attempting to create a network called `[projectname]_outside`, Compose will -look for an existing network simply called `outside` and connect the `proxy` +attempting to create a network called `[projectname]_outside`, Compose +looks for an existing network simply called `outside` and connects the `proxy` service's containers to it. version: '2' @@ -2203,20 +2205,20 @@ It can also be used in conjuction with the `external` property: ## configs configuration reference The top-level `configs` declaration defines or references -[configs](/engine/swarm/configs.md) which can be granted to the services in this +[configs](/engine/swarm/configs.md) that can be granted to the services in this stack. The source of the config is either `file` or `external`. - `file`: The config is created with the contents of the file at the specified path. - `external`: If set to true, specifies that this config has already been - created. Docker will not attempt to create it, and if it does not exist, a + created. Docker does not attempt to create it, and if it does not exist, a `config not found` error occurs. - `name`: The name of the config object in Docker. This field can be used to - reference configs which contain special characters. The name is used as is + reference configs that contain special characters. The name is used as is and will **not** be scoped with the stack name. Introduced in version 3.5 file format. 
-In this example, `my_first_config` will be created (as +In this example, `my_first_config` is created (as `_my_first_config)`when the stack is deployed, and `my_second_config` already exists in Docker. @@ -2229,7 +2231,7 @@ configs: ``` Another variant for external configs is when the name of the config in Docker -is different from the name that will exist within the service. The following +is different from the name that exists within the service. The following example modifies the previous one to use the external config called `redis_config`. @@ -2250,20 +2252,20 @@ stack. ## secrets configuration reference The top-level `secrets` declaration defines or references -[secrets](/engine/swarm/secrets.md) which can be granted to the services in this +[secrets](/engine/swarm/secrets.md) that can be granted to the services in this stack. The source of the secret is either `file` or `external`. - `file`: The secret is created with the contents of the file at the specified path. - `external`: If set to true, specifies that this secret has already been - created. Docker will not attempt to create it, and if it does not exist, a + created. Docker does not attempt to create it, and if it does not exist, a `secret not found` error occurs. - `name`: The name of the secret object in Docker. This field can be used to - reference secrets which contain special characters. The name is used as is + reference secrets that contain special characters. The name is used as is and will **not** be scoped with the stack name. Introduced in version 3.5 file format. -In this example, `my_first_secret` will be created (as +In this example, `my_first_secret` is created (as `_my_first_secret)`when the stack is deployed, and `my_second_secret` already exists in Docker. @@ -2276,7 +2278,7 @@ secrets: ``` Another variant for external secrets is when the name of the secret in Docker -is different from the name that will exist within the service. 
The following +is different from the name that exists within the service. The following example modifies the previous one to use the external secret called `redis_secret`. diff --git a/compose/django.md b/compose/django.md index e13ac11395b..8c1ffaa34d8 100644 --- a/compose/django.md +++ b/compose/django.md @@ -4,8 +4,8 @@ keywords: documentation, docs, docker, compose, orchestration, containers title: "Quickstart: Compose and Django" --- -This quick-start guide demonstrates how to use Docker Compose to set up and run a simple Django/PostgreSQL app. Before starting, you'll need to have -[Compose installed](install.md). +This quick-start guide demonstrates how to use Docker Compose to set up and run a simple Django/PostgreSQL app. Before starting, +[install Compose](install.md). ### Define the project components @@ -197,7 +197,7 @@ In this section, you set up the database connection for Django. > > ALLOWED_HOSTS = ['*'] > - > Please note this value is **not** safe for production usage. Refer to the + > This value is **not** safe for production usage. Refer to the [Django documentation](https://docs.djangoproject.com/en/1.11/ref/settings/#allowed-hosts) for more information. 5. List running containers. diff --git a/compose/env-file.md b/compose/env-file.md index ec1bbd9349a..3b73aed17fd 100644 --- a/compose/env-file.md +++ b/compose/env-file.md @@ -13,13 +13,14 @@ named `.env` placed in the folder where the `docker-compose` command is executed These syntax rules apply to the `.env` file: * Compose expects each line in an `env` file to be in `VAR=VAL` format. -* Lines beginning with `#` (i.e. comments) are ignored. +* Lines beginning with `#` are processed as comments and ignored. * Blank lines are ignored. -* There is no special handling of quotation marks (i.e. **they will be part of the VAL**, you have been warned ;) ). +* There is no special handling of quotation marks. This means that + **they are part of the VAL**. 
## Compose file and CLI variables -The environment variables you define here will be used for [variable +The environment variables you define here are used for [variable substitution](compose-file/index.md#variable-substitution) in your Compose file, and can also be used to define the following [CLI variables](reference/envvars.md): @@ -36,7 +37,7 @@ variables](reference/envvars.md): > **Notes** > -> * Values present in the environment at runtime will always override +> * Values present in the environment at runtime always override those defined inside the `.env` file. Similarly, values passed via command-line arguments take precedence as well. > diff --git a/compose/environment-variables.md b/compose/environment-variables.md index 84e798a6a4d..df93a9fec96 100644 --- a/compose/environment-variables.md +++ b/compose/environment-variables.md @@ -34,7 +34,7 @@ You can pass environment variables from your shell straight through to a service environment: - DEBUG -The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run. +The value of the `DEBUG` variable in the container is taken from the value for the same variable in the shell in which Compose is run. ## The “env_file” configuration option @@ -56,7 +56,7 @@ You can also pass a variable through from the shell by not giving it a value: docker-compose run -e DEBUG web python console.py -The value of the `DEBUG` variable in the container will be taken from the value for the same variable in the shell in which Compose is run. +The value of the `DEBUG` variable in the container is taken from the value for the same variable in the shell in which Compose is run. ## The “.env” file @@ -88,12 +88,12 @@ Values in the shell take precedence over those specified in the `.env` file. 
If services: web: image: 'webapp:v2.0' - -When values are provided both with a shell `environment` variable and with an `env_file` configuration file, values of environment variables will be taken **from environment key first and then from environment file, then from a `Dockerfile` `ENV`–entry**: + +When values are provided both with a shell `environment` variable and with an `env_file` configuration file, values of environment variables are taken **from environment key first and then from environment file, then from a `Dockerfile` `ENV`–entry**: $ cat ./Docker/api/api.env NODE_ENV=test - + $ cat docker-compose.yml version: '3' services: @@ -104,15 +104,15 @@ When values are provided both with a shell `environment` variable and with an `e environment: - NODE_ENV=production -You can test this with for e.g. a _NodeJS_ container in the CLI: +You can test this with a command like the following, which starts a _NodeJS_ container in the CLI: $ docker-compose exec api node > process.env.NODE_ENV 'production' -Having any `ARG` or `ENV` setting in a `Dockerfile` will evaluate only if there is _no_ Docker _Compose_ entry for `environment` or `env_file`. +Having any `ARG` or `ENV` setting in a `Dockerfile` evaluates only if there is _no_ Docker _Compose_ entry for `environment` or `env_file`. -_Spcecifics for NodeJS containers:_ If you have a `package.json` entry for `script:start` like `NODE_ENV=test node server.js`, then this will overrule _any_ setting in your `docker-compose.yml` file. +_Specifics for NodeJS containers:_ If you have a `package.json` entry for `script:start` like `NODE_ENV=test node server.js`, then this overrules _any_ setting in your `docker-compose.yml` file. 
## Configuring Compose using environment variables @@ -120,4 +120,5 @@ Several environment variables are available for you to configure the Docker Comp ## Environment variables created by links -When using the ['links' option](compose-file.md#links) in a [v1 Compose file](compose-file.md#version-1), environment variables will be created for each link. They are documented in the [Link environment variables reference](link-env-deprecated.md). Please note, however, that these variables are deprecated - you should just use the link alias as a hostname instead. +When using the ['links' option](compose-file.md#links) in a [v1 Compose file](compose-file.md#version-1), environment variables are created for each link. They are documented in +the [Link environment variables reference](link-env-deprecated.md). However, these variables are deprecated. Use the link alias as a hostname instead. diff --git a/compose/extends.md b/compose/extends.md index 0f58c8d14f4..e299ac8ab22 100644 --- a/compose/extends.md +++ b/compose/extends.md @@ -205,7 +205,7 @@ looks like this: volumes: - "/data" -In this case, you'll get exactly the same result as if you wrote +In this case, you get exactly the same result as if you wrote `docker-compose.yml` with the same `build`, `ports` and `volumes` configuration values defined directly under `web`. @@ -302,7 +302,7 @@ replaces the old value. > was defined in the original service. > > For example, if the original service defines `image: webapp` and the -> local service defines `build: .` then the resulting service will have +> local service defines `build: .` then the resulting service has a > `build: .` and no `image` option. > > This is because `build` and `image` cannot be used together in a version 1 diff --git a/compose/faq.md b/compose/faq.md index cc4ac336197..ec8a08f2724 100644 --- a/compose/faq.md +++ b/compose/faq.md @@ -62,7 +62,7 @@ environment variable](./reference/envvars.md#compose-project-name). 
Typically, you want `docker-compose up`. Use `up` to start or restart all the services defined in a `docker-compose.yml`. In the default "attached" -mode, you'll see all the logs from all the containers. In "detached" mode (`-d`), +mode, you see all the logs from all the containers. In "detached" mode (`-d`), Compose exits after starting the containers, but the containers continue to run in the background. @@ -99,7 +99,7 @@ You should use a `volume` if you want to make changes to your code and see them reflected immediately, for example when you're developing code and your server supports hot code reloading or live-reload. -There may be cases where you'll want to use both. You can have the image +There may be cases where you want to use both. You can have the image include the code using a `COPY`, and use a `volume` in your Compose file to include the code from the host during development. The volume overrides the directory contents of the image. diff --git a/compose/gettingstarted.md b/compose/gettingstarted.md index df2af5f0b76..cbf08a6227f 100644 --- a/compose/gettingstarted.md +++ b/compose/gettingstarted.md @@ -66,7 +66,7 @@ Define the application dependencies. > loop lets us attempt our request multiple times if the redis service is > not available. This is useful at startup while the application comes > online, but also makes our application more resilient if the Redis - > service has to be restarted anytime during the app's lifetime. In a + > service needs to be restarted anytime during the app's lifetime. In a > cluster, this also helps handling momentary connection drops between > nodes. @@ -304,9 +304,9 @@ services. For example, to see what environment variables are available to the $ docker-compose run web env -See `docker-compose --help` to see other available commands. You can also install [command completion](completion.md) for the bash and zsh shell, which will also show you available commands. 
+See `docker-compose --help` to see other available commands. You can also install [command completion](completion.md) for the bash and zsh shell, which also shows you available commands. -If you started Compose with `docker-compose up -d`, you'll probably want to stop +If you started Compose with `docker-compose up -d`, stop your services once you've finished with them: $ docker-compose stop diff --git a/compose/index.md b/compose/index.md index 41b1febab98..7667e909fd6 100644 --- a/compose/index.md +++ b/compose/index.md @@ -20,5 +20,5 @@ Compose is a tool for defining and running multi-container Docker applications. - [Environment file](env-file.md) To see a detailed list of changes for past and current releases of Docker -Compose, please refer to the +Compose, refer to the [CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md). diff --git a/compose/install.md b/compose/install.md index 7a6448dc580..95c3e521984 100644 --- a/compose/install.md +++ b/compose/install.md @@ -197,12 +197,12 @@ but may be less stable. ## Upgrading -If you're upgrading from Compose 1.2 or earlier, you'll need to remove or +If you're upgrading from Compose 1.2 or earlier, remove or migrate your existing containers after upgrading Compose. This is because, as of -version 1.3, Compose uses Docker labels to keep track of containers, and so they -need to be recreated with labels added. +version 1.3, Compose uses Docker labels to keep track of containers, and your +containers need to be recreated to add the labels. -If Compose detects containers that were created without labels, it will refuse +If Compose detects containers that were created without labels, it refuses to run so that you don't end up with two sets of them. 
If you want to keep using your existing containers (for example, because they have data volumes you want to preserve), you can use Compose 1.5.x to migrate them with the following @@ -213,7 +213,7 @@ docker-compose migrate-to-labels ``` Alternatively, if you're not worried about keeping them, you can remove them. -Compose will just create new ones. +Compose just creates new ones. ```bash docker rm -f -v myapp_web_1 myapp_db_1 ... diff --git a/compose/link-env-deprecated.md b/compose/link-env-deprecated.md index 3ab5d8c9215..018fe86f1ab 100644 --- a/compose/link-env-deprecated.md +++ b/compose/link-env-deprecated.md @@ -9,7 +9,7 @@ notoc: true > **Note**: Environment variables are no longer the recommended method for connecting to linked services. Instead, you should use the link name (by default, the name of the linked service) as the hostname to connect to. See the [docker-compose.yml documentation](compose-file.md#links) for details. > -> Environment variables will only be populated if you're using the [legacy version 1 Compose file format](compose-file.md#versioning). +> Environment variables are only populated if you're using the [legacy version 1 Compose file format](compose-file.md#versioning). Compose uses [Docker links](/engine/userguide/networking/default_network/dockerlinks.md) to expose services' containers to one another. Each linked container injects a set of @@ -18,22 +18,22 @@ environment variables, each of which begins with the uppercase name of the conta To see what environment variables are available to a service, run `docker-compose run SERVICE env`. name\_PORT
-Full URL, e.g. `DB_PORT=tcp://172.17.0.5:5432` +Full URL, such as `DB_PORT=tcp://172.17.0.5:5432` name\_PORT\_num\_protocol
-Full URL, e.g. `DB_PORT_5432_TCP=tcp://172.17.0.5:5432` +Full URL, such as `DB_PORT_5432_TCP=tcp://172.17.0.5:5432` name\_PORT\_num\_protocol\_ADDR
-Container's IP address, e.g. `DB_PORT_5432_TCP_ADDR=172.17.0.5` +Container's IP address, such as `DB_PORT_5432_TCP_ADDR=172.17.0.5` name\_PORT\_num\_protocol\_PORT
-Exposed port number, e.g. `DB_PORT_5432_TCP_PORT=5432` +Exposed port number, such as `DB_PORT_5432_TCP_PORT=5432` name\_PORT\_num\_protocol\_PROTO
-Protocol (tcp or udp), e.g. `DB_PORT_5432_TCP_PROTO=tcp` +Protocol (tcp or udp), such as `DB_PORT_5432_TCP_PROTO=tcp` name\_NAME
-Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1` +Fully qualified container name, such as `DB_1_NAME=/myapp_web_1/myapp_db_1` ## Related information diff --git a/compose/networking.md b/compose/networking.md index e5f6a625945..eaf96e32f09 100644 --- a/compose/networking.md +++ b/compose/networking.md @@ -56,9 +56,9 @@ look like `postgres://{DOCKER_IP}:8001`. ## Update containers -If you make a configuration change to a service and run `docker-compose up` to update it, the old container will be removed and the new one will join the network under a different IP address but the same name. Running containers will be able to look up that name and connect to the new address, but the old address will stop working. +If you make a configuration change to a service and run `docker-compose up` to update it, the old container is removed and the new one joins the network under a different IP address but the same name. Running containers can look up that name and connect to the new address, but the old address stops working. -If any containers have connections open to the old container, they will be closed. It is a container's responsibility to detect this condition, look up the name again and reconnect. +If any containers have connections open to the old container, they are closed. It is a container's responsibility to detect this condition, look up the name again and reconnect. ## Links @@ -66,7 +66,7 @@ Links allow you to define extra aliases by which a service is reachable from ano version: "3" services: - + web: build: . links: @@ -78,11 +78,11 @@ See the [links reference](compose-file.md#links) for more information. ## Multi-host networking -> **Note**: The instructions in this section refer to [legacy Docker Swarm](/compose/swarm.md) operations, and will only work when targeting a legacy Swarm cluster. For instructions on deploying a compose project to the newer integrated swarm mode, consult the [Docker Stacks](/compose/bundles.md) documentation. 
+> **Note**: The instructions in this section refer to [legacy Docker Swarm](/compose/swarm.md) operations, and only work when targeting a legacy Swarm cluster. For instructions on deploying a compose project to the newer integrated swarm mode, consult the [Docker Stacks](/compose/bundles.md) documentation. When [deploying a Compose application to a Swarm cluster](swarm.md), you can make use of the built-in `overlay` driver to enable multi-host communication between containers with no changes to your Compose file or application code. -Consult the [Getting started with multi-host networking](/engine/userguide/networking/get-started-overlay/) to see how to set up a Swarm cluster. The cluster will use the `overlay` driver by default, but you can specify it explicitly if you prefer - see below for how to do this. +Consult the [Getting started with multi-host networking](/engine/userguide/networking/get-started-overlay/) to see how to set up a Swarm cluster. The cluster uses the `overlay` driver by default, but you can specify it explicitly if you prefer - see below for how to do this. ## Specify custom networks @@ -94,7 +94,7 @@ Here's an example Compose file defining two custom networks. The `proxy` service version: "3" services: - + proxy: build: ./proxy networks: @@ -133,7 +133,7 @@ Instead of (or as well as) specifying your own networks, you can also change the version: "3" services: - + web: build: . ports: @@ -155,4 +155,4 @@ If you want your containers to join a pre-existing network, use the [`external` external: name: my-pre-existing-network -Instead of attempting to create a network called `[projectname]_default`, Compose will look for a network called `my-pre-existing-network` and connect your app's containers to it. +Instead of attempting to create a network called `[projectname]_default`, Compose looks for a network called `my-pre-existing-network` and connects your app's containers to it. 
diff --git a/compose/overview.md b/compose/overview.md index 1430e9327a4..57858cf85ae 100644 --- a/compose/overview.md +++ b/compose/overview.md @@ -24,8 +24,7 @@ anywhere. 2. Define the services that make up your app in `docker-compose.yml` so they can be run together in an isolated environment. -3. Lastly, run -`docker-compose up` and Compose will start and run your entire app. +3. Run `docker-compose up` and Compose starts and runs your entire app. A `docker-compose.yml` looks like this: @@ -79,7 +78,7 @@ The features of Compose that make it effective are: Compose uses a project name to isolate environments from each other. You can make use of this project name in several different contexts: -* on a dev host, to create multiple copies of a single environment (e.g., you want to run a stable copy for each feature branch of a project) +* on a dev host, to create multiple copies of a single environment, such as when you want to run a stable copy for each feature branch of a project * on a CI server, to keep builds from interfering with each other, you can set the project name to a unique build number * on a shared host or dev host, to prevent different projects, which may use the @@ -167,7 +166,7 @@ For details on using production-oriented features, see ## Release notes To see a detailed list of changes for past and current releases of Docker -Compose, please refer to the +Compose, refer to the [CHANGELOG](https://github.com/docker/compose/blob/master/CHANGELOG.md). ## Getting help @@ -176,11 +175,11 @@ Docker Compose is under active development. If you need help, would like to contribute, or simply want to talk about the project with like-minded individuals, we have a number of open channels for communication. -* To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/compose/issues). +* To report bugs or file feature requests: use the [issue tracker on Github](https://github.com/docker/compose/issues). 
-* To talk about the project with people in real time: please join the +* To talk about the project with people in real time: join the `#docker-compose` channel on freenode IRC. -* To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/compose/pulls). +* To contribute code or documentation changes: submit a [pull request on Github](https://github.com/docker/compose/pulls). -For more information and resources, please visit the [Getting Help project page](/opensource/get-help/). +For more information and resources, visit the [Getting Help project page](/opensource/get-help/). diff --git a/compose/production.md b/compose/production.md index f33e0b221f4..a860c247dbd 100644 --- a/compose/production.md +++ b/compose/production.md @@ -14,18 +14,18 @@ up your application, you can run Compose apps on a Swarm cluster. ### Modify your Compose file for production -You'll almost certainly want to make changes to your app configuration that are -more appropriate to a live environment. These changes may include: +You probably need to make changes to your app configuration to make it ready for +production. 
These changes may include: - Removing any volume bindings for application code, so that code stays inside the container and can't be changed from outside - Binding to different ports on the host -- Setting environment variables differently (e.g., to decrease the verbosity of +- Setting environment variables differently, such as when you need to decrease the verbosity of logging, or to enable email sending) -- Specifying a restart policy (e.g., `restart: always`) to avoid downtime -- Adding extra services (e.g., a log aggregator) +- Specifying a restart policy like `restart: always` to avoid downtime +- Adding extra services such as a log aggregator -For this reason, you'll probably want to define an additional Compose file, say +For this reason, consider defining an additional Compose file, say `production.yml`, which specifies production-appropriate configuration. This configuration file only needs to include the changes you'd like to make from the original Compose file. The additional Compose file @@ -41,14 +41,14 @@ complete example. ### Deploying changes -When you make changes to your app code, you'll need to rebuild your image and +When you make changes to your app code, remember to rebuild your image and recreate your app's containers. To redeploy a service called -`web`, you would use: +`web`, use: $ docker-compose build web $ docker-compose up --no-deps -d web -This will first rebuild the image for `web` and then stop, destroy, and recreate +This first rebuilds the image for `web` and then stops, destroys, and recreates *just* the `web` service. The `--no-deps` flag prevents Compose from also recreating any services which `web` depends on. @@ -62,7 +62,7 @@ remote Docker hosts very easy, and is recommended even if you're not deploying remotely. Once you've set up your environment variables, all the normal `docker-compose` -commands will work with no further configuration. +commands work with no further configuration. 
### Running Compose on a Swarm cluster diff --git a/compose/rails.md b/compose/rails.md index c27a85b56e1..6e92b25b0c8 100644 --- a/compose/rails.md +++ b/compose/rails.md @@ -4,15 +4,14 @@ keywords: documentation, docs, docker, compose, orchestration, containers title: "Quickstart: Compose and Rails" --- -This Quickstart guide will show you how to use Docker Compose to set up and run -a Rails/PostgreSQL app. Before starting, you'll need to have [Compose -installed](install.md). +This Quickstart guide shows you how to use Docker Compose to set up and run +a Rails/PostgreSQL app. Before starting, [install Compose](install.md). ### Define the project -Start by setting up the four files you'll need to build the app. First, since +Start by setting up the four files needed to build the app. First, since your app is going to run inside a Docker container containing all of its -dependencies, you'll need to define exactly what needs to be included in the +dependencies, define exactly what needs to be included in the container. This is done using a file called `Dockerfile`. To begin with, the Dockerfile consists of: @@ -25,7 +24,7 @@ Dockerfile consists of: RUN bundle install COPY . /myapp -That'll put your application code inside an image that will build a container +That'll put your application code inside an image that builds a container with Ruby, Bundler and all your dependencies inside it. For more information on how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) @@ -37,7 +36,7 @@ in a moment by `rails new`. source 'https://rubygems.org' gem 'rails', '5.0.0.1' -You'll need an empty `Gemfile.lock` in order to build our `Dockerfile`. +Create an empty `Gemfile.lock` to build our `Dockerfile`. touch Gemfile.lock @@ -71,8 +70,8 @@ using [docker-compose run](/compose/reference/run/): docker-compose run web rails new . 
--force --database=postgresql -First, Compose will build the image for the `web` service using the -`Dockerfile`. Then it will run `rails new` inside a new container, using that +First, Compose builds the image for the `web` service using the +`Dockerfile`. Then it runs `rails new` inside a new container, using that image. Once it's done, you should have generated a fresh app. List the files. @@ -240,7 +239,7 @@ To restart the application: ### Rebuild the application If you make changes to the Gemfile or the Compose file to try out some different -configurations, you will need to rebuild. Some changes will require only +configurations, you need to rebuild. Some changes require only `docker-compose up --build`, but a full rebuild requires a re-run of `docker-compose run web bundle install` to sync changes in the `Gemfile.lock` to the host, followed by `docker-compose up --build`. diff --git a/compose/reference/build.md b/compose/reference/build.md index 5ec37013063..7ee88b27551 100644 --- a/compose/reference/build.md +++ b/compose/reference/build.md @@ -17,9 +17,9 @@ Options: --build-arg key=val Set build-time variables for one service. ``` -Services are built once and then tagged, by default as `project_service`, e.g., -`composetest_db`. If the Compose file specifies an -[image](/compose/compose-file/index.md#image) name, the image will be +Services are built once and then tagged, by default as `project_service`. For +example, `composetest_db`. If the Compose file specifies an +[image](/compose/compose-file/index.md#image) name, the image is tagged with that name, substituting any variables beforehand. See [variable substitution](#variable-substitution). diff --git a/compose/reference/bundle.md b/compose/reference/bundle.md index 9b6354ac9bb..52940dc3d43 100644 --- a/compose/reference/bundle.md +++ b/compose/reference/bundle.md @@ -22,4 +22,4 @@ Images must have digests stored, which requires interaction with a Docker registry. 
If digests aren't stored for all images, you can fetch them with `docker-compose pull` or `docker-compose push`. To push images automatically when bundling, pass `--push-images`. Only services with -a `build` option specified will have their images pushed. +a `build` option specified have their images pushed. diff --git a/compose/reference/envvars.md b/compose/reference/envvars.md index e31f2c49210..53eab795202 100644 --- a/compose/reference/envvars.md +++ b/compose/reference/envvars.md @@ -88,7 +88,7 @@ Supported values: `true` or `1` to enable, `false` or `0` to disable. ## COMPOSE\_PATH\_SEPARATOR -If set, the value of the `COMPOSE_FILE` environment variable will be separated +If set, the value of the `COMPOSE_FILE` environment variable is separated using this character as path separator. diff --git a/compose/reference/events.md b/compose/reference/events.md index b3558aa19fe..d79ee3a1d67 100644 --- a/compose/reference/events.md +++ b/compose/reference/events.md @@ -14,7 +14,7 @@ Options: Stream container events for every container in the project. -With the `--json` flag, a json object will be printed one per line with the +With the `--json` flag, a json object is printed one per line with the format: ``` diff --git a/compose/reference/exec.md b/compose/reference/exec.md index f44719c054e..3be26683499 100644 --- a/compose/reference/exec.md +++ b/compose/reference/exec.md @@ -20,4 +20,4 @@ Options: This is equivalent of `docker exec`. With this subcommand you can run arbitrary commands in your services. Commands are by default allocating a TTY, so you can -do e.g. `docker-compose exec web sh` to get an interactive prompt. +use a command such as `docker-compose exec web sh` to get an interactive prompt. 
diff --git a/compose/reference/overview.md b/compose/reference/overview.md index c1f245e4eb7..f85787defc3 100644 --- a/compose/reference/overview.md +++ b/compose/reference/overview.md @@ -101,7 +101,7 @@ webapp: ``` If the `docker-compose.admin.yml` also specifies this same service, any matching -fields will override the previous file. New values, add to the `webapp` service +fields override the previous file. New values, add to the `webapp` service configuration. ``` diff --git a/compose/reference/pull.md b/compose/reference/pull.md index 72fc4d76fb9..53825813907 100644 --- a/compose/reference/pull.md +++ b/compose/reference/pull.md @@ -34,7 +34,7 @@ services: - db ``` -If you run `docker-compose pull ServiceName` in the same directory as the `docker-compose.yml` file that defines the service, Docker will pull the associated image. For example, to call the `postgres` image configured as the `db` service in our example, you would run `docker-compose pull db`. +If you run `docker-compose pull ServiceName` in the same directory as the `docker-compose.yml` file that defines the service, Docker pulls the associated image. For example, to call the `postgres` image configured as the `db` service in our example, you would run `docker-compose pull db`. ``` $ docker-compose pull db diff --git a/compose/reference/restart.md b/compose/reference/restart.md index f11b0785359..bf7b5eddb45 100644 --- a/compose/reference/restart.md +++ b/compose/reference/restart.md @@ -14,6 +14,6 @@ Options: Restarts all stopped and running services. -If you make changes to your `docker-compose.yml` configuration these changes will not be reflected after running this command. +If you make changes to your `docker-compose.yml` configuration these changes are not reflected after running this command. -For example, changes to environment variables (which are added after a container is built, but before the container's command is executed) will not be updated after restarting. 
+For example, changes to environment variables (which are added after a container is built, but before the container's command is executed) are not updated after restarting. diff --git a/compose/reference/rm.md b/compose/reference/rm.md index 3d2c7cd99f6..3808a5add24 100644 --- a/compose/reference/rm.md +++ b/compose/reference/rm.md @@ -16,12 +16,12 @@ Options: Removes stopped service containers. -By default, anonymous volumes attached to containers will not be removed. You +By default, anonymous volumes attached to containers are not removed. You can override this with `-v`. To list all volumes, use `docker volume ls`. -Any data which is not in a volume will be lost. +Any data which is not in a volume is lost. -Running the command with no options will also remove one-off containers created +Running the command with no options also removes one-off containers created by `docker-compose up` or `docker-compose run`: ```none diff --git a/compose/reference/run.md b/compose/reference/run.md index ba3179b02ba..876bde93e37 100644 --- a/compose/reference/run.md +++ b/compose/reference/run.md @@ -49,7 +49,7 @@ If you start a service configured with links, the `run` command first checks to docker-compose run db psql -h db -U docker -This will open an interactive PostgreSQL shell for the linked `db` container. +This opens an interactive PostgreSQL shell for the linked `db` container. If you do not want the `run` command to start linked containers, use the `--no-deps` flag: diff --git a/compose/reference/scale.md b/compose/reference/scale.md index 2173ee45b45..c99a002ec80 100644 --- a/compose/reference/scale.md +++ b/compose/reference/scale.md @@ -22,4 +22,4 @@ Numbers are specified as arguments in the form `service=num`. 
For example: [Compose file version 3.x](/compose/compose-file/index.md), you can specify [replicas](/compose/compose-file/index.md#replicas) under the [deploy](/compose/compose-file/index.md#deploy) key as part of a -service configuration for [Swarm mode](/engine/swarm/). Note that the `deploy` key and its sub-options (including `replicas`) will only work with the `docker stack deploy` command, not `docker compose up` or `docker-compose run`. +service configuration for [Swarm mode](/engine/swarm/). The `deploy` key and its sub-options (including `replicas`) only work with the `docker stack deploy` command, not `docker compose up` or `docker-compose run`. diff --git a/compose/startup-order.md b/compose/startup-order.md index 05d07d7e3f4..95a33ef8291 100644 --- a/compose/startup-order.md +++ b/compose/startup-order.md @@ -10,7 +10,7 @@ You can control the order of service startup with the containers in dependency order, where dependencies are determined by `depends_on`, `links`, `volumes_from`, and `network_mode: "service:..."`. -However, Compose will not wait until a container is "ready" (whatever that means +However, Compose does not wait until a container is "ready" (whatever that means for your particular application) - only until it's running. There's a good reason for this. @@ -19,9 +19,9 @@ a subset of a much larger problem of distributed systems. In production, your database could become unavailable or move hosts at any time. Your application needs to be resilient to these types of failures. -To handle this, your application should attempt to re-establish a connection to +To handle this, design your application to attempt to re-establish a connection to the database after a failure. If the application retries the connection, -it should eventually be able to connect to the database. +it can eventually connect to the database. The best solution is to perform this check in your application code, both at startup and whenever a connection is lost for any reason. 
However, if you don't @@ -31,7 +31,7 @@ script: - Use a tool such as [wait-for-it](https://github.com/vishnubob/wait-for-it), [dockerize](https://github.com/jwilder/dockerize), or sh-compatible [wait-for](https://github.com/Eficode/wait-for). These are small - wrapper scripts which you can include in your application's image and will + wrapper scripts which you can include in your application's image to poll a given host and port until it's accepting TCP connections. For example, to use `wait-for-it.sh` or `wait-for` to wrap your service's command: @@ -48,7 +48,7 @@ script: db: image: postgres - >**Tip**: There are limitations to this first solution; e.g., it doesn't verify when a specific service is really ready. If you add more arguments to the command, you'll need to use the `bash shift` command with a loop, as shown in the next example. + >**Tip**: There are limitations to this first solution. For example, it doesn't verify when a specific service is really ready. If you add more arguments to the command, use the `bash shift` command with a loop, as shown in the next example. - Alternatively, write your own wrapper script to perform a more application-specific health check. For example, you might want to wait until Postgres is definitely diff --git a/compose/swarm.md b/compose/swarm.md index 7467b43d8d0..d6686609afc 100644 --- a/compose/swarm.md +++ b/compose/swarm.md @@ -13,8 +13,8 @@ you were using a single Docker host. The actual extent of integration depends on which version of the [Compose file format](compose-file.md#versioning) you are using: -1. If you're using version 1 along with `links`, your app will work, but Swarm - will schedule all containers on one host, because links between containers +1. If you're using version 1 along with `links`, your app works, but Swarm + schedules all containers on one host, because links between containers do not work across hosts with the old networking system. 2. 
If you're using version 2, your app should work with no changes: @@ -35,12 +35,12 @@ set up a Swarm cluster with [Docker Machine](/machine/overview.md) and the overl ### Building images Swarm can build an image from a Dockerfile just like a single-host Docker -instance can, but the resulting image will only live on a single node and won't +instance can, but the resulting image only lives on a single node and won't be distributed to other nodes. If you want to use Compose to scale the service in question to multiple nodes, -you'll have to build it yourself, push it to a registry (e.g. the Docker Hub) -and reference it from `docker-compose.yml`: +build the image, push it to a registry such as Docker Hub, and reference it +from `docker-compose.yml`: $ docker build -t myusername/web . $ docker push myusername/web @@ -56,7 +56,7 @@ and reference it from `docker-compose.yml`: If a service has multiple dependencies of the type which force co-scheduling (see [Automatic scheduling](swarm.md#automatic-scheduling) below), it's possible that -Swarm will schedule the dependencies on different nodes, making the dependent +Swarm schedules the dependencies on different nodes, making the dependent service impossible to schedule. For example, here `foo` needs to be co-scheduled with `bar` and `baz`: @@ -97,7 +97,7 @@ all three services end up on the same node: ### Host ports and recreating containers -If a service maps a port from the host, e.g. `80:8000`, then you may get an +If a service maps a port from the host, such as `80:8000`, then you may get an error like this when running `docker-compose up` on it after the first time: docker: Error response from daemon: unable to find a node that satisfies @@ -130,7 +130,7 @@ There are two viable workarounds for this problem: web-logs: driver: custom-volume-driver -- Remove the old container before creating the new one. You will lose any data +- Remove the old container before creating the new one. You lose any data in the volume. 
$ docker-compose stop web @@ -141,7 +141,7 @@ There are two viable workarounds for this problem: ### Automatic scheduling -Some configuration options will result in containers being automatically +Some configuration options result in containers being automatically scheduled on the same Swarm node to ensure that they work correctly. These are: - `network_mode: "service:..."` and `network_mode: "container:..."` (and diff --git a/compose/wordpress.md b/compose/wordpress.md index 3ccaf95ed60..280788d3c94 100644 --- a/compose/wordpress.md +++ b/compose/wordpress.md @@ -6,7 +6,7 @@ title: "Quickstart: Compose and WordPress" You can use Docker Compose to easily run WordPress in an isolated environment built with Docker containers. This quick-start guide demonstrates how to use -Compose to set up and run WordPress. Before starting, you'll need to have +Compose to set up and run WordPress. Before starting, make sure you have [Compose installed](/compose/install.md). ### Define the project @@ -17,8 +17,8 @@ Compose to set up and run WordPress. Before starting, you'll need to have This directory is the context for your application image. The directory should only contain resources to build that image. - This project directory will contain a `docker-compose.yml` file which will - be complete in itself for a good starter wordpress project. + This project directory contains a `docker-compose.yml` file which + is complete in itself for a good starter wordpress project. >**Tip**: You can use either a `.yml` or `.yaml` extension for this file. They both work. @@ -29,7 +29,7 @@ Compose to set up and run WordPress. Before starting, you'll need to have cd my_wordpress/ -3. Create a `docker-compose.yml` file that will start your +3. 
Create a `docker-compose.yml` file that starts your `WordPress` blog and a separate `MySQL` instance with a volume mount for data persistence: @@ -112,7 +112,7 @@ At this point, WordPress should be running on port `8000` of your Docker Host, and you can complete the "famous five-minute installation" as a WordPress administrator. -> **Note**: The WordPress site will not be immediately available on port `8000` +> **Note**: The WordPress site is not immediately available on port `8000` because the containers are still being initialized and may take a couple of minutes before the first load. diff --git a/cs-engine/1.12/index.md b/cs-engine/1.12/index.md index 8a0d4e86208..44297babe2b 100644 --- a/cs-engine/1.12/index.md +++ b/cs-engine/1.12/index.md @@ -116,7 +116,7 @@ to update its RHEL kernel. $ sudo docker info ``` -8. Only users with `sudo` access will be able to run `docker` commands. +8. Only users with `sudo` access can run `docker` commands. Optionally, add non-sudo access to the Docker socket by adding your user to the `docker` group. @@ -207,7 +207,7 @@ to update its RHEL kernel. $ sudo docker info ``` -6. Only users with `sudo` access will be able to run `docker` commands. +6. Only users with `sudo` access can run `docker` commands. Optionally, add non-sudo access to the Docker socket by adding your user to the `docker` group. @@ -286,7 +286,7 @@ to update its RHEL kernel. $ sudo docker info ``` -6. Only users with `sudo` access will be able to run `docker` commands. +6. Only users with `sudo` access can run `docker` commands. Optionally, add non-sudo access to the Docker socket by adding your user to the `docker` group. @@ -298,7 +298,7 @@ to update its RHEL kernel. 7. [Configure Btrfs for graph storage](/engine/userguide/storagedriver/btrfs-driver.md). This is the only graph storage driver supported on SLES. 
- + ## Install using packages If you need to install Docker on an air-gapped system with no access to the diff --git a/cs-engine/1.12/upgrade.md b/cs-engine/1.12/upgrade.md index fcf7c3b2f23..8e80bdae637 100644 --- a/cs-engine/1.12/upgrade.md +++ b/cs-engine/1.12/upgrade.md @@ -162,7 +162,7 @@ Use these instructions to update APT-based systems. ## Upgrade from a legacy version Use these instructions if you're upgrading your CS Docker Engine from a version -prior to 1.9. In this case you'll have to first uninstall CS Docker Engine, and +prior to 1.9. In this case, first uninstall CS Docker Engine, and then install the latest version. ### CentOS 7.1 & RHEL 7.0/7.1 diff --git a/cs-engine/1.13/index.md b/cs-engine/1.13/index.md index 3127e837817..365eed32ba6 100644 --- a/cs-engine/1.13/index.md +++ b/cs-engine/1.13/index.md @@ -119,7 +119,7 @@ to update its RHEL kernel. $ sudo docker info ``` -8. Only users with `sudo` access will be able to run `docker` commands. +8. Only users with `sudo` access can run `docker` commands. Optionally, add non-sudo access to the Docker socket by adding your user to the `docker` group. @@ -209,7 +209,7 @@ to update its RHEL kernel. $ sudo docker info ``` -6. Only users with `sudo` access will be able to run `docker` commands. +6. Only users with `sudo` access can run `docker` commands. Optionally, add non-sudo access to the Docker socket by adding your user to the `docker` group. @@ -288,7 +288,7 @@ to update its RHEL kernel. $ sudo docker info ``` -6. Only users with `sudo` access will be able to run `docker` commands. +6. Only users with `sudo` access can run `docker` commands. Optionally, add non-sudo access to the Docker socket by adding your user to the `docker` group. diff --git a/cs-engine/1.13/upgrade.md b/cs-engine/1.13/upgrade.md index 7ad5a45f641..12e2e7ae699 100644 --- a/cs-engine/1.13/upgrade.md +++ b/cs-engine/1.13/upgrade.md @@ -157,7 +157,7 @@ Use these instructions to update APT-based systems. 
## Upgrade from a legacy version Use these instructions if you're upgrading your CS Docker Engine from a version -prior to 1.9. In this case you'll have to first uninstall CS Docker Engine, and +prior to 1.9. In this case, first uninstall CS Docker Engine, and then install the latest version. ### CentOS 7.1 & RHEL 7.0/7.1 diff --git a/datacenter/dtr/2.0/configure/config-general.md b/datacenter/dtr/2.0/configure/config-general.md index 592e3747a08..726080c7b4f 100644 --- a/datacenter/dtr/2.0/configure/config-general.md +++ b/datacenter/dtr/2.0/configure/config-general.md @@ -36,7 +36,7 @@ trusted images. After pushing images in the Trusted Registry, you can see which image tags were signed by viewing the appropriate repositories through Trusted Registry's web interface. -To configure your Docker client to be able to push signed images to Docker +To configure your Docker client to push signed images to Docker Trusted Registry refer to the CLI Reference's [Environment Variables Section](/engine/reference/commandline/cli.md#environment-variables) and [Notary Section](/engine/reference/commandline/cli.md#notary). diff --git a/datacenter/dtr/2.0/configure/config-security.md b/datacenter/dtr/2.0/configure/config-security.md index 06e1b069013..a21e77b49e2 100644 --- a/datacenter/dtr/2.0/configure/config-security.md +++ b/datacenter/dtr/2.0/configure/config-security.md @@ -13,7 +13,7 @@ This cert must be accompanied by its private key, entered below. * *SSL Private Key*: The hash from the private key associated with the provided SSL Certificate (as a standard x509 key pair). -In order to run, the Trusted Registry requires encrypted communications through +The Trusted Registry requires encrypted communications through HTTPS/SSL between (a) the Trusted Registry and your Docker Engine(s), and (b) between your web browser and the Trusted Registry admin server. There are a few options for setting this up: @@ -45,7 +45,7 @@ use it. 2. 
If your enterprise can't provide keys, you can use a public Certificate Authority (CA) like "InstantSSL.com" or "RapidSSL.com" to generate a certificate. If your certificates are generated using a globally trusted -Certificate Authority, you won't need to install them on all of your +Certificate Authority, you don't need to install them on all of your client Docker daemons. 3. Use the self-signed registry certificate generated by Docker Trusted @@ -131,7 +131,7 @@ $ sudo /bin/systemctl restart docker.service #### Docker Machine and Boot2Docker -You'll need to make some persistent changes using `bootsync.sh` in your +You need to make some persistent changes using `bootsync.sh` in your Boot2Docker-based virtual machine (as documented in [local customization](https://github.com/boot2docker/boot2docker/blob/master/doc/FAQ.md#local-customisation-with-persistent-partition)). To do this: 1. `docker-machine ssh dev` to enter the VM @@ -167,7 +167,7 @@ If for some reason you can't install the certificate chain on a client Docker host, or your certificates do not have a global CA, you can configure your Docker daemon to run in "insecure" mode. This is done by adding an extra flag, `--insecure-registry host-ip|domain-name`, to your client Docker daemon startup -flags. You'll need to restart the Docker daemon for the change to take effect. +flags. Restart the Docker daemon for the change to take effect. 
This flag means that the communications between your Docker client and the Trusted Registry server are still encrypted, but the client Docker daemon is not diff --git a/datacenter/dtr/2.0/configure/config-storage.md b/datacenter/dtr/2.0/configure/config-storage.md index 06277ae07ec..cff78332a1f 100644 --- a/datacenter/dtr/2.0/configure/config-storage.md +++ b/datacenter/dtr/2.0/configure/config-storage.md @@ -23,7 +23,7 @@ different storage backend allows you to: * Take advantage of other features that are critical to your organization At first, you might have explored Docker Trusted Registry and Docker Engine by -[installing](../install/index.md) them on your system in order to familiarize +[installing](../install/index.md) them on your system to familiarize yourself with them. However, for various reasons such as deployment purposes or continuous integration, it makes sense to think about your long term organization’s needs when selecting a storage backend. The Trusted Registry @@ -223,7 +223,7 @@ include: 2. Select Download to get the text based file. It contains a minimum amount of information and you're going to need additional data based on your driver and business requirements. -3. Go [here](/registry/configuration.md#list-of-configuration-options") to see the open source YAML file. Copy the sections you need and paste into your `storage.yml` file. Note that some settings may contradict others, so +3. Go [here](/registry/configuration.md#list-of-configuration-options") to see the open source YAML file. Copy the sections you need and paste into your `storage.yml` file. Some settings may contradict others, so ensure your choices make sense. 4. Save the YAML file and return to the UI. 5. On the Storage screen, upload the file, review your changes, and click Save. 
diff --git a/datacenter/dtr/2.0/high-availability/index.md b/datacenter/dtr/2.0/high-availability/index.md index fc17e928733..eb083e76ff5 100644 --- a/datacenter/dtr/2.0/high-availability/index.md +++ b/datacenter/dtr/2.0/high-availability/index.md @@ -11,7 +11,7 @@ Docker Trusted Registry (DTR) is designed for high availability. When you first install DTR, you create a cluster with a single DTR replica. Replicas are single instances of DTR that can be joined together to form a cluster. -When joining new replicas to the cluster, you'll be creating new DTR instances +When joining new replicas to the cluster, you create new DTR instances that are running the same set of services. Any change to the state of an instance is replicated across all other instances. diff --git a/datacenter/dtr/2.0/install/license.md b/datacenter/dtr/2.0/install/license.md index f0e8cf77e8f..f9cd60d2c2d 100644 --- a/datacenter/dtr/2.0/install/license.md +++ b/datacenter/dtr/2.0/install/license.md @@ -11,7 +11,7 @@ By default, you don't need to license your Docker Trusted Registry. When installing DTR, it automatically starts using the same license file used on your Docker Universal Control Plane cluster. -However, there are some situations when you have to manually license your +However, there are some situations when you need to manually license your DTR installation: * When upgrading to a new major version, diff --git a/datacenter/dtr/2.0/install/upgrade/upgrade-major.md b/datacenter/dtr/2.0/install/upgrade/upgrade-major.md index f06fde4092d..e770bc134e9 100644 --- a/datacenter/dtr/2.0/install/upgrade/upgrade-major.md +++ b/datacenter/dtr/2.0/install/upgrade/upgrade-major.md @@ -26,7 +26,7 @@ To upgrade to DTR 2.0, you first need to do a fresh installation of DTR 2.0. This can be done on the same node where DTR 1.4.3 is already running or on a new node. 
-If you decide to install the new DTR on the same node, you'll need +If you decide to install the new DTR on the same node, you need to install it on a port other than 443, since DTR 1.4.3 is already using it. Use these instructions to install DTR 2.0: diff --git a/datacenter/dtr/2.0/release-notes/prior-release-notes.md b/datacenter/dtr/2.0/release-notes/prior-release-notes.md index 39d2b47bbdc..aec7bcb2538 100644 --- a/datacenter/dtr/2.0/release-notes/prior-release-notes.md +++ b/datacenter/dtr/2.0/release-notes/prior-release-notes.md @@ -301,9 +301,12 @@ The following notable issues have been remediated: **DHE 1.0 Upgrade Warning** -Customers who are currently using DHE 1.0 **must** follow the [upgrading instructions](https://forums.docker.com/t/upgrading-docker-hub-enterprise-to-docker-trusted-registry/1925) in our support Knowledge Base. These instructions will show you how to modify existing authentication data and storage volume -settings to move to Docker Trusted Registry. Note that automatic upgrading has -been disabled for DHE users because of these issues. +If you currently use DHE 1.0, you **must** follow the +[upgrading instructions](https://forums.docker.com/t/upgrading-docker-hub-enterprise-to-docker-trusted-registry/1925) +in our support Knowledge Base. These instructions show you how to modify +existing authentication data and storage volume settings to move to Docker +Trusted Registry. Automatic upgrading has been disabled for DHE users because of +these issues. ## Version 1.0.1 (11 May 2015) diff --git a/datacenter/dtr/2.0/repos-and-images/index.md b/datacenter/dtr/2.0/repos-and-images/index.md index 516f1f34080..4d906bd96f8 100644 --- a/datacenter/dtr/2.0/repos-and-images/index.md +++ b/datacenter/dtr/2.0/repos-and-images/index.md @@ -9,7 +9,7 @@ image registry like Docker Trusted Registry. If DTR is using the default configurations or was configured to use self-signed certificates, you need to configure your Docker Engine to trust DTR. 
-Otherwise, when you try to login or push and pull images to DTR, you'll get an +Otherwise, when you try to login or push and pull images to DTR, you get an error: ```bash diff --git a/datacenter/dtr/2.0/repos-and-images/push-an-image.md b/datacenter/dtr/2.0/repos-and-images/push-an-image.md index cf2336acc1d..8f36d3af3f1 100644 --- a/datacenter/dtr/2.0/repos-and-images/push-an-image.md +++ b/datacenter/dtr/2.0/repos-and-images/push-an-image.md @@ -12,9 +12,9 @@ title: Push an image to DTR Pushing an image to Docker Trusted Registry is the same as pushing an image to Docker Hub. Since DTR is secure by default, you need to create the image repository before -being able to push the image to DTR. +you can push the image to DTR. -In this example, we'll create the 'golang' repository in DTR, and push the +In this example, we create the 'golang' repository in DTR, and push the Golang 1.7 image to it. ## Create a repository diff --git a/datacenter/dtr/2.0/user-management/create-and-manage-orgs.md b/datacenter/dtr/2.0/user-management/create-and-manage-orgs.md index 661ae8657c6..6495904e701 100644 --- a/datacenter/dtr/2.0/user-management/create-and-manage-orgs.md +++ b/datacenter/dtr/2.0/user-management/create-and-manage-orgs.md @@ -1,6 +1,5 @@ --- -description: Learn how to set up organizations to enforce security in Docker Trusted - Registry. +description: Learn how to set up organizations to enforce security in Docker Trusted Registry. keywords: docker, registry, security, permissions, organizations redirect_from: - /docker-trusted-registry/user-management/create-and-manage-orgs/ @@ -25,7 +24,7 @@ organization. 
![](../images/create-and-manage-orgs-2.png) Repositories owned by this organization will contain the organization name, so -to pull an image from that repository, you'll use: +to pull an image from that repository, use: ```bash $ docker pull //: @@ -33,7 +32,7 @@ $ docker pull //: Click **Save** to create the organization, and then **click the organization** to define which users are allowed to manage this -organization. These users will be able to edit the organization settings, edit +organization. These users can edit the organization settings, edit all repositories owned by the organization, and define the user permissions for this organization. diff --git a/datacenter/dtr/2.0/user-management/create-and-manage-teams.md b/datacenter/dtr/2.0/user-management/create-and-manage-teams.md index ceb7e9ee8c0..475445be555 100644 --- a/datacenter/dtr/2.0/user-management/create-and-manage-teams.md +++ b/datacenter/dtr/2.0/user-management/create-and-manage-teams.md @@ -14,7 +14,7 @@ A team defines the permissions a set of users have for a set of repositories. To create a new team, go to the **DTR web UI**, and navigate to the **Organizations** page. Then **click the organization** where you want to create the team. In this -example, we'll create the 'billing' team under the 'whale' organization. +example, we create the 'billing' team under the 'whale' organization. ![](../images/create-and-manage-teams-1.png) diff --git a/datacenter/dtr/2.1/guides/configure/configure-storage.md b/datacenter/dtr/2.1/guides/configure/configure-storage.md index a20aae4823c..e3932d132da 100644 --- a/datacenter/dtr/2.1/guides/configure/configure-storage.md +++ b/datacenter/dtr/2.1/guides/configure/configure-storage.md @@ -20,7 +20,7 @@ While there is a default storage backend, `filesystem`, the Trusted Registry off At first, you might have explored Docker Trusted Registry and Docker Engine by [installing](../install/index.md) -them on your system in order to familiarize yourself with them. 
+them on your system to familiarize yourself with them. However, for various reasons such as deployment purposes or continuous integration, it makes sense to think about your long term organization’s needs when selecting a storage backend. The Trusted Registry natively supports TLS and @@ -198,13 +198,19 @@ include: **To configure**: -1. Navigate to the Trusted Registry UI > Settings > Storage. -2. Select Download to get the text based file. It contains a minimum amount -of information and you're going to need additional data based on your driver and -business requirements. -3. Go [here](/registry/configuration.md#list-of-configuration-options") to see the open source YAML file. Copy the sections you need and paste into your `storage.yml` file. Note that some settings may contradict others, so -ensure your choices make sense. -4. Save the YAML file and return to the UI. +1. Navigate to the Trusted Registry UI > Settings > Storage. + +2. Select Download to get the text based file. It contains a minimum amount of + information and you're going to need additional data based on your driver + and business requirements. + +3. Go [here](/registry/configuration.md#list-of-configuration-options") to see + the open source YAML file. Copy the sections you need and paste into your + `storage.yml` file. Some settings may contradict others, so ensure your + choices make sense. + +4. Save the YAML file and return to the UI. + 5. On the Storage screen, upload the file, review your changes, and click Save. ## See also diff --git a/datacenter/dtr/2.1/guides/configure/index.md b/datacenter/dtr/2.1/guides/configure/index.md index 91f4f065d50..0322fa33415 100644 --- a/datacenter/dtr/2.1/guides/configure/index.md +++ b/datacenter/dtr/2.1/guides/configure/index.md @@ -8,7 +8,7 @@ title: Use your own certificates By default the DTR services are exposed using HTTPS, to ensure all communications between clients and DTR is encrypted. 
Since DTR replicas use self-signed certificates for this, when a client accesses -DTR, their browsers won't trust this certificate, so the browser displays a +DTR, their browsers don't trust this certificate, so the browser displays a warning message. You can configure DTR to use your own certificates, so that it is automatically @@ -37,7 +37,7 @@ Finally, click **Save** for the changes to take effect. If you're using certificates issued by a globally trusted certificate authority, any web browser or client tool should now trust DTR. If you're using an internal -certificate authority, you'll need to [configure your system to trust that +certificate authority, you need to [configure your system to trust that certificate authority](../repos-and-images/index.md). ## Where to go next diff --git a/datacenter/dtr/2.1/guides/high-availability/index.md b/datacenter/dtr/2.1/guides/high-availability/index.md index 130bf8a7168..9d1ba9e33a5 100644 --- a/datacenter/dtr/2.1/guides/high-availability/index.md +++ b/datacenter/dtr/2.1/guides/high-availability/index.md @@ -9,7 +9,7 @@ Docker Trusted Registry (DTR) is designed for high availability. When you first install DTR, you create a cluster with a single DTR replica. Replicas are single instances of DTR that can be joined together to form a cluster. -When joining new replicas to the cluster, you'll be creating new DTR instances +When joining new replicas to the cluster, you create new DTR instances that are running the same set of services. Any change to the state of an instance is replicated across all other instances. diff --git a/datacenter/dtr/2.1/guides/install/license.md b/datacenter/dtr/2.1/guides/install/license.md index 4ba07b3c553..9c75d2d8f4d 100644 --- a/datacenter/dtr/2.1/guides/install/license.md +++ b/datacenter/dtr/2.1/guides/install/license.md @@ -8,7 +8,7 @@ By default, you don't need to license your Docker Trusted Registry. 
When installing DTR, it automatically starts using the same license file used on your Docker Universal Control Plane cluster. -However, there are some situations when you have to manually license your +However, there are some situations when you need to manually license your DTR installation: * When upgrading to a new major version, diff --git a/datacenter/dtr/2.1/guides/repos-and-images/index.md b/datacenter/dtr/2.1/guides/repos-and-images/index.md index a2c7236ddfc..c838c6831dd 100644 --- a/datacenter/dtr/2.1/guides/repos-and-images/index.md +++ b/datacenter/dtr/2.1/guides/repos-and-images/index.md @@ -10,7 +10,7 @@ image registry like Docker Trusted Registry. If DTR is using the default configurations or was configured to use self-signed certificates, you need to configure your Docker Engine to trust DTR. -Otherwise, when you try to login or push and pull images to DTR, you'll get an +Otherwise, when you try to login or push and pull images to DTR, you get an error: ```none diff --git a/datacenter/dtr/2.1/guides/repos-and-images/push-an-image.md b/datacenter/dtr/2.1/guides/repos-and-images/push-an-image.md index 5f03c6cbef7..64581e97d10 100644 --- a/datacenter/dtr/2.1/guides/repos-and-images/push-an-image.md +++ b/datacenter/dtr/2.1/guides/repos-and-images/push-an-image.md @@ -7,9 +7,9 @@ title: Push an image to DTR Pushing an image to Docker Trusted Registry is the same as pushing an image to Docker Hub. Since DTR is secure by default, you need to create the image repository before -being able to push the image to DTR. +you can push the image to DTR. -In this example, we'll create the 'golang' repository in DTR, and push the +In this example, we create the 'golang' repository in DTR, and push the Golang 1.7 image to it. 
## Create a repository diff --git a/datacenter/dtr/2.1/guides/user-management/create-and-manage-orgs.md b/datacenter/dtr/2.1/guides/user-management/create-and-manage-orgs.md index 96382174cf6..fe5a8439c3f 100644 --- a/datacenter/dtr/2.1/guides/user-management/create-and-manage-orgs.md +++ b/datacenter/dtr/2.1/guides/user-management/create-and-manage-orgs.md @@ -23,7 +23,7 @@ organization. ![](../images/create-and-manage-orgs-2.png) Repositories owned by this organization will contain the organization name, so -to pull an image from that repository, you'll use: +to pull an image from that repository, use: ```bash $ docker pull //: @@ -31,7 +31,7 @@ $ docker pull //: Click **Save** to create the organization, and then **click the organization** to define which users are allowed to manage this -organization. These users will be able to edit the organization settings, edit +organization. These users can edit the organization settings, edit all repositories owned by the organization, and define the user permissions for this organization. diff --git a/datacenter/dtr/2.1/guides/user-management/create-and-manage-teams.md b/datacenter/dtr/2.1/guides/user-management/create-and-manage-teams.md index 1cd002b51a8..72a09363703 100644 --- a/datacenter/dtr/2.1/guides/user-management/create-and-manage-teams.md +++ b/datacenter/dtr/2.1/guides/user-management/create-and-manage-teams.md @@ -12,7 +12,7 @@ A team defines the permissions a set of users have for a set of repositories. To create a new team, go to the **DTR web UI**, and navigate to the **Organizations** page. Then **click the organization** where you want to create the team. In this -example, we'll create the 'billing' team under the 'whale' organization. +example, we create the 'billing' team under the 'whale' organization. 
 ![](../images/create-and-manage-teams-1.png)
 
diff --git a/datacenter/dtr/2.1/reference/cli/install.md b/datacenter/dtr/2.1/reference/cli/install.md
index b45bf7d7cd7..9cf26f35d31 100644
--- a/datacenter/dtr/2.1/reference/cli/install.md
+++ b/datacenter/dtr/2.1/reference/cli/install.md
@@ -50,7 +50,7 @@ the 'join' command.
 |`--etcd-snapshot-count`|Set etcd's number of changes before creating a snapshot.|
 |`--ucp-insecure-tls`|Disable TLS verification for UCP|
 |`--ucp-ca`|Use a PEM-encoded TLS CA certificate for UCP|
-|`--nfs-storage-url`|URL (with IP address or hostname) of the NFS mount if using NFS (e.g. nfs:///)|
+|`--nfs-storage-url`|URL (with IP address or hostname) of the NFS mount if using NFS. For example, `nfs:///`|
 |`--ucp-node`|Specify the host to install Docker Trusted Registry|
 |`--replica-id`|Specify the replica ID. Must be unique per replica, leave blank for random|
 |`--unsafe`|Enable this flag to skip safety checks when installing or joining|
diff --git a/datacenter/dtr/2.1/reference/cli/reconfigure.md b/datacenter/dtr/2.1/reference/cli/reconfigure.md
index 33dce96f94a..c120284c165 100644
--- a/datacenter/dtr/2.1/reference/cli/reconfigure.md
+++ b/datacenter/dtr/2.1/reference/cli/reconfigure.md
@@ -53,5 +53,5 @@ effect. To have no down time, configure your DTR for high-availability.
 |`--etcd-snapshot-count`|Set etcd's number of changes before creating a snapshot.|
 |`--ucp-insecure-tls`|Disable TLS verification for UCP|
 |`--ucp-ca`|Use a PEM-encoded TLS CA certificate for UCP|
-|`--nfs-storage-url`|URL (with IP address or hostname) of the NFS mount if using NFS (e.g. nfs:///)|
+|`--nfs-storage-url`|URL (with IP address or hostname) of the NFS mount if using NFS. For example, `nfs:///`|
 |`--existing-replica-id`|ID of an existing replica in a cluster|
diff --git a/datacenter/dtr/2.2/guides/admin/backups-and-disaster-recovery.md b/datacenter/dtr/2.2/guides/admin/backups-and-disaster-recovery.md
index e8b747894cc..51bfc8784fa 100644
--- a/datacenter/dtr/2.2/guides/admin/backups-and-disaster-recovery.md
+++ b/datacenter/dtr/2.2/guides/admin/backups-and-disaster-recovery.md
@@ -159,7 +159,7 @@ To restore DTR, you need to:
 
 You need to restore DTR on the same UCP cluster where you've created the
 backup. If you restore on a different UCP cluster, all DTR resources will be
-owned by users that don't exist, so you'll not be able to manage the resources,
+owned by users that don't exist, so you can't manage the resources,
 even though they're stored in the DTR data store.
 
 When restoring, you need to use the same version of the `docker/dtr` image
diff --git a/datacenter/dtr/2.2/guides/admin/configure/create-and-manage-orgs.md b/datacenter/dtr/2.2/guides/admin/configure/create-and-manage-orgs.md
index 7dfbfb8b881..3a802e9ac05 100644
--- a/datacenter/dtr/2.2/guides/admin/configure/create-and-manage-orgs.md
+++ b/datacenter/dtr/2.2/guides/admin/configure/create-and-manage-orgs.md
@@ -25,7 +25,7 @@ organization.
 ![](../images/create-and-manage-orgs-2.png)
 
 Repositories owned by this organization will contain the organization name, so
-to pull an image from that repository, you'll use:
+to pull an image from that repository, use:
 
 ```bash
 $ docker pull //:
@@ -33,7 +33,7 @@ $ docker pull //:
 
 Click **Save** to create the organization, and then **click the
 organization** to define which users are allowed to manage this
-organization. These users will be able to edit the organization settings, edit
+organization. These users can edit the organization settings, edit
 all repositories owned by the organization, and define the user permissions
 for this organization.
diff --git a/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/chaining.md b/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/chaining.md index 3b90796b3f0..5679d806433 100644 --- a/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/chaining.md +++ b/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/chaining.md @@ -12,7 +12,7 @@ caches together for faster pulls. Too many levels of chaining might slow down pulls, so you should try different configurations and benchmark them, to find out the right configuration. -In this example we'll show how to configure two caches. A dedicated cache for +This example shows how to configure two caches. A dedicated cache for the Asia region that pulls images directly from DTR, and a cache for China, that pulls images from the Asia cache. @@ -73,7 +73,7 @@ middleware: - /certs/dtr-ca.pem ``` -Both CAs are needed for the downstream cache. +Both CAs are needed for the downstream cache. -Similarly, the China cache needs to be registered with DTR. See [deploy a simple cache](/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/#deploy-a-simple-cache) for how to use the API. -Ultimately the downstream cache needs to be configured for the user in question. +Similarly, the China cache needs to be registered with DTR. See [deploy a simple cache](/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/#deploy-a-simple-cache) for how to use the API. +Ultimately the downstream cache needs to be configured for the user in question. diff --git a/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/index.md b/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/index.md index 3868ba4136d..ba4f13776ea 100644 --- a/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/index.md +++ b/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/index.md @@ -223,7 +223,7 @@ tab, and change the **Content Cache** settings to use the **region-us** cache. 
 ![](../../../images/cache-docker-images-4.png){: .with-border}
 
-Now when you pull images, you'll be using the cache. To test this, try pulling
+Now when you pull images, you use the cache. To test this, try pulling
 an image from DTR. You can inspect the logs of the cache service, to validate
 that the cache is being used, and troubleshoot problems.
 
diff --git a/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/tls.md b/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/tls.md
index aae1c821c13..3094fcf7970 100644
--- a/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/tls.md
+++ b/datacenter/dtr/2.2/guides/admin/configure/deploy-caches/tls.md
@@ -15,16 +15,16 @@ You can learn more about the supported configuration in the
 ## Get the TLS certificate and keys
 
 Before deploying a DTR cache with TLS, you need to get a public key
-certificate for the domain name where you'll deploy the cache. You'll also
+certificate for the domain name used to deploy the cache. You also
 need the public and private key files for that certificate.
 
-Once you have then, transfer those files to the host where you'll deploy
+Once you have them, transfer those files to the host used to deploy
 the DTR cache.
 
 ## Create the cache configuration
 
-Use SSH to log into the host where you'll deploy the DTR cache, and navigate to
+Use SSH to log into the host used to deploy the DTR cache, and navigate to
 the directory where you've stored the TLS certificate and keys.
Create the `config.yml` file with the following content: diff --git a/datacenter/dtr/2.2/guides/admin/configure/external-storage/s3.md b/datacenter/dtr/2.2/guides/admin/configure/external-storage/s3.md index c5668634527..753e29f5d7d 100644 --- a/datacenter/dtr/2.2/guides/admin/configure/external-storage/s3.md +++ b/datacenter/dtr/2.2/guides/admin/configure/external-storage/s3.md @@ -26,8 +26,8 @@ Then, as a best practice you should just for the DTR integration and apply a IAM policy that ensures the user has limited permissions. -This user only needs permissions to access the bucket that you'll use to store -images, and be able to read, write, and delete files. +This user only needs permissions to access the bucket that you use to store +images, and to read, write, and delete files. Here's an example of a policy like that: diff --git a/datacenter/dtr/2.2/guides/admin/configure/garbage-collection.md b/datacenter/dtr/2.2/guides/admin/configure/garbage-collection.md index e1466ca4f4e..971f4911f2f 100644 --- a/datacenter/dtr/2.2/guides/admin/configure/garbage-collection.md +++ b/datacenter/dtr/2.2/guides/admin/configure/garbage-collection.md @@ -38,7 +38,7 @@ Here you can configure GC to run **until it's done** or **with a timeout**. The timeout ensures that your registry will be in read-only mode for a maximum amount of time. -Select an option (either "Until done" or "For N minutes") and you'll have the +Select an option (either "Until done" or "For N minutes") and you have the option to configure GC to run via a cron job, with several default crons provided: @@ -88,7 +88,7 @@ If we delete `example.com/user/blog:latest` but *not* `example.com/user/blog:1.11.0` we expect that `example.com/user/blog:1.11.0` can still be pulled. -This means that we can't delete layers when tags or manifests are deleted. +This means that we can't delete layers when tags or manifests are deleted. 
Instead, we need to pause writing and take reference counts to see how many times a file is used. If the file is never used only then is it safe to delete. diff --git a/datacenter/dtr/2.2/guides/admin/configure/license-your-installation.md b/datacenter/dtr/2.2/guides/admin/configure/license-your-installation.md index e4ac34b489b..19063a61a87 100644 --- a/datacenter/dtr/2.2/guides/admin/configure/license-your-installation.md +++ b/datacenter/dtr/2.2/guides/admin/configure/license-your-installation.md @@ -8,7 +8,7 @@ By default, you don't need to license your Docker Trusted Registry. When installing DTR, it automatically starts using the same license file used on your Docker Universal Control Plane cluster. -However, there are some situations when you have to manually license your +However, there are some situations when you need to manually license your DTR installation: * When upgrading to a new major version, diff --git a/datacenter/dtr/2.2/guides/admin/configure/use-a-load-balancer.md b/datacenter/dtr/2.2/guides/admin/configure/use-a-load-balancer.md index a65e4bb6f4a..3ff600901f4 100644 --- a/datacenter/dtr/2.2/guides/admin/configure/use-a-load-balancer.md +++ b/datacenter/dtr/2.2/guides/admin/configure/use-a-load-balancer.md @@ -51,9 +51,8 @@ with more details on any one of these services: * Metadata persistence (rethinkdb) * Content trust (notary) -Note that this endpoint is for checking the health of a *single* replica. To get -the health of every replica in a cluster, querying each replica individiually is -the preferred way to do it in real time. +This endpoint only checks the health of a *single* replica. To get +the health of every replica in a cluster, query each replica individually. 
The `/api/v0/meta/cluster_status` [endpoint](https://docs.docker.com/datacenter/dtr/2.2/reference/api/) diff --git a/datacenter/dtr/2.2/guides/admin/configure/use-your-own-tls-certificates.md b/datacenter/dtr/2.2/guides/admin/configure/use-your-own-tls-certificates.md index 364d91b538b..37c44f11fad 100644 --- a/datacenter/dtr/2.2/guides/admin/configure/use-your-own-tls-certificates.md +++ b/datacenter/dtr/2.2/guides/admin/configure/use-your-own-tls-certificates.md @@ -8,7 +8,7 @@ keywords: docker, dtr, tls By default the DTR services are exposed using HTTPS, to ensure all communications between clients and DTR is encrypted. Since DTR replicas use self-signed certificates for this, when a client accesses -DTR, their browsers won't trust this certificate, so the browser displays a +DTR, their browsers don't trust this certificate, so the browser displays a warning message. You can configure DTR to use your own certificates, so that it is automatically @@ -37,7 +37,7 @@ Finally, click **Save** for the changes to take effect. If you're using certificates issued by a globally trusted certificate authority, any web browser or client tool should now trust DTR. If you're using an internal -certificate authority, you'll need to configure your system to trust that +certificate authority, you need to configure your system to trust that certificate authority. ## Where to go next diff --git a/datacenter/dtr/2.2/guides/admin/manage-users/create-and-manage-teams.md b/datacenter/dtr/2.2/guides/admin/manage-users/create-and-manage-teams.md index a097cecce18..a8834b1edce 100644 --- a/datacenter/dtr/2.2/guides/admin/manage-users/create-and-manage-teams.md +++ b/datacenter/dtr/2.2/guides/admin/manage-users/create-and-manage-teams.md @@ -12,7 +12,7 @@ defines the permissions a set of users have for a set of repositories. To create a new team, go to the **DTR web UI**, and navigate to the **Organizations** page. Then **click the organization** where you want to create the team. 
In this -example, we'll create the 'billing' team under the 'whale' organization. +example, we create the 'billing' team under the 'whale' organization. ![](../../images/create-and-manage-teams-1.png) diff --git a/datacenter/dtr/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md b/datacenter/dtr/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md index 6ad19f0aa45..11e58a9ef2f 100644 --- a/datacenter/dtr/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md +++ b/datacenter/dtr/2.2/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md @@ -68,7 +68,7 @@ Jobs can be in one of the following status: ## Job capacity -Each job runner has a limited capacity and won't claim jobs that require an +Each job runner has a limited capacity and doesn't claim jobs that require a higher capacity. You can see the capacity of a job runner using the `GET /api/v0/workers` endpoint: @@ -123,8 +123,8 @@ are available: } ``` -Our worker will be able to pick up job id `0` and `2` since it has the capacity -for both, while id `1` will have to wait until the previous scan job is complete: +Our worker can pick up job id `0` and `2` since it has the capacity +for both, while id `1` needs to wait until the previous scan job is complete: ```json { diff --git a/datacenter/dtr/2.2/guides/admin/upgrade.md b/datacenter/dtr/2.2/guides/admin/upgrade.md index 350ee365046..1dbe4bbe130 100644 --- a/datacenter/dtr/2.2/guides/admin/upgrade.md +++ b/datacenter/dtr/2.2/guides/admin/upgrade.md @@ -13,8 +13,8 @@ support upgrades according to the following rules: * When upgrading between minor versions, you can't skip versions, but you can upgrade from any patch versions of the previous minor version to any patch version of the current minor version. 
-* When upgrading between major versions you also have to upgrade one major - version at a time, but you have to upgrade to the earliest available minor +* When upgrading between major versions you also need to upgrade one major + version at a time, but you need to upgrade to the earliest available minor version. We also strongly recommend upgrading to the latest minor/patch version for your major version first. diff --git a/datacenter/dtr/2.2/guides/user/access-dtr/configure-your-notary-client.md b/datacenter/dtr/2.2/guides/user/access-dtr/configure-your-notary-client.md index 289e6364042..59bcb2e0e39 100644 --- a/datacenter/dtr/2.2/guides/user/access-dtr/configure-your-notary-client.md +++ b/datacenter/dtr/2.2/guides/user/access-dtr/configure-your-notary-client.md @@ -6,8 +6,8 @@ keywords: docker, registry, notary, trust The Docker CLI client makes it easy to sign images but to streamline that process it generates a set of private and public keys that are not tied -to your UCP account. This means that you'll be able to push and sign images to -DTR, but UCP won't trust those images since it doesn't know anything about +to your UCP account. This means that you can push and sign images to +DTR, but UCP doesn't trust those images since it doesn't know anything about the keys you're using. So before signing and pushing images to DTR you should: @@ -111,7 +111,7 @@ Import the private key in your UCP bundle into the Notary CLI client: notary key import ``` -The private key is copied to `~/.docker/trust`, and you'll be prompted for a +The private key is copied to `~/.docker/trust`, and you are prompted for a password to encrypt it. 
You can validate what keys Notary knows about by running: diff --git a/datacenter/dtr/2.2/guides/user/access-dtr/index.md b/datacenter/dtr/2.2/guides/user/access-dtr/index.md index ec5f505aaf7..9cea619e9ab 100644 --- a/datacenter/dtr/2.2/guides/user/access-dtr/index.md +++ b/datacenter/dtr/2.2/guides/user/access-dtr/index.md @@ -9,7 +9,7 @@ image registry like Docker Trusted Registry. If DTR is using the default configurations or was configured to use self-signed certificates, you need to configure your Docker Engine to trust DTR. Otherwise, -when you try to log in, push to, or pull images from DTR, you'll get an error: +when you try to log in, push to, or pull images from DTR, you get an error: ```none $ docker login dtr.example.org diff --git a/datacenter/dtr/2.2/guides/user/create-and-manage-webhooks.md b/datacenter/dtr/2.2/guides/user/create-and-manage-webhooks.md index 4ef6089d917..d31c9be2797 100644 --- a/datacenter/dtr/2.2/guides/user/create-and-manage-webhooks.md +++ b/datacenter/dtr/2.2/guides/user/create-and-manage-webhooks.md @@ -27,15 +27,14 @@ The webhook events you can subscribe to are: - Security scanner update complete -In order to subscribe to an event you need to be at least an admin of the -particular repository (for repository events) or namespace -(for namespace events). A global administrator can subscribe to any event. +You need to be at least an admin of the repository or namespace in question to +subscribe to an event for the repository or namespace. A global administrator can subscribe to any event. For example, a user must be an admin of repository "foo/bar" to subscribe to its tag push events. ## Subscribing to events -In order to subscribe to events you must send an API query to +To subscribe to events you must send an API query to `/api/v0/webhooks` with the following payload: ``` @@ -110,12 +109,12 @@ fake data. To send a test payload, fire a `POST` request to ``` Change `type` to the event type that you want to receive. 
DTR will then send -an example payload to the endpoint specified. Note that the example +an example payload to the endpoint specified. The example payload sent is always the same. ## Content structure -Note that comments (`// here`) are added for documentation only; they are not +Comments (`// here`) are added for documentation only; they are not present in POST payloads. ### Repository event content structure @@ -307,7 +306,6 @@ To delete a webhook subscription send a `DELETE` request to `/api/v0/webhooks/{id}`, replacing `{id}` with the webhook subscription ID which you would like to delete. -Note that in order to delete a subscription you must be either a system -administrator or an administrator for the resource which the payload subscribes -to. For example, as a normal user you can only delete subscriptions for -repositories which you are an admin of. +Only a system administrator or an administrator for the resource which the +payload subscribes to can delete a subscription. As a normal user, you can only +delete subscriptions for repositories which you administer. diff --git a/datacenter/dtr/2.2/guides/user/manage-images/index.md b/datacenter/dtr/2.2/guides/user/manage-images/index.md index f0056fba69e..9e6e0aef2ff 100644 --- a/datacenter/dtr/2.2/guides/user/manage-images/index.md +++ b/datacenter/dtr/2.2/guides/user/manage-images/index.md @@ -5,9 +5,9 @@ keywords: docker, registry, repository --- Since DTR is secure by default, you need to create the image repository before -being able to push the image to DTR. +you can push the image to DTR. -In this example, we'll create the 'golang' repository in DTR. +In this example, we create the 'golang' repository in DTR. 
## Create a repository diff --git a/datacenter/dtr/2.2/guides/user/manage-images/pull-and-push-images.md b/datacenter/dtr/2.2/guides/user/manage-images/pull-and-push-images.md index 1784c91e0e9..21b5f702bf1 100644 --- a/datacenter/dtr/2.2/guides/user/manage-images/pull-and-push-images.md +++ b/datacenter/dtr/2.2/guides/user/manage-images/pull-and-push-images.md @@ -41,7 +41,7 @@ to store the image. In this example the full name of our repository is ### Tag the image -In this example we'll pull the Golang image from Docker Hub and tag with +In this example we pull the Golang image from Docker Hub and tag with the full DTR and repository name. A tag defines where the image was pulled from, and where it will be pushed to. diff --git a/datacenter/dtr/2.2/guides/user/manage-images/sign-images/delegate-image-signing.md b/datacenter/dtr/2.2/guides/user/manage-images/sign-images/delegate-image-signing.md index af08c38e909..2a356494485 100644 --- a/datacenter/dtr/2.2/guides/user/manage-images/sign-images/delegate-image-signing.md +++ b/datacenter/dtr/2.2/guides/user/manage-images/sign-images/delegate-image-signing.md @@ -49,7 +49,7 @@ UCP requires that you delegate trust to two different roles: * `targets/releases` * `targets/`, where `` is the UCP team the user belongs to -In this example we'll delegate trust to `targets/releases` and `targets/qa`: +In this example we delegate trust to `targets/releases` and `targets/qa`: ```none # Delegate trust, and add that public key with the role targets/releases @@ -63,9 +63,9 @@ notary delegation add --publish \ --all-paths ``` -Now members from the QA team just have to [configure their Notary CLI client +Now members from the QA team just need to [configure their Notary CLI client with UCP private keys](../../access-dtr/configure-your-notary-client.md) -to be able to [push and sign images](index.md) into the `dev/nginx` repository. +before [pushing and signing images](index.md) into the `dev/nginx` repository. 
## Where to go next diff --git a/datacenter/dtr/2.2/guides/user/manage-images/sign-images/index.md b/datacenter/dtr/2.2/guides/user/manage-images/sign-images/index.md index b0040053160..547494cb057 100644 --- a/datacenter/dtr/2.2/guides/user/manage-images/sign-images/index.md +++ b/datacenter/dtr/2.2/guides/user/manage-images/sign-images/index.md @@ -29,7 +29,7 @@ to the Notary Server internal to DTR. ## Sign images that UCP can trust -With the command above you'll be able to sign your DTR images, but UCP won't +With the command above you can sign your DTR images, but UCP doesn't trust them because it can't tie the private key you're using to sign the images to your UCP account. @@ -41,8 +41,8 @@ To sign images in a way that UCP trusts them you need to: In this example we're going to pull an NGINX image from Docker Store, re-tag it as `dtr.example.org/dev/nginx:1`, push the image to DTR and sign it -in a way that is trusted by UCP. If you manage multiple repositories, you'll -have to do the same procedure for every one of them. +in a way that is trusted by UCP. If you manage multiple repositories, you +need to do the same procedure for every one of them. ### Configure your Notary client @@ -79,7 +79,7 @@ repository. ![DTR](../../../images/sign-an-image-3.png){: .with-border} -DTR shows that the image is signed, but UCP won't trust the image +DTR shows that the image is signed, but UCP doesn't trust the image because it doesn't have any information about the private keys used to sign the image. 
@@ -94,7 +94,7 @@ UCP requires that you delegate trust to two different roles: * `targets/releases` * `targets/`, where `` is the UCP team the user belongs to -In this example we'll delegate trust to `targets/releases` and `targets/admin`: +In this example we delegate trust to `targets/releases` and `targets/admin`: ```none # Delegate trust, and add that public key with the role targets/releases @@ -108,7 +108,7 @@ notary delegation add --publish \ --all-paths ``` -To push the new signing metadata to the Notary server, you'll have to push +To push the new signing metadata to the Notary server, you need to push the image again: ```none diff --git a/datacenter/dtr/2.3/guides/admin/backups-and-disaster-recovery.md b/datacenter/dtr/2.3/guides/admin/backups-and-disaster-recovery.md index eca6a976147..655c5c7812d 100644 --- a/datacenter/dtr/2.3/guides/admin/backups-and-disaster-recovery.md +++ b/datacenter/dtr/2.3/guides/admin/backups-and-disaster-recovery.md @@ -163,7 +163,7 @@ To restore DTR, you need to: You need to restore DTR on the same UCP cluster where you've created the backup. If you restore on a different UCP cluster, all DTR resources will be -owned by users that don't exist, so you'll not be able to manage the resources, +owned by users that don't exist, so you can't manage the resources, even though they're stored in the DTR data store. When restoring, you need to use the same version of the `docker/dtr` image diff --git a/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/chaining.md b/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/chaining.md index 63184c38cfe..a4bdcf7a42c 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/chaining.md +++ b/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/chaining.md @@ -12,7 +12,7 @@ caches together for faster pulls. Too many levels of chaining might slow down pulls, so you should try different configurations and benchmark them, to find out the right configuration. 
-In this example we'll show how to configure two caches. A dedicated cache for +This example shows how to configure two caches. A dedicated cache for the Asia region that pulls images directly from DTR, and a cache for China, that pulls images from the Asia cache. diff --git a/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/index.md b/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/index.md index aac2639ec67..3c4bc066118 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/index.md +++ b/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/index.md @@ -209,7 +209,7 @@ tab, and change the **Content Cache** settings to use the **region-us** cache. You can also automate this through the `/api/v0/accounts/{username}/settings` API. -Now when you pull images, you'll be using the cache. To test this, try pulling +Now when you pull images, you use the cache. To test this, try pulling an image from DTR. You can inspect the logs of the cache service, to validate that the cache is being used, and troubleshoot problems. diff --git a/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/tls.md b/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/tls.md index fb4f073da12..647889b71e2 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/tls.md +++ b/datacenter/dtr/2.3/guides/admin/configure/deploy-caches/tls.md @@ -15,16 +15,16 @@ You can learn more about the supported configuration in the ## Get the TLS certificate and keys Before deploying a DTR cache with TLS you need to get a public key -certificate for the domain name were you'll deploy the cache. You'll also -need the public and private key files for that certificate. +certificate for the domain name used to deploy the cache, as well as +the public and private key files for that certificate. -Once you have then, transfer those file to the host where you'll deploy +Once you have them, transfer those files to the host used to deploy the DTR cache. 
## Create the cache configuration -Use SSH to log into the host where you'll deploy the DTR cache, and navigate to +Use SSH to log into the host used to deploy the DTR cache, and navigate to the directory where you've stored the TLS certificate and keys. Create the `config.yml` file with the following content: diff --git a/datacenter/dtr/2.3/guides/admin/configure/enable-single-sign-on.md b/datacenter/dtr/2.3/guides/admin/configure/enable-single-sign-on.md index da8012e30c0..3d5ae48589a 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/enable-single-sign-on.md +++ b/datacenter/dtr/2.3/guides/admin/configure/enable-single-sign-on.md @@ -1,14 +1,14 @@ --- title: Enable single sign-on -description: Learn how to set up single sign-on between UCP and DTR, so that your users only have to authenticate once +description: Learn how to set up single sign-on between UCP and DTR, so that your users only need to authenticate once keywords: dtr, login, sso --- -By default, users are shared between UCP and DTR, but you have to authenticate +By default, users are shared between UCP and DTR, but you need to authenticate separately on the web UI of both applications. You can configure DTR to have single sign-on (SSO) with UCP, so that users only -have to authenticate once. +need to authenticate once. ## At installation time diff --git a/datacenter/dtr/2.3/guides/admin/configure/external-storage/s3.md b/datacenter/dtr/2.3/guides/admin/configure/external-storage/s3.md index e86bb967009..2e15f8e9ec1 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/external-storage/s3.md +++ b/datacenter/dtr/2.3/guides/admin/configure/external-storage/s3.md @@ -26,8 +26,8 @@ Then, as a best practice you should just for the DTR integration and apply an IAM policy that ensures the user has limited permissions. -This user only needs permissions to access the bucket that you'll use to store -images, and be able to read, write, and delete files. 
+This user only needs permissions to access the bucket that you use to store +images, and to read, write, and delete files. Here's an example of a policy like that: @@ -99,10 +99,10 @@ Once you click **Save**, DTR validates the configurations and saves the changes. ## Configure your clients If you're using a TLS certificate in your storage backend that's not globally -trusted, you'll have to configure all Docker Engines that push or pull from DTR +trusted, you need to configure all Docker Engines that push or pull from DTR to trust that certificate. When you push or pull an image DTR redirects the requests to the storage backend, so if clients don't trust the TLS certificates -of both DTR and the storage backend, they won't be able to push or pull images. +of both DTR and the storage backend, they can't push or pull images. [Learn how to configure the Docker client](../../../user/access-dtr/index.md). And if you've configured DTR to skip TLS verification, you also need to diff --git a/datacenter/dtr/2.3/guides/admin/configure/garbage-collection.md b/datacenter/dtr/2.3/guides/admin/configure/garbage-collection.md index 43c67517e60..4c384d7d8c2 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/garbage-collection.md +++ b/datacenter/dtr/2.3/guides/admin/configure/garbage-collection.md @@ -73,7 +73,7 @@ files of that image tag since it's possible that there are other tags that also use the same files. To delete unused image layers, DTR: -1. Becomes read-only to make sure that no one is able to push an image, thus +1. Becomes read-only to make sure that no one can push an image, thus changing the underlying files in the filesystem. 2. Check all the manifest files and keep a record of the files that are referenced. 
diff --git a/datacenter/dtr/2.3/guides/admin/configure/license-your-installation.md b/datacenter/dtr/2.3/guides/admin/configure/license-your-installation.md index eabe2be0dfa..3c14e283a53 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/license-your-installation.md +++ b/datacenter/dtr/2.3/guides/admin/configure/license-your-installation.md @@ -8,7 +8,7 @@ By default, you don't need to license your Docker Trusted Registry. When installing DTR, it automatically starts using the same license file used on your Docker Universal Control Plane cluster. -However, there are some situations when you have to manually license your +However, there are some situations when you need to manually license your DTR installation: * When upgrading to a new major version, diff --git a/datacenter/dtr/2.3/guides/admin/configure/use-a-load-balancer.md b/datacenter/dtr/2.3/guides/admin/configure/use-a-load-balancer.md index 181e82097b8..1bd4194c98c 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/use-a-load-balancer.md +++ b/datacenter/dtr/2.3/guides/admin/configure/use-a-load-balancer.md @@ -53,7 +53,7 @@ with more details on any one of these services: * Metadata persistence (rethinkdb) * Content trust (notary) -Note that this endpoint is for checking the health of a *single* replica. To get +This endpoint is for checking the health of a *single* replica. To get the health of every replica in a cluster, querying each replica individiually is the preferred way to do it in real time. 
diff --git a/datacenter/dtr/2.3/guides/admin/configure/use-a-web-proxy.md b/datacenter/dtr/2.3/guides/admin/configure/use-a-web-proxy.md index 5b60e8703bd..9490e5e2ad9 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/use-a-web-proxy.md +++ b/datacenter/dtr/2.3/guides/admin/configure/use-a-web-proxy.md @@ -7,7 +7,7 @@ keywords: dtr, configure, http, proxy Docker Trusted Registry makes outgoing connections to check for new versions, automatically renew its license, and update its vulnerability database. -If DTR can't access the internet, then you'll have to manually apply updates. +If DTR can't access the internet, then you need to manually apply updates. One option to keep your environment secure while still allowing DTR access to the internet is to use a web proxy. If you have an HTTP or HTTPS proxy, you diff --git a/datacenter/dtr/2.3/guides/admin/configure/use-your-own-tls-certificates.md b/datacenter/dtr/2.3/guides/admin/configure/use-your-own-tls-certificates.md index 5bfddcf985d..a6e87d5e854 100644 --- a/datacenter/dtr/2.3/guides/admin/configure/use-your-own-tls-certificates.md +++ b/datacenter/dtr/2.3/guides/admin/configure/use-your-own-tls-certificates.md @@ -8,7 +8,7 @@ keywords: dtr, tls By default the DTR services are exposed using HTTPS, to ensure all communications between clients and DTR is encrypted. Since DTR replicas use self-signed certificates for this, when a client accesses -DTR, their browsers won't trust this certificate, so the browser displays a +DTR, their browsers don't trust this certificate, so the browser displays a warning message. You can configure DTR to use your own certificates, so that it is automatically @@ -37,7 +37,7 @@ Finally, click **Save** for the changes to take effect. If you're using certificates issued by a globally trusted certificate authority, any web browser or client tool should now trust DTR. 
If you're using an internal -certificate authority, you'll need to configure your system to trust that +certificate authority, you need to configure your system to trust that certificate authority. ## Where to go next diff --git a/datacenter/dtr/2.3/guides/admin/install/index.md b/datacenter/dtr/2.3/guides/admin/install/index.md index 9f1f9afcfe5..c09d036e3f2 100644 --- a/datacenter/dtr/2.3/guides/admin/install/index.md +++ b/datacenter/dtr/2.3/guides/admin/install/index.md @@ -34,7 +34,7 @@ choose **Docker Trusted Registry**. ![](../../images/install-dtr-2.png){: .with-border} -After you configure all the options, you'll have a snippet that you can use +After you configure all the options, you have a snippet that you can use to deploy DTR. It should look like this: ```none diff --git a/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-orgs.md b/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-orgs.md index 1e811e74ebc..ed35c7bfed3 100644 --- a/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-orgs.md +++ b/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-orgs.md @@ -22,8 +22,8 @@ organization. ![](../../images/create-and-manage-orgs-2.png){: .with-border} -Repositories owned by this organization will contain the organization name, so -to pull an image from that repository, you'll use: +Repositories owned by this organization contain the organization name, so +to pull an image from that repository, use: ```bash $ docker pull //: @@ -31,7 +31,7 @@ $ docker pull //: Click **Save** to create the organization, and then **click the organization** to define which users are allowed to manage this -organization. These users will be able to edit the organization settings, edit +organization. These users can edit the organization settings, edit all repositories owned by the organization, and define the user permissions for this organization. 
diff --git a/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-teams.md b/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-teams.md index 18ee0e5f97a..435305d92e7 100644 --- a/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-teams.md +++ b/datacenter/dtr/2.3/guides/admin/manage-users/create-and-manage-teams.md @@ -12,7 +12,7 @@ defines the permissions a set of users have for a set of repositories. To create a new team, go to the **DTR web UI**, and navigate to the **Organizations** page. Then **click the organization** where you want to create the team. In this -example, we'll create the 'billing' team under the 'whale' organization. +example, we create the 'billing' team under the 'whale' organization. ![](../../images/create-and-manage-teams-1.png){: .with-border} diff --git a/datacenter/dtr/2.3/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md b/datacenter/dtr/2.3/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md index f3beb8811e6..90f25bb5944 100644 --- a/datacenter/dtr/2.3/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md +++ b/datacenter/dtr/2.3/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md @@ -69,7 +69,7 @@ Jobs can be in one of the following status: ## Job capacity -Each job runner has a limited capacity and won't claim jobs that require an +Each job runner has a limited capacity and doesn't claim jobs that require a higher capacity. 
You can see the capacity of a job runner using the `GET /api/v0/workers` endpoint: @@ -124,8 +124,8 @@ are available: } ``` -Our worker will be able to pick up job id `0` and `2` since it has the capacity -for both, while id `1` will have to wait until the previous scan job is complete: +Our worker can pick up job id `0` and `2` since it has the capacity +for both, while id `1` needs to wait until the previous scan job is complete: ```json { diff --git a/datacenter/dtr/2.3/guides/admin/upgrade.md b/datacenter/dtr/2.3/guides/admin/upgrade.md index ac945bcc2fd..0d1ad160ce4 100644 --- a/datacenter/dtr/2.3/guides/admin/upgrade.md +++ b/datacenter/dtr/2.3/guides/admin/upgrade.md @@ -13,8 +13,8 @@ support upgrades according to the following rules: * When upgrading between minor versions, you can't skip versions, but you can upgrade from any patch versions of the previous minor version to any patch version of the current minor version. -* When upgrading between major versions you also have to upgrade one major - version at a time, but you have to upgrade to the earliest available minor +* When upgrading between major versions you also need to upgrade one major + version at a time, but you need to upgrade to the earliest available minor version. We also strongly recommend upgrading to the latest minor/patch version for your major version first. diff --git a/datacenter/dtr/2.3/guides/release-notes.md b/datacenter/dtr/2.3/guides/release-notes.md index 8116e6eca2f..adf102ad31f 100644 --- a/datacenter/dtr/2.3/guides/release-notes.md +++ b/datacenter/dtr/2.3/guides/release-notes.md @@ -99,7 +99,7 @@ vulnerability database. ### Known issues -* You can't upgrade from 2.3.0 to 2.3.1. Please upgrade to 2.3.2 directly. +* You can't upgrade from 2.3.0 to 2.3.1. Upgrade to 2.3.2 directly. 
## DTR 2.3.0 diff --git a/datacenter/dtr/2.3/guides/user/access-dtr/configure-your-notary-client.md b/datacenter/dtr/2.3/guides/user/access-dtr/configure-your-notary-client.md index d352c82c00e..2142a73f7e5 100644 --- a/datacenter/dtr/2.3/guides/user/access-dtr/configure-your-notary-client.md +++ b/datacenter/dtr/2.3/guides/user/access-dtr/configure-your-notary-client.md @@ -6,8 +6,8 @@ keywords: registry, notary, trust The Docker CLI client makes it easy to sign images but to streamline that process it generates a set of private and public keys that are not tied -to your UCP account. This means that you'll be able to push and sign images to -DTR, but UCP won't trust those images since it doesn't know anything about +to your UCP account. This means that you can push and sign images to +DTR, but UCP doesn't trust those images since it doesn't know anything about the keys you're using. So before signing and pushing images to DTR you should: @@ -121,7 +121,7 @@ Import the private key in your UCP bundle into the Notary CLI client: notary key import ``` -The private key is copied to `~/.docker/trust`, and you'll be prompted for a +The private key is copied to `~/.docker/trust`, and you are prompted for a password to encrypt it. You can validate what keys Notary knows about by running: diff --git a/datacenter/dtr/2.3/guides/user/access-dtr/index.md b/datacenter/dtr/2.3/guides/user/access-dtr/index.md index af8bee99f61..d22a84c15c2 100644 --- a/datacenter/dtr/2.3/guides/user/access-dtr/index.md +++ b/datacenter/dtr/2.3/guides/user/access-dtr/index.md @@ -9,7 +9,7 @@ image registry like Docker Trusted Registry. If DTR is using the default configurations or was configured to use self-signed certificates, you need to configure your Docker Engine to trust DTR. 
Otherwise, -when you try to log in, push to, or pull images from DTR, you'll get an error: +when you try to log in, push to, or pull images from DTR, you get an error: ```none $ docker login dtr.example.org diff --git a/datacenter/dtr/2.3/guides/user/create-and-manage-webhooks.md b/datacenter/dtr/2.3/guides/user/create-and-manage-webhooks.md index a706eceb5d7..89d30efbd60 100644 --- a/datacenter/dtr/2.3/guides/user/create-and-manage-webhooks.md +++ b/datacenter/dtr/2.3/guides/user/create-and-manage-webhooks.md @@ -17,7 +17,7 @@ the **Webhooks** tab, and click **New Webhook**. Select the event that will trigger the webhook, and set the URL to send information about the event. Once everything is set up, click **Test** for DTR to send a JSON payload to the URL you set up, so that you can validate -that the integration is working. You'll get an event that looks like this: +that the integration is working. You get an event that looks like this: ``` { diff --git a/datacenter/dtr/2.3/guides/user/create-promotion-policies.md b/datacenter/dtr/2.3/guides/user/create-promotion-policies.md index ece2a2bca44..555cd39b322 100644 --- a/datacenter/dtr/2.3/guides/user/create-promotion-policies.md +++ b/datacenter/dtr/2.3/guides/user/create-promotion-policies.md @@ -76,7 +76,7 @@ your new tag: In this example, if a tag in the `docker/website-dev` doesn't have -vulnerabilities and the tag name contains `stable`, we'll automatically +vulnerabilities and the tag name contains `stable`, we automatically push that image to `docker/website-prod` and tag it with the timestamp of when the image was promoted. 
diff --git a/datacenter/dtr/2.3/guides/user/manage-images/index.md b/datacenter/dtr/2.3/guides/user/manage-images/index.md index b8369d3c6e5..a1e578d932d 100644 --- a/datacenter/dtr/2.3/guides/user/manage-images/index.md +++ b/datacenter/dtr/2.3/guides/user/manage-images/index.md @@ -5,9 +5,9 @@ keywords: registry, repository --- Since DTR is secure by default, you need to create the image repository before -being able to push the image to DTR. +you can push the image to DTR. -In this example, we'll create the 'golang' repository in DTR. +In this example, we create the 'golang' repository in DTR. ## Create a repository diff --git a/datacenter/dtr/2.3/guides/user/manage-images/prevent-tags-from-being-overwritten.md b/datacenter/dtr/2.3/guides/user/manage-images/prevent-tags-from-being-overwritten.md index 435c44a2531..3e87c02f551 100644 --- a/datacenter/dtr/2.3/guides/user/manage-images/prevent-tags-from-being-overwritten.md +++ b/datacenter/dtr/2.3/guides/user/manage-images/prevent-tags-from-being-overwritten.md @@ -18,7 +18,7 @@ This might make it difficult to trace back the image to the build that generated it. To prevent this from happening, you can configure a repository to be immutable. -Once you push a tag, DTR won't allow anyone else to push another tag with the same +Once you push a tag, DTR doesn't allow anyone else to push another tag with the same name. ## Make tags immutable diff --git a/datacenter/dtr/2.3/guides/user/manage-images/pull-and-push-images.md b/datacenter/dtr/2.3/guides/user/manage-images/pull-and-push-images.md index ddfd5c4c66e..1f577f27870 100644 --- a/datacenter/dtr/2.3/guides/user/manage-images/pull-and-push-images.md +++ b/datacenter/dtr/2.3/guides/user/manage-images/pull-and-push-images.md @@ -46,7 +46,7 @@ to store the image. 
In this example the full name of our repository is ### Tag the image -In this example we'll pull the {{ repo }} image from Docker Hub and tag with +In this example we pull the {{ repo }} image from Docker Hub and tag with the full DTR and repository name. A tag defines where the image was pulled from, and where it will be pushed to. @@ -78,7 +78,7 @@ Official Microsoft Windows images or any image you create based on them aren't distributable by default. When you push a Windows image to DTR, Docker only pushes the image manifest but not the image layers. This means that: -* DTR won't be able to scan those images for vulnerabilities since DTR doesn't +* DTR can't scan those images for vulnerabilities since DTR doesn't have access to the layers * When a user pulls a Windows image from DTR, they are redirected to a Microsoft registry to fetch the layers diff --git a/datacenter/dtr/2.3/guides/user/manage-images/scan-images-for-vulnerabilities.md b/datacenter/dtr/2.3/guides/user/manage-images/scan-images-for-vulnerabilities.md index e1201dd9fc5..22a079b99f8 100644 --- a/datacenter/dtr/2.3/guides/user/manage-images/scan-images-for-vulnerabilities.md +++ b/datacenter/dtr/2.3/guides/user/manage-images/scan-images-for-vulnerabilities.md @@ -38,7 +38,7 @@ this database is updated, DTR reviews the indexed components for newly discovered vulnerabilities. DTR scans both Linux and Windows images, but but by default Docker doesn't push -foreign image layers for Windows images so DTR won't be able to scan them. If +foreign image layers for Windows images so DTR can't scan them. If you want DTR to scan your Windows images, [configure Docker to always push image layers](pull-and-push-images.md), and it will scan the non-foreign layers. 
diff --git a/datacenter/dtr/2.3/guides/user/manage-images/sign-images/delegate-image-signing.md b/datacenter/dtr/2.3/guides/user/manage-images/sign-images/delegate-image-signing.md index 296343ff4d8..2c124236d8d 100644 --- a/datacenter/dtr/2.3/guides/user/manage-images/sign-images/delegate-image-signing.md +++ b/datacenter/dtr/2.3/guides/user/manage-images/sign-images/delegate-image-signing.md @@ -49,7 +49,7 @@ UCP requires that you delegate trust to two different roles: * `targets/releases` * `targets/`, where `` is the UCP team the user belongs to -In this example we'll delegate trust to `targets/releases` and `targets/qa`: +In this example we delegate trust to `targets/releases` and `targets/qa`: ```none # Delegate trust, and add that public key with the role targets/releases @@ -63,9 +63,9 @@ notary delegation add --publish \ --all-paths ``` -Now members from the QA team just have to [configure their Notary CLI client +Now members from the QA team just need to [configure their Notary CLI client with UCP private keys](../../access-dtr/configure-your-notary-client.md) -to be able to [push and sign images](index.md) into the `dev/nginx` repository. +before [pushing and signing images](index.md) into the `dev/nginx` repository. ## Where to go next diff --git a/datacenter/dtr/2.3/guides/user/manage-images/sign-images/index.md b/datacenter/dtr/2.3/guides/user/manage-images/sign-images/index.md index 883db9eaa8d..add8e4487ce 100644 --- a/datacenter/dtr/2.3/guides/user/manage-images/sign-images/index.md +++ b/datacenter/dtr/2.3/guides/user/manage-images/sign-images/index.md @@ -29,7 +29,7 @@ to the Notary Server internal to DTR. ## Sign images that UCP can trust -With the command above you'll be able to sign your DTR images, but UCP won't +With the command above, you can sign your DTR images, but UCP doesn't trust them because it can't tie the private key you're using to sign the images to your UCP account. 
@@ -41,8 +41,8 @@ To sign images in a way that UCP trusts them, you need to: In this example we're going to pull an NGINX image from Docker Store, re-tag it as `dtr.example.org/dev/nginx:1`, push the image to DTR and sign it -in a way that is trusted by UCP. If you manage multiple repositories, you'll -have to do the same procedure for every one of them. +in a way that is trusted by UCP. If you manage multiple repositories, you +need to do the same procedure for every one of them. ### Configure your Notary client @@ -79,7 +79,7 @@ repository. ![DTR](../../../images/sign-an-image-3.png){: .with-border} -DTR shows that the image is signed, but UCP won't trust the image +DTR shows that the image is signed, but UCP doesn't trust the image because it doesn't have any information about the private keys used to sign the image. @@ -94,7 +94,7 @@ UCP requires that you delegate trust to two different roles: * `targets/releases` * `targets/`, where `` is the UCP team the user belongs to -In this example we'll delegate trust to `targets/releases` and `targets/admin`: +In this example we delegate trust to `targets/releases` and `targets/admin`: ```none # Delegate trust, and add that public key with the role targets/releases @@ -108,7 +108,7 @@ notary delegation add --publish \ --all-paths ``` -To push the new signing metadata to the Notary server, you'll have to push +To push the new signing metadata to the Notary server, you need to push the image again: ```none diff --git a/datacenter/dtr/2.4/guides/admin/backups-and-disaster-recovery.md b/datacenter/dtr/2.4/guides/admin/backups-and-disaster-recovery.md index c7dfd8b91d8..0ee30d02dc8 100644 --- a/datacenter/dtr/2.4/guides/admin/backups-and-disaster-recovery.md +++ b/datacenter/dtr/2.4/guides/admin/backups-and-disaster-recovery.md @@ -161,7 +161,7 @@ To restore DTR, you need to: You need to restore DTR on the same UCP cluster where you've created the backup. 
If you restore on a different UCP cluster, all DTR resources will be -owned by users that don't exist, so you'll not be able to manage the resources, +owned by users that don't exist, so you can't manage the resources, even though they're stored in the DTR data store. When restoring, you need to use the same version of the `docker/dtr` image diff --git a/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/chaining.md b/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/chaining.md index 63184c38cfe..a4bdcf7a42c 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/chaining.md +++ b/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/chaining.md @@ -12,7 +12,7 @@ caches together for faster pulls. Too many levels of chaining might slow down pulls, so you should try different configurations and benchmark them, to find out the right configuration. -In this example we'll show how to configure two caches. A dedicated cache for +This example shows how to configure two caches. A dedicated cache for the Asia region that pulls images directly from DTR, and a cache for China, that pulls images from the Asia cache. diff --git a/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/index.md b/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/index.md index aac2639ec67..3c4bc066118 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/index.md +++ b/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/index.md @@ -209,7 +209,7 @@ tab, and change the **Content Cache** settings to use the **region-us** cache. You can also automate this through the `/api/v0/accounts/{username}/settings` API. -Now when you pull images, you'll be using the cache. To test this, try pulling +Now when you pull images, you use the cache. To test this, try pulling an image from DTR. You can inspect the logs of the cache service, to validate that the cache is being used, and troubleshoot problems. 
diff --git a/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/tls.md b/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/tls.md index fb4f073da12..1087dc6186f 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/tls.md +++ b/datacenter/dtr/2.4/guides/admin/configure/deploy-caches/tls.md @@ -14,17 +14,17 @@ You can learn more about the supported configuration in the ## Get the TLS certificate and keys -Before deploying a DTR cache with TLS you need to get a public key -certificate for the domain name were you'll deploy the cache. You'll also -need the public and private key files for that certificate. +Before deploying a DTR cache with TLS you need to obtain a public key +certificate for the domain name where you deploy the cache, as well as +the public and private key files for that certificate. -Once you have then, transfer those file to the host where you'll deploy +Once you have them, transfer those files to the host where you plan to deploy the DTR cache. ## Create the cache configuration -Use SSH to log into the host where you'll deploy the DTR cache, and navigate to +Use SSH to log into the host where you plan to deploy the DTR cache, and navigate to the directory where you've stored the TLS certificate and keys. 
Create the `config.yml` file with the following content: diff --git a/datacenter/dtr/2.4/guides/admin/configure/enable-single-sign-on.md b/datacenter/dtr/2.4/guides/admin/configure/enable-single-sign-on.md index da8012e30c0..3d5ae48589a 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/enable-single-sign-on.md +++ b/datacenter/dtr/2.4/guides/admin/configure/enable-single-sign-on.md @@ -1,14 +1,14 @@ --- title: Enable single sign-on -description: Learn how to set up single sign-on between UCP and DTR, so that your users only have to authenticate once +description: Learn how to set up single sign-on between UCP and DTR, so that your users only need to authenticate once keywords: dtr, login, sso --- -By default, users are shared between UCP and DTR, but you have to authenticate +By default, users are shared between UCP and DTR, but you need to authenticate separately on the web UI of both applications. You can configure DTR to have single sign-on (SSO) with UCP, so that users only -have to authenticate once. +need to authenticate once. ## At installation time diff --git a/datacenter/dtr/2.4/guides/admin/configure/external-storage/s3.md b/datacenter/dtr/2.4/guides/admin/configure/external-storage/s3.md index e86bb967009..d42e3b0bb0d 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/external-storage/s3.md +++ b/datacenter/dtr/2.4/guides/admin/configure/external-storage/s3.md @@ -26,8 +26,8 @@ Then, as a best practice you should just for the DTR integration and apply an IAM policy that ensures the user has limited permissions. -This user only needs permissions to access the bucket that you'll use to store -images, and be able to read, write, and delete files. +This user only needs permissions to access the bucket that you use to store +images, and the ability to read, write, and delete files. Here's an example of a policy like that: @@ -98,11 +98,11 @@ Once you click **Save**, DTR validates the configurations and saves the changes. 
## Configure your clients -If you're using a TLS certificate in your storage backend that's not globally -trusted, you'll have to configure all Docker Engines that push or pull from DTR +If you use a TLS certificate in your storage backend that's not globally +trusted, you need to configure all Docker Engines that push or pull from DTR to trust that certificate. When you push or pull an image DTR redirects the requests to the storage backend, so if clients don't trust the TLS certificates -of both DTR and the storage backend, they won't be able to push or pull images. +of both DTR and the storage backend, they can't push or pull images. [Learn how to configure the Docker client](../../../user/access-dtr/index.md). And if you've configured DTR to skip TLS verification, you also need to diff --git a/datacenter/dtr/2.4/guides/admin/configure/garbage-collection.md b/datacenter/dtr/2.4/guides/admin/configure/garbage-collection.md index 43c67517e60..4c384d7d8c2 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/garbage-collection.md +++ b/datacenter/dtr/2.4/guides/admin/configure/garbage-collection.md @@ -73,7 +73,7 @@ files of that image tag since it's possible that there are other tags that also use the same files. To delete unused image layers, DTR: -1. Becomes read-only to make sure that no one is able to push an image, thus +1. Becomes read-only to make sure that no one can push an image, thus changing the underlying files in the filesystem. 2. Check all the manifest files and keep a record of the files that are referenced. diff --git a/datacenter/dtr/2.4/guides/admin/configure/license-your-installation.md b/datacenter/dtr/2.4/guides/admin/configure/license-your-installation.md index eabe2be0dfa..3c14e283a53 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/license-your-installation.md +++ b/datacenter/dtr/2.4/guides/admin/configure/license-your-installation.md @@ -8,7 +8,7 @@ By default, you don't need to license your Docker Trusted Registry. 
When installing DTR, it automatically starts using the same license file used on your Docker Universal Control Plane cluster. -However, there are some situations when you have to manually license your +However, there are some situations when you need to manually license your DTR installation: * When upgrading to a new major version, diff --git a/datacenter/dtr/2.4/guides/admin/configure/use-a-load-balancer.md b/datacenter/dtr/2.4/guides/admin/configure/use-a-load-balancer.md index 181e82097b8..1bd4194c98c 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/use-a-load-balancer.md +++ b/datacenter/dtr/2.4/guides/admin/configure/use-a-load-balancer.md @@ -53,7 +53,7 @@ with more details on any one of these services: * Metadata persistence (rethinkdb) * Content trust (notary) -Note that this endpoint is for checking the health of a *single* replica. To get +This endpoint is for checking the health of a *single* replica. To get the health of every replica in a cluster, querying each replica individiually is the preferred way to do it in real time. diff --git a/datacenter/dtr/2.4/guides/admin/configure/use-a-web-proxy.md b/datacenter/dtr/2.4/guides/admin/configure/use-a-web-proxy.md index 5b60e8703bd..9490e5e2ad9 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/use-a-web-proxy.md +++ b/datacenter/dtr/2.4/guides/admin/configure/use-a-web-proxy.md @@ -7,7 +7,7 @@ keywords: dtr, configure, http, proxy Docker Trusted Registry makes outgoing connections to check for new versions, automatically renew its license, and update its vulnerability database. -If DTR can't access the internet, then you'll have to manually apply updates. +If DTR can't access the internet, then you need to manually apply updates. One option to keep your environment secure while still allowing DTR access to the internet is to use a web proxy. 
If you have an HTTP or HTTPS proxy, you diff --git a/datacenter/dtr/2.4/guides/admin/configure/use-your-own-tls-certificates.md b/datacenter/dtr/2.4/guides/admin/configure/use-your-own-tls-certificates.md index 5bfddcf985d..5ff5fe3031c 100644 --- a/datacenter/dtr/2.4/guides/admin/configure/use-your-own-tls-certificates.md +++ b/datacenter/dtr/2.4/guides/admin/configure/use-your-own-tls-certificates.md @@ -8,7 +8,7 @@ keywords: dtr, tls By default the DTR services are exposed using HTTPS, to ensure all communications between clients and DTR is encrypted. Since DTR replicas use self-signed certificates for this, when a client accesses -DTR, their browsers won't trust this certificate, so the browser displays a +DTR, their browsers don't trust this certificate, so the browser displays a warning message. You can configure DTR to use your own certificates, so that it is automatically @@ -37,7 +37,7 @@ Finally, click **Save** for the changes to take effect. If you're using certificates issued by a globally trusted certificate authority, any web browser or client tool should now trust DTR. If you're using an internal -certificate authority, you'll need to configure your system to trust that +certificate authority, configure your system to trust that certificate authority. ## Where to go next diff --git a/datacenter/dtr/2.4/guides/admin/install/index.md b/datacenter/dtr/2.4/guides/admin/install/index.md index 9f1f9afcfe5..c09d036e3f2 100644 --- a/datacenter/dtr/2.4/guides/admin/install/index.md +++ b/datacenter/dtr/2.4/guides/admin/install/index.md @@ -34,7 +34,7 @@ choose **Docker Trusted Registry**. ![](../../images/install-dtr-2.png){: .with-border} -After you configure all the options, you'll have a snippet that you can use +After you configure all the options, you have a snippet that you can use to deploy DTR. 
It should look like this: ```none diff --git a/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-orgs.md b/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-orgs.md index 1e811e74ebc..fd4ba27b524 100644 --- a/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-orgs.md +++ b/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-orgs.md @@ -23,7 +23,7 @@ organization. ![](../../images/create-and-manage-orgs-2.png){: .with-border} Repositories owned by this organization will contain the organization name, so -to pull an image from that repository, you'll use: +to pull an image from that repository, use: ```bash $ docker pull //: @@ -31,7 +31,7 @@ $ docker pull //: Click **Save** to create the organization, and then **click the organization** to define which users are allowed to manage this -organization. These users will be able to edit the organization settings, edit +organization. These users can edit the organization settings, edit all repositories owned by the organization, and define the user permissions for this organization. diff --git a/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-teams.md b/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-teams.md index 18ee0e5f97a..435305d92e7 100644 --- a/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-teams.md +++ b/datacenter/dtr/2.4/guides/admin/manage-users/create-and-manage-teams.md @@ -12,7 +12,7 @@ defines the permissions a set of users have for a set of repositories. To create a new team, go to the **DTR web UI**, and navigate to the **Organizations** page. Then **click the organization** where you want to create the team. In this -example, we'll create the 'billing' team under the 'whale' organization. +example, we create the 'billing' team under the 'whale' organization. 
![](../../images/create-and-manage-teams-1.png){: .with-border} diff --git a/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/notary-audit-logs.md b/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/notary-audit-logs.md index 44e543f8b5a..577821f6c03 100644 --- a/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/notary-audit-logs.md +++ b/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/notary-audit-logs.md @@ -258,8 +258,8 @@ export TOKEN=$(curl --insecure --silent \ 'https:///auth/token?realm=dtr&service=dtr&scope=repository:/:pull' | jq --raw-output .token) # Get audit logs for all repositories and pretty-print it -# If you pushed the image less than 60 seconds ago, it's possible that -# Docker Content Trust won't show any events. Retry the command after a while. +# If you pushed the image less than 60 seconds ago, Docker Content Trust may not +# show any events. Retry the command after a while. curl --insecure --silent \ --header "Authorization: Bearer $TOKEN" \ "https:///v2///_trust/changefeed?records=10&change_id=0" | jq . diff --git a/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md b/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md index f3beb8811e6..3864cc5ec90 100644 --- a/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md +++ b/datacenter/dtr/2.4/guides/admin/monitor-and-troubleshoot/troubleshoot-batch-jobs.md @@ -69,7 +69,7 @@ Jobs can be in one of the following status: ## Job capacity -Each job runner has a limited capacity and won't claim jobs that require an +Each job runner has a limited capacity and doesn't claim jobs that require a higher capacity. 
You can see the capacity of a job runner using the `GET /api/v0/workers` endpoint: @@ -124,8 +124,8 @@ are available: } ``` -Our worker will be able to pick up job id `0` and `2` since it has the capacity -for both, while id `1` will have to wait until the previous scan job is complete: +Our worker can pick up job id `0` and `2` since it has the capacity +for both, while id `1` needs to wait until the previous scan job is complete: ```json { diff --git a/datacenter/dtr/2.4/guides/admin/upgrade.md b/datacenter/dtr/2.4/guides/admin/upgrade.md index 0a2aa2ad14a..383dc933f9c 100644 --- a/datacenter/dtr/2.4/guides/admin/upgrade.md +++ b/datacenter/dtr/2.4/guides/admin/upgrade.md @@ -15,8 +15,8 @@ support upgrades according to the following rules: * When upgrading between minor versions, you can't skip versions, but you can upgrade from any patch versions of the previous minor version to any patch version of the current minor version. -* When upgrading between major versions you also have to upgrade one major - version at a time, but you have to upgrade to the earliest available minor +* When upgrading between major versions you also need to upgrade one major + version at a time, but you need to upgrade to the earliest available minor version. We also strongly recommend upgrading to the latest minor/patch version for your major version first. diff --git a/datacenter/dtr/2.4/guides/user/access-dtr/configure-your-notary-client.md b/datacenter/dtr/2.4/guides/user/access-dtr/configure-your-notary-client.md index d352c82c00e..2142a73f7e5 100644 --- a/datacenter/dtr/2.4/guides/user/access-dtr/configure-your-notary-client.md +++ b/datacenter/dtr/2.4/guides/user/access-dtr/configure-your-notary-client.md @@ -6,8 +6,8 @@ keywords: registry, notary, trust The Docker CLI client makes it easy to sign images but to streamline that process it generates a set of private and public keys that are not tied -to your UCP account. 
This means that you'll be able to push and sign images to -DTR, but UCP won't trust those images since it doesn't know anything about +to your UCP account. This means that you can push and sign images to +DTR, but UCP doesn't trust those images since it doesn't know anything about the keys you're using. So before signing and pushing images to DTR you should: @@ -121,7 +121,7 @@ Import the private key in your UCP bundle into the Notary CLI client: notary key import ``` -The private key is copied to `~/.docker/trust`, and you'll be prompted for a +The private key is copied to `~/.docker/trust`, and you are prompted for a password to encrypt it. You can validate what keys Notary knows about by running: diff --git a/datacenter/dtr/2.4/guides/user/access-dtr/index.md b/datacenter/dtr/2.4/guides/user/access-dtr/index.md index af8bee99f61..d22a84c15c2 100644 --- a/datacenter/dtr/2.4/guides/user/access-dtr/index.md +++ b/datacenter/dtr/2.4/guides/user/access-dtr/index.md @@ -9,7 +9,7 @@ image registry like Docker Trusted Registry. If DTR is using the default configurations or was configured to use self-signed certificates, you need to configure your Docker Engine to trust DTR. Otherwise, -when you try to log in, push to, or pull images from DTR, you'll get an error: +when you try to log in, push to, or pull images from DTR, you get an error: ```none $ docker login dtr.example.org diff --git a/datacenter/dtr/2.4/guides/user/create-and-manage-webhooks.md b/datacenter/dtr/2.4/guides/user/create-and-manage-webhooks.md index a706eceb5d7..89d30efbd60 100644 --- a/datacenter/dtr/2.4/guides/user/create-and-manage-webhooks.md +++ b/datacenter/dtr/2.4/guides/user/create-and-manage-webhooks.md @@ -17,7 +17,7 @@ the **Webhooks** tab, and click **New Webhook**. Select the event that will trigger the webhook, and set the URL to send information about the event. 
Once everything is set up, click **Test** for DTR to send a JSON payload to the URL you set up, so that you can validate -that the integration is working. You'll get an event that looks like this: +that the integration is working. You get an event that looks like this: ``` { diff --git a/datacenter/dtr/2.4/guides/user/create-promotion-policies.md b/datacenter/dtr/2.4/guides/user/create-promotion-policies.md index 2205fb68989..69776c3d962 100644 --- a/datacenter/dtr/2.4/guides/user/create-promotion-policies.md +++ b/datacenter/dtr/2.4/guides/user/create-promotion-policies.md @@ -74,7 +74,7 @@ your new tag: In this example, if a tag in the `docker/website-dev` doesn't have -vulnerabilities and the tag name contains `stable`, we'll automatically +vulnerabilities and the tag name contains `stable`, we automatically push that image to `docker/website-prod` and tag it with the timestamp of when the image was promoted. diff --git a/datacenter/dtr/2.4/guides/user/manage-images/index.md b/datacenter/dtr/2.4/guides/user/manage-images/index.md index b8369d3c6e5..4de52d76374 100644 --- a/datacenter/dtr/2.4/guides/user/manage-images/index.md +++ b/datacenter/dtr/2.4/guides/user/manage-images/index.md @@ -5,9 +5,9 @@ keywords: registry, repository --- Since DTR is secure by default, you need to create the image repository before -being able to push the image to DTR. +pushing the image to DTR. -In this example, we'll create the 'golang' repository in DTR. +In this example, we create the 'golang' repository in DTR. 
## Create a repository diff --git a/datacenter/dtr/2.4/guides/user/manage-images/prevent-tags-from-being-overwritten.md b/datacenter/dtr/2.4/guides/user/manage-images/prevent-tags-from-being-overwritten.md index 435c44a2531..3e87c02f551 100644 --- a/datacenter/dtr/2.4/guides/user/manage-images/prevent-tags-from-being-overwritten.md +++ b/datacenter/dtr/2.4/guides/user/manage-images/prevent-tags-from-being-overwritten.md @@ -18,7 +18,7 @@ This might make it difficult to trace back the image to the build that generated it. To prevent this from happening, you can configure a repository to be immutable. -Once you push a tag, DTR won't allow anyone else to push another tag with the same +Once you push a tag, DTR doesn't allow anyone else to push another tag with the same name. ## Make tags immutable diff --git a/datacenter/dtr/2.4/guides/user/manage-images/pull-and-push-images.md b/datacenter/dtr/2.4/guides/user/manage-images/pull-and-push-images.md index ddfd5c4c66e..1f577f27870 100644 --- a/datacenter/dtr/2.4/guides/user/manage-images/pull-and-push-images.md +++ b/datacenter/dtr/2.4/guides/user/manage-images/pull-and-push-images.md @@ -46,7 +46,7 @@ to store the image. In this example the full name of our repository is ### Tag the image -In this example we'll pull the {{ repo }} image from Docker Hub and tag with +In this example we pull the {{ repo }} image from Docker Hub and tag with the full DTR and repository name. A tag defines where the image was pulled from, and where it will be pushed to. @@ -78,7 +78,7 @@ Official Microsoft Windows images or any image you create based on them aren't distributable by default. When you push a Windows image to DTR, Docker only pushes the image manifest but not the image layers. 
This means that: -* DTR won't be able to scan those images for vulnerabilities since DTR doesn't +* DTR can't scan those images for vulnerabilities since DTR doesn't have access to the layers * When a user pulls a Windows image from DTR, they are redirected to a Microsoft registry to fetch the layers diff --git a/datacenter/dtr/2.4/guides/user/manage-images/scan-images-for-vulnerabilities.md b/datacenter/dtr/2.4/guides/user/manage-images/scan-images-for-vulnerabilities.md index e1201dd9fc5..22a079b99f8 100644 --- a/datacenter/dtr/2.4/guides/user/manage-images/scan-images-for-vulnerabilities.md +++ b/datacenter/dtr/2.4/guides/user/manage-images/scan-images-for-vulnerabilities.md @@ -38,7 +38,7 @@ this database is updated, DTR reviews the indexed components for newly discovered vulnerabilities. DTR scans both Linux and Windows images, but but by default Docker doesn't push -foreign image layers for Windows images so DTR won't be able to scan them. If +foreign image layers for Windows images so DTR can't scan them. If you want DTR to scan your Windows images, [configure Docker to always push image layers](pull-and-push-images.md), and it will scan the non-foreign layers. 
diff --git a/datacenter/dtr/2.4/guides/user/manage-images/sign-images/delegate-image-signing.md b/datacenter/dtr/2.4/guides/user/manage-images/sign-images/delegate-image-signing.md index 296343ff4d8..7818fd360ee 100644 --- a/datacenter/dtr/2.4/guides/user/manage-images/sign-images/delegate-image-signing.md +++ b/datacenter/dtr/2.4/guides/user/manage-images/sign-images/delegate-image-signing.md @@ -49,7 +49,7 @@ UCP requires that you delegate trust to two different roles: * `targets/releases` * `targets/`, where `` is the UCP team the user belongs to -In this example we'll delegate trust to `targets/releases` and `targets/qa`: +In this example we delegate trust to `targets/releases` and `targets/qa`: ```none # Delegate trust, and add that public key with the role targets/releases @@ -63,9 +63,9 @@ notary delegation add --publish \ --all-paths ``` -Now members from the QA team just have to [configure their Notary CLI client +Now members from the QA team just need to [configure their Notary CLI client with UCP private keys](../../access-dtr/configure-your-notary-client.md) -to be able to [push and sign images](index.md) into the `dev/nginx` repository. +to [push and sign images](index.md) into the `dev/nginx` repository. ## Where to go next diff --git a/datacenter/dtr/2.4/guides/user/manage-images/sign-images/index.md b/datacenter/dtr/2.4/guides/user/manage-images/sign-images/index.md index 883db9eaa8d..add8e4487ce 100644 --- a/datacenter/dtr/2.4/guides/user/manage-images/sign-images/index.md +++ b/datacenter/dtr/2.4/guides/user/manage-images/sign-images/index.md @@ -29,7 +29,7 @@ to the Notary Server internal to DTR. ## Sign images that UCP can trust -With the command above you'll be able to sign your DTR images, but UCP won't +With the command above, you can sign your DTR images, but UCP doesn't trust them because it can't tie the private key you're using to sign the images to your UCP account. 
@@ -41,8 +41,8 @@ To sign images in a way that UCP trusts them, you need to: In this example we're going to pull an NGINX image from Docker Store, re-tag it as `dtr.example.org/dev/nginx:1`, push the image to DTR and sign it -in a way that is trusted by UCP. If you manage multiple repositories, you'll -have to do the same procedure for every one of them. +in a way that is trusted by UCP. If you manage multiple repositories, you +need to do the same procedure for every one of them. ### Configure your Notary client @@ -79,7 +79,7 @@ repository. ![DTR](../../../images/sign-an-image-3.png){: .with-border} -DTR shows that the image is signed, but UCP won't trust the image +DTR shows that the image is signed, but UCP doesn't trust the image because it doesn't have any information about the private keys used to sign the image. @@ -94,7 +94,7 @@ UCP requires that you delegate trust to two different roles: * `targets/releases` * `targets/`, where `` is the UCP team the user belongs to -In this example we'll delegate trust to `targets/releases` and `targets/admin`: +In this example we delegate trust to `targets/releases` and `targets/admin`: ```none # Delegate trust, and add that public key with the role targets/releases @@ -108,7 +108,7 @@ notary delegation add --publish \ --all-paths ``` -To push the new signing metadata to the Notary server, you'll have to push +To push the new signing metadata to the Notary server, you need to push the image again: ```none diff --git a/datacenter/ucp/1.1/access-ucp/cli-based-access.md b/datacenter/ucp/1.1/access-ucp/cli-based-access.md index 5a66d961ca2..eacadde47f5 100644 --- a/datacenter/ucp/1.1/access-ucp/cli-based-access.md +++ b/datacenter/ucp/1.1/access-ucp/cli-based-access.md @@ -78,7 +78,7 @@ Cluster Managers: 1 ## Download client certificates using the REST API You can also download client certificate bundles using the UCP REST API. 
In -this example we'll be using `curl` for making the web requests to the API, and +this example we use `curl` for making the web requests to the API, and `jq` to parse the responses. To install these tools on an Ubuntu distribution, you can run: diff --git a/datacenter/ucp/1.1/applications/deploy-app-cli.md b/datacenter/ucp/1.1/applications/deploy-app-cli.md index f8a8801623b..69d0a1de629 100644 --- a/datacenter/ucp/1.1/applications/deploy-app-cli.md +++ b/datacenter/ucp/1.1/applications/deploy-app-cli.md @@ -14,7 +14,7 @@ application. ## Get a client certificate bundle Docker UCP secures your cluster with role-based access control, so that only -authorized users can deploy applications to the cluster. To be able to run +authorized users can deploy applications to the cluster. To run Docker commands on the UCP cluster, you need to authenticate your requests using client certificates. diff --git a/datacenter/ucp/1.1/configuration/configure-logs.md b/datacenter/ucp/1.1/configuration/configure-logs.md index 77e4b7229e3..a67576e6b98 100644 --- a/datacenter/ucp/1.1/configuration/configure-logs.md +++ b/datacenter/ucp/1.1/configuration/configure-logs.md @@ -50,8 +50,8 @@ running Kibana and browse log/event entries. You should specify the "time" field for indexing. When deployed in a production environment, you should secure your ELK -stack. UCP does not do this itself, but there are a number of 3rd party -options that can accomplish this (e.g. Shield plug-in for Kibana) +stack. UCP does not do this itself, but a number of third-party +options can accomplish this, such as Shield plug-in for Kibana. 
## Where to go next diff --git a/datacenter/ucp/1.1/configuration/dtr-integration.md b/datacenter/ucp/1.1/configuration/dtr-integration.md index 3176565ab98..ad7edeaaad5 100644 --- a/datacenter/ucp/1.1/configuration/dtr-integration.md +++ b/datacenter/ucp/1.1/configuration/dtr-integration.md @@ -16,11 +16,11 @@ At an high-level, there are three steps to integrate UCP with DTR: * Configure DTR to trust UCP, * Configure the Docker Engine running on each UCP node to trust DTR. -When this is done, you'll be able to use a [UCP client bundle](../access-ucp/cli-based-access.md) -to push and pull your private images to a UCP node, without have to run the +When this is done, you can use a [UCP client bundle](../access-ucp/cli-based-access.md) +to push and pull your private images to a UCP node, without running the `docker login` command. -You'll also be to able pull images from the UCP web UI, without having to +You can also pull images from the UCP web UI, without having to provide your credentials. ## Prerequisites @@ -107,7 +107,7 @@ the **DTR** tab. ## Step 4. Configure DTR to trust UCP -In this step, you'll configure DTR to trust the UCP cluster root CA. This way, +In this step, you configure DTR to trust the UCP cluster root CA. This way, requests to DTR that present a certificate issued by the UCP cluster root CA are authorized: @@ -189,10 +189,10 @@ commands in the UCP cluster. ## Troubleshooting -When UCP or DTR are misconfigured, you'll get errors when pushing and pulling +When UCP or DTR are misconfigured, you get errors when pushing and pulling images from a UCP node to a private DTR repository. -When UCP can't communicate with DTR, you'll get: +When UCP can't communicate with DTR, you see: ```none $ docker push dtr/username/hello-world:1 @@ -207,7 +207,7 @@ In this case, check that UCP is properly configured and that it can communicate with DTR. 
When one of the components is misconfigured, and doesn't trust the root CA -certificate of the other components, you'll get an error like: +certificate of the other components, you see an error like: ```none $ docker push dtr/username/hello-world:1 diff --git a/datacenter/ucp/1.1/configuration/multi-host-networking.md b/datacenter/ucp/1.1/configuration/multi-host-networking.md index cced7b3d3a6..aa0bb4eafc3 100644 --- a/datacenter/ucp/1.1/configuration/multi-host-networking.md +++ b/datacenter/ucp/1.1/configuration/multi-host-networking.md @@ -13,7 +13,7 @@ Launching a container on one host, makes the container available to all hosts in that container network. Another name for this capability is multi-host networking. This page explains how to use the `engine-discovery` command to enable -multi-host container networks on your UCP installation. You'll do a complete +multi-host container networks on your UCP installation. The end result is a complete configuration on all nodes within your UCP deployment. ## About container networks and UCP @@ -95,7 +95,7 @@ Do this procedure on one node at a time: Do this procedure on one node at a time because if you restart all the controller daemons at the same time, you can increase the startup delay. This is -because `etcd` has to come up and establish quorum before the daemons can fully +because `etcd` needs to start and establish quorum before the daemons can fully recover. To enable the networking feature, do the following. @@ -206,7 +206,7 @@ the remaining nodes in the cluster. Once your UCP installation is up and running, you may need to add a new worker node or a new replica node. If you add a new worker node, you must run `engine-discovery` on the node after you `join` it to the cluster. If you need -to add a replica, you'll need: +to add a replica, you need: 1. Re-run network configuration process on the controller to add the replica.. 2. Run network configuration process on the new replica. 
@@ -270,7 +270,7 @@ A ping requires that inbound ICMP requests are allowed on the controller. $ sudo /usr/bin/docker daemon -D --cluster-advertise eth0:12376 --cluster-store etcd://CONTROLLER_PUBLIC_IP_OR_DOMAIN:12379 --cluster-store-opt kv.cacertfile=/var/lib/docker/discovery_certs/ca.pem --cluster-store-opt kv.certfile=/var/lib/docker/discovery_certs/cert.pem --cluster-store-opt kv.keyfile=/var/lib/docker/discovery_certs/key.pem ``` -Remember, you'll need to restart the daemon each time you change the start options. +Remember to restart the daemon each time you change the start options. ## Where to go next diff --git a/datacenter/ucp/1.1/configuration/use-externally-signed-certs.md b/datacenter/ucp/1.1/configuration/use-externally-signed-certs.md index ecffb576e82..2a51acfacb7 100644 --- a/datacenter/ucp/1.1/configuration/use-externally-signed-certs.md +++ b/datacenter/ucp/1.1/configuration/use-externally-signed-certs.md @@ -22,7 +22,7 @@ at any time. Since client certificate bundles are signed and verified with the UCP server certificates, if you replace the UCP server certificates, users have to -download new client certificate bundles to be able to run Docker commands on +download new client certificate bundles to run Docker commands on the cluster. ## Replace existing certificates @@ -41,7 +41,7 @@ To replace the server certificates used by UCP, for each controller node: $ docker cp ca.pem replace-certs:/data/ca.pem $ docker cp key.pem replace-certs:/data/key.pem - # Remove the container, since you won't need it any longer + # Remove the container, since you don't need it any longer $ docker rm replace-certs ``` @@ -56,6 +56,6 @@ To replace the server certificates used by UCP, for each controller node: 4. Let your users know. - After replacing the certificates your users won't be able to authenticate + After replacing the certificates your users can't authenticate with their old client certificate bundles. 
Ask your users to go to the UCP web UI and [get new client certificate bundles](../access-ucp/cli-based-access.md). \ No newline at end of file diff --git a/datacenter/ucp/1.1/high-availability/set-up-high-availability.md b/datacenter/ucp/1.1/high-availability/set-up-high-availability.md index a6f3ce5ddef..919756657bc 100644 --- a/datacenter/ucp/1.1/high-availability/set-up-high-availability.md +++ b/datacenter/ucp/1.1/high-availability/set-up-high-availability.md @@ -35,7 +35,7 @@ your cluster: When sizing your cluster, follow these rules of thumb: -* Don't create a cluster with just one replica. Your cluster won't tolerate any +* Don't create a cluster with just one replica. Your cluster can't tolerate any failures, and it's possible that you experience performance degradation. * When a replica fails, the number of failures tolerated by your cluster decreases. Don't leave that replica offline for long. diff --git a/datacenter/ucp/1.1/install-sandbox-2.md b/datacenter/ucp/1.1/install-sandbox-2.md index c4add6ff57d..e98d4504ea8 100644 --- a/datacenter/ucp/1.1/install-sandbox-2.md +++ b/datacenter/ucp/1.1/install-sandbox-2.md @@ -12,7 +12,7 @@ installation including both UCP and DTR using the instructions [here](install-sandbox.md). If you haven't done this, we can't promise that this tutorial workflow will work exactly the same. -In the second half of this tutorial, we'll walk you through a typical deployment +In the second half of this tutorial, we walk you through a typical deployment workflow using your sandbox installation of DDC as if it was a production instance installed on your organization's network. 
@@ -27,7 +27,7 @@ Over the course of this tutorial, we will: ## Step 1: Set --insecure registry or set up DTR trust and log in -First, we'll set up a security exception that allows a the Docker-machine hosts +First, we set up a security exception that allows the Docker-machine hosts used in your UCP cluster to push images to and pull images from DTR even though the DTR instance has a self-signed certificate. For a production deployment, you would @@ -42,8 +42,8 @@ configuration steps for a production deployment. {:.warning} To allow the Docker Engine to connect to DTR despite it having a self-signed -certificate, we'll specify that there is one insecure registry that we'll allow -the Engine instance to connect to. We'll add this exception by editing the +certificate, we specify that there is one insecure registry that we allow +the Engine instance to connect to. We add this exception by editing the configuration file where docker-machine stores the host's configuration details. 1. Edit the file found at `~/.docker/machine/machines/node1/config.json` using @@ -80,7 +80,7 @@ registry. ## Step 2: Create an image repository in DTR -In this step, we'll create an image repository in DTR that you will be able to +In this step, we create an image repository in DTR that you will be able to push Docker images to. Remember a Docker image is a combination of code and filesystem used as a template to create a container. @@ -102,8 +102,8 @@ filesystem used as a template to create a container. 1. In your terminal, make sure `node1` is active using `docker-machine ls`. This is the node that you configured the security exception for, and if you - are connecting to a Docker Engine without this exception you won't be able - to push to your DTR instance. + are connecting to a Docker Engine without this exception you can't + push to your DTR instance. If necessary, use `docker-machine env` to make `node1` active. @@ -150,7 +150,7 @@ sandbox DTR instance. 
## Step 4: Pull your image from DTR into UCP UCP does not automatically pull images from DTR. To make an image from DTR -appear in UCP, you'll use the UCP web UI to perform a `docker pull`. This `pull` +appear in UCP, you use the UCP web UI to perform a `docker pull`. This `pull` command pulls the image and makes it available on all nodes in the UCP cluster. 1. From the UCP dashboard, click **Images** in the left navigation. @@ -178,7 +178,7 @@ an Nginx web server or a database like Postgres. A UCP administrator initiates Engine actions using the UCP dashboard or the Docker Engine CLI. In this step, you deploy a container from the UCP dashboard. -The container runs an Nginx server, so you'll need to launch the `nginx` image +The container runs an Nginx server, so you need to launch the `nginx` image inside of it. 1. Log in to the UCP **Dashboard**. @@ -192,7 +192,7 @@ inside of it. 3. Click **+ Deploy Container**. - We'll deploy the simple `nginx` container you just pulled, using specific + Deploy the simple `nginx` container you just pulled, using specific values for each field. If you already know what you're doing, feel free to explore once you've completed this example. @@ -252,7 +252,7 @@ running. In this step, you open the running server. 3. Scroll down to the ports section. - You'll see an IP address with port `8080` for the server. + Look for an IP address with port `8080` for the server. 4. Copy the IP address to your browser and paste the information you copied. 
diff --git a/datacenter/ucp/1.1/install-sandbox.md b/datacenter/ucp/1.1/install-sandbox.md index ec12c6af021..b61c9342882 100644 --- a/datacenter/ucp/1.1/install-sandbox.md +++ b/datacenter/ucp/1.1/install-sandbox.md @@ -10,7 +10,7 @@ keywords: Docker Datacenter, orchestration, trial This page introduces Docker Datacenter (also known as DDC): a combination of Docker Universal Control Plane (UCP) and Docker Trusted Registry (DTR), and walks you through installing it on a local (non-production) host or sandbox. -Once you've installed, we'll also give you a guided tour so you can evaluate its +Once you've installed, we also give you a guided tour so you can evaluate its features. The instructions here are for a sandbox installation on Mac OS X or Windows @@ -26,14 +26,14 @@ configuration. ## Introduction: About this example -In this tutorial, we'll use Docker's provisioning tool - Docker Machine - to +In this tutorial, we use Docker's provisioning tool - Docker Machine - to create two virtual hosts. These two hosts are VirtualBox VMs running a small footprint Linux image called `boot2docker.iso`, with the open source version of Docker Engine installed. ![Docker Machine setup](images/explain.png) -A UCP installation consists of an UCP controller and one or more hosts. We'll +A UCP installation consists of an UCP controller and one or more hosts. We install UCP on one host, then join the second node to UCP as a swarm member. The two VMs create a simple swarm cluster with one controller, which by default secures the cluster via self-signed TLS certificates. @@ -41,9 +41,9 @@ secures the cluster via self-signed TLS certificates. ![Sandbox](images/sandbox.png) DDC's second component is DTR, which must be installed on a host that's a member -of the UCP swarm. So next, we'll then install DTR on that second node. +of the UCP swarm. So next, we install DTR on that second node. 
-Once you've installed UCP and DTR you'll [work through a tutorial](install-sandbox-2.md) to deploy a +Once you've installed UCP and DTR, use this [tutorial](install-sandbox-2.md) to deploy a container through UCP, and explore the user interface. **Quit Docker**). Otherwise, you will get an + of the application ( ![whale menu](/docker-for-mac/images/whale-x.png){: .inline} --> **Quit Docker**). Otherwise, you get an "application in use" error when you try to copy the new app from the `.dmg` to `/Applications`. @@ -287,24 +287,22 @@ know before you install](install.md#what-to-know-before-you-install). * Run the uninstall commands from the menu. -

-* If `docker` commands aren't working properly or as expected: +* If `docker` commands aren't working properly or as expected, you may need to + unset some environment variables, to make sure you are not using the legacy + Docker Machine environment in your shell or command window. Unset the + `DOCKER_HOST` environment variable and related variables. - * Make sure you are not using the legacy Docker Machine environment in your shell or command window. You do not need `DOCKER_HOST` set, so unset it as it -may be pointing at another Docker (e.g. VirtualBox). If you use bash, `unset -${!DOCKER_*}` will unset existing `DOCKER` environment variables you have set. + * If you use bash, use the following command: `unset ${!DOCKER_*}` * For other shells, unset each environment variable individually as described in [Setting up to run Docker for Mac](docker-toolbox.md#setting-up-to-run-docker-for-mac) in [Docker for Mac vs. Docker Toolbox](docker-toolbox.md). -

-* Note that network connections will fail if the macOS Firewall is set to +* Network connections fail if the macOS Firewall is set to "Block all incoming connections". You can enable the firewall, but `bootpd` must be allowed incoming connections so that the VM can get an IP address. -

-* For the `hello-world-nginx` example, Docker for Mac must be running in order to get to the webserver on `http://localhost/`. Make sure that the Docker whale +* For the `hello-world-nginx` example, Docker for Mac must be running to get to the webserver on `http://localhost/`. Make sure that the Docker whale is showing in the menu bar, and that you run the Docker commands in a shell that is connected to the Docker for Mac Engine (not Engine from Toolbox). Otherwise, you might start the webserver container but get a "web page not available" error @@ -324,21 +322,20 @@ your docker app. ## Known issues -* IPv6 is not yet supported on Docker for Mac. If you are using IPv6, and haven't upgraded to Beta 24 or v1.12.1 stable or newer, you will see a network +* IPv6 is not yet supported on Docker for Mac. If you are using IPv6, and haven't upgraded to Beta 24 or v1.12.1 stable or newer, you see a network timeout when you run `docker` commands that need access to external network servers. The aforementioned releases include a workaround for this because Docker for Mac does not yet support IPv6. See "IPv6 workaround to auto-filter DNS addresses" in [Workarounds for common problems](troubleshoot.md#workarounds-for-common-problems). -

* You might encounter errors when using `docker-compose up` with Docker for Mac (`ValueError: Extra Data`). We've identified this is likely related to data and/or events being passed all at once rather than one by one, so sometimes the data comes back as 2+ objects concatenated and causes an error. -

-* Force-ejecting the `.dmg` after running `Docker.app` from it results in an unresponsive whale in the menu bar, Docker tasks "not responding" in activity monitor, helper processes running, and supporting technologies consuming large percentages of CPU. Please reboot, and then re-start Docker for Mac. If needed,`force quit` any Docker related applications as part of the reboot. - -

+* Force-ejecting the `.dmg` after running `Docker.app` from it can cause the + whale icon to become unresponsive, Docker tasks to show as not responding in + the Activity Monitor, and for some processes to consume a large amount of CPU + resources. Reboot and restart Docker to resolve these issues. * Docker does not auto-start on login even when it is enabled in ![whale menu](/docker-for-mac/images/whale-x.png){: .inline} --> **Preferences**. This is related to a set of issues with Docker @@ -352,23 +349,21 @@ Hardware Accelerated Execution Manager (HAXM)](https://software.intel.com/en-us/android/articles/intel-hardware-accelerated-execution-manager/), the current workaround is not to run them at the same time. You can pause `HyperKit` by quitting Docker for Mac temporarily while you work with HAXM. This -will allow you to continue work with the other tools and prevent `HyperKit` from +allows you to continue work with the other tools and prevent `HyperKit` from interfering. -

* If you are working with applications like [Apache Maven](https://maven.apache.org/) that expect settings for `DOCKER_HOST` and `DOCKER_CERT_PATH` environment variables, specify these to connect to Docker instances through Unix sockets. For example: export DOCKER_HOST=unix:///var/run/docker.sock -* `docker-compose` 1.7.1 performs DNS unnecessary lookups for `localunixsocket.local` which can take 5s to timeout on some networks. If `docker-compose` commands seem very slow but seem to speed up when the network is disabled (e.g. when disconnected from wifi), try appending `127.0.0.1 localunixsocket.local` to the file `/etc/hosts`. +* `docker-compose` 1.7.1 performs DNS unnecessary lookups for `localunixsocket.local` which can take 5s to timeout on some networks. If `docker-compose` commands seem very slow but seem to speed up when the network is disabled, try appending `127.0.0.1 localunixsocket.local` to the file `/etc/hosts`. Alternatively you could create a plain-text TCP proxy on localhost:1234 using: docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 127.0.0.1:1234:1234 bobrik/socat TCP-LISTEN:1234,fork UNIX-CONNECT:/var/run/docker.sock and then `export DOCKER_HOST=tcp://localhost:1234`. -

@@ -391,11 +386,9 @@ directories in Docker volumes, perform temporary file system operations outside of `osxfs` mounts, and use third-party tools like Unison or `rsync` to synchronize between container directories and bind-mounted directories. We are actively working on `osxfs` performance using a number of different techniques. -To learn more, please see the topic on [Performance issues, solutions, and +To learn more, see the topic on [Performance issues, solutions, and roadmap](osxfs.md#performance-issues-solutions-and-roadmap). -

- * If your system does not have access to an NTP server, then after a hibernate the time seen by Docker for Mac may be considerably out of sync with the host. Furthermore, the time may slowly drift out of sync during use. To manually reset the time after hibernation, run: docker run --rm --privileged alpine hwclock -s diff --git a/docker-for-windows/faqs.md b/docker-for-windows/faqs.md index 3d5b7e31c43..b1392a56297 100644 --- a/docker-for-windows/faqs.md +++ b/docker-for-windows/faqs.md @@ -106,7 +106,7 @@ information, see [Stable and Edge channels](#questions-about-stable-and-edge-cha Yes! You can use Docker for Windows to test single-node features of [swarm mode](/engine/swarm/index.md) introduced with Docker Engine 1.12, including initializing a swarm with a single node, creating services, and scaling -services. Docker “Moby” on Hyper-V will serve as the single swarm node. You can +services. Docker “Moby” on Hyper-V serves as the single swarm node. You can also use Docker Machine, which comes with Docker for Windows, to create and experiment with a multi-node swarm. Check out the tutorial at [Get started with swarm mode](/engine/swarm/swarm-tutorial/index.md). @@ -133,7 +133,7 @@ but rather sets permissions to a default value of (`read`, `write`, `execute` permissions for `user`, `read` and `execute` for `group`) which is not configurable. -For workarounds and to learn more, please see [Permissions errors on data +For workarounds and to learn more, see [Permissions errors on data directories for shared volumes](troubleshoot.md#permissions-errors-on-data-directories-for-shared-volumes). @@ -147,8 +147,8 @@ does not work](troubleshoot.md#inotify-on-shared-drives-does-not-work) in ### Are symlinks supported? Docker for Windows supports symbolic links (symlinks) created within containers. -Symlinks will resolve within and across containers. -Symlinks created elsewhere (e.g., on the host) will not work. +Symlinks resolve within and across containers. 
+Symlinks created outside of Docker do not work. To learn more about the reasons for this limitation, see the following discussions: @@ -167,7 +167,7 @@ Certification Authorities or Intermediate Certification Authorities. Docker for Windows creates a certificate bundle of all user-trusted CAs based on the Windows certificate store, and appends it to Moby trusted certificates. So -if an enterprise SSL certificate is trusted by the user on the host, it will be +if an enterprise SSL certificate is trusted by the user on the host, it is trusted by Docker for Windows. To learn more about how to install a CA root certificate for the registry, see @@ -176,7 +176,7 @@ in the Docker Engine topics. ### How do I add client certificates? -Starting with Docker for Windows 17.06.0-ce, you do not have to push your +Starting with Docker for Windows 17.06.0-ce, you do not need to push your certificates with `git` commands anymore. You can put your client certificates in `~/.docker/certs.d/:/client.cert` and `~/.docker/certs.d/:/client.key`. @@ -190,17 +190,17 @@ directory on Moby (the Docker for Windows virtual machine running on Hyper-V). the changes to take effect. > > * The registry cannot be listed as an _insecure registry_ (see [Docker -Daemon](/docker-for-windows/index.md#docker-daemon)). Docker for Windows will -ignore certificates listed under insecure registries, and will not send client +Daemon](/docker-for-windows/index.md#docker-daemon)). Docker for Windows +ignores certificates listed under insecure registries, and does not send client certificates. Commands like `docker run` that attempt to pull from -the registry will produce error messages on the command line, as well as on the +the registry produce error messages on the command line, as well as on the registry. To learn more about how to set the client TLS certificate for verification, see [Verify repository client with certificates](/engine/security/certificates.md) in the Docker Engine topics. 
-### Why does Docker for Windows sometimes lose network connectivity (e.g., `push`/`pull` doesn't work)? +### Why does Docker for Windows sometimes lose network connectivity, causing `push` or `pull` commands to fail? Networking is not yet fully Stable across network changes and system sleep cycles. Exit and start Docker to restore connectivity. diff --git a/docker-for-windows/index.md b/docker-for-windows/index.md index 942f3d95c8c..91a8212a3d4 100644 --- a/docker-for-windows/index.md +++ b/docker-for-windows/index.md @@ -20,7 +20,7 @@ Docker is a full development platform for creating containerized apps, and Docker for Windows is the best way to get started with Docker on Windows systems. -> **Got Docker for Windows?** If you have not yet installed Docker for Windows, please see [Install Docker for Windows](install.md) for an explanation of stable +> **Got Docker for Windows?** If you have not yet installed Docker for Windows, see [Install Docker for Windows](install.md) for an explanation of stable and edge channels, system requirements, and download/install information. >**Looking for system requirements?** Check out @@ -140,8 +140,8 @@ and make sure `docker` commands are working properly. ``` >**Note**: The outputs above are examples. Your output for commands like - > `docker version` and `docker info` will vary depending on your product - > versions (e.g., as you install newer versions). + > `docker version` and `docker info` varies depending on your product + > versions. 3. Run `docker run hello-world` to test pulling an image from Docker Hub and starting a container. @@ -164,7 +164,7 @@ and make sure `docker` commands are working properly. PS C:\Users\jdoe> docker run -it ubuntu bash ``` - This will download the `ubuntu` container image and start it. Here is the output of running this command in a powershell. + This downloads the `ubuntu` container image and starts it. Here is the output of running this command in a powershell. 
```none PS C:\Users\jdoe> docker run -it ubuntu bash @@ -187,7 +187,7 @@ and make sure `docker` commands are working properly. PS C:\Users\jdoe> docker run -d -p 80:80 --name webserver nginx ``` - This will download the `nginx` container image and start it. Here is the + This downloads the `nginx` container image and starts it. Here is the output of running this command in a powershell. ```none @@ -226,15 +226,15 @@ and make sure `docker` commands are working properly. 8. Stop or remove containers and images. - The `nginx` webserver will continue to run in the container on that port + The `nginx` webserver continues to run in the container on that port until you stop and/or remove the container. If you want to stop the webserver, type: `docker stop webserver` and start it again with `docker start webserver`. To stop and remove the running container with a single command, type: - `docker rm -f webserver`. This will remove the container, but not the + `docker rm -f webserver`. This removes the container, but not the `nginx` image. You can list local images with `docker images`. You might - want to keep some images around so that you don't have to pull them again + want to keep some images around so that you don't need to pull them again from Docker Hub. To remove an image you no longer need, use `docker rmi` followed by an image ID or image name. For example, `docker rmi nginx`. **Want more example applications?** [Get Started](/get-started/) and [Samples](/samples) are great places to start. @@ -251,7 +251,7 @@ PowerShell Module as follows. > > * Make sure you have administrator permissions to run an elevated PowerShell. -1. Start an "elevated" PowerShell (i.e., run it as administrator). +1. Start an "elevated" PowerShell, running as an administrator. To do this, search for PowerShell, right-click, and choose **Run as administrator**. @@ -378,23 +378,22 @@ available to your containers. 
![Shared Drives](images/settings-shared-drives.png) -You will be asked to provide your Windows system username and password (domain +You are prompted for your Windows system username and password (domain user) to apply shared drives. You can select an option to have Docker store the -credentials so that you don't have to re-enter them every time. +credentials so that you don't need to re-enter them every time. Permissions to access shared drives are tied to the credentials you provide here. If you run `docker` commands and tasks under a different username than the -one used here to set up sharing, your containers will not have permissions to +one used here to set up sharing, your containers do not have permissions to access the mounted volumes. > Tips on shared drives, permissions, and volume mounts > - * Shared drives are only required for volume mounting + * Shared drives are only required for mounting volumes in [Linux containers](#switch-between-windows-and-linux-containers), not for - Windows containers. For Linux containers, you need to share the drive where - your project is located (i.e., where the Dockerfile and volume are located). - Runtime errors such as file not found or cannot start service may indicate - shared drives are needed. (See also + Windows containers. For Linux containers, you need to share the drive where the + Dockerfile and volume are located. If you get errors such as `file not found` + or `cannot start service` you may need to enable shared drives. See [Volume mounting requires shared drives for Linux containers](troubleshoot.md#volume-mounting-requires-shared-drives-for-linux-containers).) > * If possible, avoid volume mounts from the Windows host, and instead mount on @@ -403,7 +402,7 @@ volume](https://docs.docker.com/engine/tutorials/dockervolumes.md#data-volumes) (named volume) or [data container](/engine/tutorials/dockervolumes.md#creating-and-mounting-a-data-volume-container). 
There are a number of issues with using host-mounted volumes and network paths -for database files. Please see the troubleshooting topic on [Volume mounts from +for database files. See the troubleshooting topic on [Volume mounts from host paths use a nobrl option to override database locking](/docker-for-windows/troubleshoot.md#volume-mounts-from-host-paths-use-a-nobrl-option-to-override-database-locking). > @@ -464,7 +463,7 @@ Cancel. * **Memory** - Change the amount of memory the Docker for Windows Linux VM uses. -Please note, updating these settings requires a reconfiguration and reboot of the Linux VM. This will take a few seconds. +After you change these settings, the Linux VM restarts. This takes a few seconds. ### Network @@ -484,16 +483,16 @@ You can configure Docker for Windows networking to work on a virtual private net > `8.8.8.8`. For more information, see > [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting. -Note that updating these settings requires a reconfiguration and reboot of the Linux VM. +Updating these settings requires a reconfiguration and reboot of the Linux VM. ### Proxies Docker for Windows lets you configure HTTP/HTTPS Proxy Settings and automatically propagate these to Docker and to your containers. -For example, if you set your proxy settings to `http://proxy.example.com`, Docker will use this proxy when pulling containers. +For example, if you set your proxy settings to `http://proxy.example.com`, Docker uses this proxy when pulling containers. ![Proxies](/docker-for-windows/images/proxies.png) -When you start a container, you will see that your proxy settings propagate into the containers. For example: +When you start a container, your proxy settings propagate into the containers. 
For example: ```ps PS C:\Users\jdoe> docker run alpine env @@ -514,7 +513,7 @@ If you have containers that you wish to keep running across restarts, you should ### Docker daemon You can configure options on the Docker daemon that determine how your -containers will run. You can configure some **Basic** options on the daemon with interactive settings, or switch to **Advanced** to edit the JSON directly. +containers run. You can configure some **Basic** options on the daemon with interactive settings, or switch to **Advanced** to edit the JSON directly. The settings offered on **Basic** dialog can be configured directly in the JSON as well. This version just surfaces @@ -600,7 +599,7 @@ In that topic, see also: * [Windows configuration file](/engine/reference/commandline/dockerd.md#windows-configuration-file) -Note that updating these settings requires a reconfiguration and reboot of the +Updating these settings requires a reconfiguration and reboot of the Linux VM. ### Switch between Windows and Linux containers @@ -641,9 +640,8 @@ If you are interested in working with Windows containers, here are some guides t [forked here to use containers](https://github.com/friism/MusicStore), is a good example of a multi-container application. - > **Disclaimer:** This lab is still in work, and is based off of the blog, but - > you can test and leverage the example walkthroughs now, if you want to start - > experimenting. Please check back as the lab evolves. + > **Disclaimer:** This lab is still under development, and is adapted from a + > blog post. Check back as the lab evolves. * This troubleshooting issue is useful for understanding how to connect to Windows containers from the local host: [Limitations of Windows containers for `localhost` and published ports](troubleshoot.md#limitations-of-windows-containers-for-localhost-and-published-ports) @@ -755,7 +753,7 @@ behavior, and steps to reproduce the issue. 
* **Reset to Toolbox default machine content** - Imports containers and images from the existing Docker Toolbox machine named `default`. (This option is - enabled only if you have Toolbox installed.) The VirtualBox VM will not be + enabled only if you have Toolbox installed.) The VirtualBox VM is not removed. * **Reset to factory defaults** - Resets Docker to factory defaults. This is diff --git a/docker-for-windows/install.md b/docker-for-windows/install.md index 45e4716631f..329ec6ce74d 100644 --- a/docker-for-windows/install.md +++ b/docker-for-windows/install.md @@ -21,7 +21,7 @@ versions here](release-notes.md). ## Download Docker for Windows -If you have not already done so, please install Docker for Windows. You can +If you have not already done so, install Docker for Windows. You can download installers from the **Stable** or **Edge** channel. Both Stable and Edge installers come with -* Virtualization must be enabled. Typically, virtualization is enabled by default. (Note that this is different from having Hyper-V enabled.) For more +* Virtualization must be enabled. Typically, virtualization is enabled by default. This is different from having Hyper-V enabled. For more detail see [Virtualization must be enabled](troubleshoot.md#virtualization-must-be-enabled) in Troubleshooting.

-* The current version of Docker for Windows runs on 64bit Windows 10 Pro, Enterprise and Education (1607 Anniversary Update, Build 14393 or later). In the future we will support more versions of Windows 10. +* The current version of Docker for Windows runs on 64bit Windows 10 Pro, Enterprise and Education (1607 Anniversary Update, Build 14393 or later).

* Containers and images created with Docker for Windows are shared between all user accounts on machines where it is installed. This is because all -Windows accounts will use the same VM to build and run containers. In the -future, Docker for Windows will better isolate user content. +Windows accounts use the same VM to build and run containers.

* Nested virtualization scenarios, such as running Docker for Windows on a VMWare or Parallels instance, might work, but come with no -guarantees (i.e., not officially supported). For more information, see +guarantees. For more information, see [Running Docker for Windows in nested virtualization scenarios](troubleshoot.md#running-docker-for-windows-in-nested-virtualization-scenarios)

* **What the Docker for Windows install includes**: The installation provides [Docker Engine](/engine/userguide/), Docker CLI client, [Docker Compose](/compose/overview.md), [Docker Machine](/machine/overview.md), and [Kitematic](/kitematic/userguide.md). @@ -131,7 +130,7 @@ Windows containers. 2. Follow the install wizard to accept the license, authorize the installer, and proceed with the install. - You will be asked to authorize `Docker.app` with your system password during the install process. + You are asked to authorize `Docker.app` with your system password during the install process. Privileged access is needed to install networking components, links to the Docker apps, and manage the Hyper-V VMs. @@ -141,7 +140,7 @@ Windows containers. ## Start Docker for Windows -Docker will not start automatically. To start it, search for Docker, select the +Docker does not start automatically. To start it, search for Docker, select the app in the search results, and click it (or hit Return). ![search for Docker app](/docker-for-windows/images/docker-app-search.png) diff --git a/docker-for-windows/release-notes.md b/docker-for-windows/release-notes.md index 0d7ac08c90c..e90d0a8d61b 100644 --- a/docker-for-windows/release-notes.md +++ b/docker-for-windows/release-notes.md @@ -9,7 +9,7 @@ title: Docker for Windows Release notes Here are the main improvements and issues per release, starting with the current release. The documentation is always updated for each release. -For system requirements, please see +For system requirements, see [What to know before you install](install.md#what-to-know-before-you-install). Release notes for _stable_ and _edge_ releases are listed below. (Starting with @@ -59,8 +59,8 @@ about both kinds of releases, and download stable and edge product installers at - Fix uninstaller issue (in some specific cases dockerd process was not killed properly) - Fix Net Promoter Score Gui bug. 
Fixes [for-win/#1277](https://github.com/docker/for-win/issues/1277) - Fix `docker.for.win.localhost` not working in proxy settings. Fixes [for-win/#1130](https://github.com/docker/for-win/issues/1130) - - Increased timeout for VM boot startup to 2 minutes. - + - Increased timeout for VM boot startup to 2 minutes. + ### Docker Community Edition 17.09.0-ce-win33 2017-10-06 (Stable) @@ -146,7 +146,7 @@ about both kinds of releases, and download stable and edge product installers at **New** - Windows Server 2016 support -- Windows 10586 is marked as deprecated; it will not be supported going forward in stable releases +- Windows 10586 is marked as deprecated; it is not supported going forward in stable releases - Integration with Docker Cloud, with the ability to control remote Swarms from the local command line interface (CLI) and view your repositories - Unified login between the Docker CLI and Docker Hub, Docker Cloud. - Sharing a drive can be done on demand, the first time a mount is requested @@ -374,7 +374,7 @@ We did not distribute a 1.12.4 stable release >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. 
To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -384,7 +384,7 @@ We did not distribute a 1.12.4 stable release * To support trusted registry transparently, all trusted CAs (root or intermediate) on the Windows host are automatically copied to Moby -* `Reset Credentials` will also unshare the shared drives +* `Reset Credentials` also unshares the shared drives * Logs are now rotated every day @@ -409,7 +409,7 @@ We did not distribute a 1.12.4 stable release * Uploading a diagnostic now shows a proper status message in the Settings -* Docker will stop asking to import from Toolbox after an upgrade +* Docker stops asking to import from Toolbox after an upgrade * Docker can now import from Toolbox just after HyperV is activated @@ -439,7 +439,7 @@ We did not distribute a 1.12.4 stable release * VnpKit: reduce the number of sockets used by UDP NAT, reduce the probability -* `slirp`: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected +* `slirp`: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules time out earlier than expected * Fixed password handling for host file system sharing @@ -461,7 +461,7 @@ We did not distribute a 1.12.4 stable release **Known issues** -* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting. +* Docker automatically disables lingering net adapters. 
The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting. ### Docker for Windows 1.12.0, 2016-07-28 (stable) @@ -482,7 +482,7 @@ We did not distribute a 1.12.4 stable release * Upgrades - [Docker 18.01.0-ce](https://github.com/docker/docker-ce/releases/tag/v18.01.0-ce) - Linux Kernel 4.9.75 - + * Bug fixes and minor changes - Fix linuxKit port-forwarder sometimes not being able to start. Fixes [docker/for-win#1506](https://github.com/docker/for-win/issues/1506) - Fix certificate management when connecting to a private registry. Fixes [docker/for-win#1512](https://github.com/docker/for-win/issues/1512) @@ -536,7 +536,7 @@ We did not distribute a 1.12.4 stable release - [Docker 17.11.0-ce-rc4](https://github.com/docker/docker-ce/releases/tag/v17.11.0-ce-rc4) - [Docker compose 1.17.1](https://github.com/docker/compose/releases/tag/1.17.1) - Linux Kernel 4.9.60 - + * Bug fixes and minor changes - Increased timeout for VM boot startup to 2 minutes. @@ -578,7 +578,7 @@ We did not distribute a 1.12.4 stable release * New - VM entirely built with Linuxkit - Experimental support for Microsoft Linux Containers On Windows, on Windows 10 RS3. - + ### Docker Community Edition 17.09.0-ce-win34 2017-10-06 (Edge) @@ -876,7 +876,7 @@ registry access (fixes [docker/for-win#569](https://github.com/docker/for-win/is **New** - Introduce Docker Community Edition -- Integration with Docker Cloud: control remote Swarms from the local CLI and view your repositories. This feature will be rolled out to all users progressively. +- Integration with Docker Cloud: control remote Swarms from the local CLI and view your repositories. This feature is being rolled out to all users progressively. 
**Upgrades** @@ -1182,7 +1182,7 @@ dialogs](/docker-for-windows/index.md#about-the-docker-windows-containers-specif >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -1196,7 +1196,7 @@ work. Some insider builds may not work. **New** - Restore the VM's configuration when it was changed by the user -- Overlay2 is now the default storage driver. After a factory reset overlay2 will automatically be used +- Overlay2 is now the default storage driver. After a factory reset overlay2 is automatically used - Detect firewall configuration that might block the file sharing - Send more GUI usage statistics to help us improve the product @@ -1218,7 +1218,7 @@ work. Some insider builds may not work. >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -1257,7 +1257,7 @@ work. Some insider builds may not work. >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. 
To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -1295,7 +1295,7 @@ work. Some insider builds may not work. >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -1303,11 +1303,11 @@ work. Some insider builds may not work. **New** -* Basic support for Windows containers. On Windows 10 build >= 14372, a switch in the `systray` icon will change which daemon (Linux or Windows) the Docker CLI talks to +* Basic support for Windows containers. On Windows 10 build >= 14372, a switch in the `systray` icon changes which daemon (Linux or Windows) the Docker CLI talks to * To support trusted registry transparently, all trusted CAs (root or intermediate) on the Windows host are automatically copied to Moby -* `Reset Credentials` will also unshare the shared drives +* `Reset Credentials` also unshares the shared drives * Logs are now rotated every day @@ -1328,7 +1328,7 @@ work. Some insider builds may not work. >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. 
To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -1348,13 +1348,13 @@ work. Some insider builds may not work. * Only UTF-8 passwords are supported for host filesystem sharing -* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting. +* Docker automatically disables lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting. ### Beta 24 Release (2016-08-23 1.12.1-beta24) >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -1369,19 +1369,19 @@ work. Some insider builds may not work. **Bug fixes and minor changes** -* `slirp`: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules will time out earlier than expected +* `slirp`: reduce the number of sockets used by UDP NAT, reduce the probability that NAT rules time out earlier than expected **Known issues** * Only UTF-8 passwords are supported for host filesystem sharing. -* Docker will automatically disable lingering net adapters. 
The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting. +* Docker automatically disables lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Remove stale network adapters](troubleshoot.md#4-remove-stale-network-adapters) under [Networking issues](troubleshoot.md#networking-issues) in Troubleshooting. ### Beta 23 Release (2016-08-16 1.12.1-rc1-beta23) >**Important Note**: > -> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: +> The auto-update function in Beta 21 cannot install this update. To install the latest beta manually if you are still on Beta 21, download the installer here: > [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) @@ -1404,7 +1404,7 @@ work. Some insider builds may not work. * Fixed password handling for host file system sharing * Automatically disable lingering net adapters that prevent Docker from starting or using the network * Automatically delete duplicated MobyLinuxVMs on a `reset to factory defaults` -* Docker will stop asking to import from toolbox after an upgrade +* Docker stops asking to import from toolbox after an upgrade * Docker can now import from toolbox just after hyperV is activated * Fixed Moby Diagnostics and Update Kernel * Added more debug information to the diagnostics @@ -1422,7 +1422,7 @@ work. Some insider builds may not work. **Known issues** * Only UTF-8 passwords are supported for host filesystem sharing -* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Troubleshooting](troubleshoot.md#networking-issues). 
+* Docker automatically disables lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Troubleshooting](troubleshoot.md#networking-issues). ### Beta 22 Release (2016-08-11 1.12.0-beta22) @@ -1430,13 +1430,13 @@ Unreleased. See Beta 23 for changes. **Known issues** -* Docker will automatically disable lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Troubleshooting](troubleshoot.md#networking-issues). +* Docker automatically disables lingering net adapters. The only way to remove them is manually using `devmgmt.msc` as documented in [Troubleshooting](troubleshoot.md#networking-issues). ### Beta 21 Release (2016-07-28 1.12.0-beta21) **New** -* Docker for Windows is now available from 2 channels: **stable** and **beta**. New features and bug fixes will go out first in auto-updates to users in the beta channel. Updates to the stable channel are much less frequent and happen in sync with major and minor releases of the Docker engine. Only features that are well-tested and ready for production are added to the stable channel releases. For downloads of both and more information, see the [Getting Started](/docker-for-windows/index.md#download-docker-for-windows). +* Docker for Windows is now available from 2 channels: **stable** and **beta**. New features and bug fixes go out first in auto-updates to users in the beta channel. Updates to the stable channel are much less frequent and happen in sync with major and minor releases of the Docker engine. Only features that are well-tested and ready for production are added to the stable channel releases. For downloads of both and more information, see the [Getting Started](/docker-for-windows/index.md#download-docker-for-windows). * Removed the docker host name. Containers with exported ports are reachable via localhost. @@ -1482,7 +1482,7 @@ Unreleased. See Beta 23 for changes. 
**New** -* Added an option to opt-out from sending usage statistics (will be available on the future stable channel) +* Added an option to opt-out from sending usage statistics (only on the Edge channel for now) * New error dialog box to upload crash reports **Upgrades** @@ -1545,7 +1545,7 @@ Unreleased. See Beta 23 for changes. **Bug fixes and minor changes** -* Interrupting a `docker build` with Ctrl-C will actually stop the build +* Interrupting a `docker build` with Ctrl-C actually stops the build * The docker API proxy was failing to deal with 1.12 features (health check for, for example) * When killing the VM process, ignore when the process is already stopped * When stopping the VM, always stop the docker proxy @@ -1731,7 +1731,7 @@ This Beta release includes some significant changes: * Due to limitation in the Windows NAT implementation, co-existence with other NAT prefixes needs to be carefully managed. See [Troubleshooting](troubleshoot.md) for more details. -* Logs for the windows service are not aggregated with logs from the GUI. This will be fixed in future versions. +* Logs for the windows service are not aggregated with logs from the GUI. This is expected to be fixed in future versions. ## Beta 10 Release (2016-05-03 1.11.0-beta10) @@ -1793,7 +1793,7 @@ are working on a solution. **Bug fixes and minor changes** * Better UI in the ShareDrive window -* The firewall alert dialog will not come up as often as it was +* The firewall alert dialog does not come up as often as previously * Configured MobyLinux VM with a fixed memory of 2GB * User password is no longer stored on the host-side KVP * Uninstall shortcut is available in registry @@ -1818,10 +1818,10 @@ are working on a solution. * Allow DNS/DHCP processes to restart on bind error * Less destructive migration from Docker Toolbox * Improved documentation -* Better error handling: Moby will restart itself if start takes too long. +* Better error handling: Moby restarts itself if start takes too long. 
* Kill proxy and exit docker before a new version is installed * The application cannot start twice now -* The proxy will stop automatically when the GUI is not running +* The proxy stops automatically when the GUI is not running * Removed existing proxy firewall rules before starting Moby * The application now collects more and better information on crashes and other issues * Improved all dialogs and windows @@ -1836,11 +1836,11 @@ are working on a solution. **Known issues** -- Settings are now serialized in JSON. This install will lose the current settings. +- Settings are now serialized in JSON. This install loses the current settings. -- Docker needs to open ports on the firewall. Sometimes, the user will see a firewall alert dialog. The user should allow the ports to be opened. +- Docker needs to open ports on the firewall. Sometimes, the user sees a firewall alert dialog. The user should allow the ports to be opened. -- The application was upgraded to 64 bits. The installation path changed to `C:\Program Files\Docker\Docker`. Users might have to close any Powershell/Cmd windows that were already open before the update to get the new `PATH`. In some cases, users may need to log off and on again. +- The application was upgraded to 64 bits. The installation path changed to `C:\Program Files\Docker\Docker`. Users might need to close any Powershell/Cmd windows that were already open before the update to get the new `PATH`. In some cases, users may need to log off and on again. **Bug Fixes** @@ -1882,7 +1882,7 @@ are working on a solution. - Docker needs to open ports on the firewall, which can activate a firewall alert dialog. Users should allow the ports to be opened. -- The application was upgraded to 64 bits. The installation path changed to `C:\Program Files\Docker\Docker`. If users have Powershell/Cmd windows already open before the update, they might have to close them to catch the new PATH. In some cases, users will need to log off and on again. 
+- The application was upgraded to 64 bits. The installation path changed to `C:\Program Files\Docker\Docker`. If users have Powershell/Cmd windows already open before the update, they might need to close them to catch the new PATH. In some cases, users need to log off and on again. **Bug Fixes** @@ -1931,7 +1931,7 @@ are working on a solution. - Display the third party licenses - Display the license agreement -The application will refuse to start if Hyper-v is not enabled +The application refuses to start if Hyper-v is not enabled - Rename `console` to `debug console` - Remove `machine` from notification - Open the feedback forum diff --git a/docker-for-windows/troubleshoot.md b/docker-for-windows/troubleshoot.md index c80a9c559b5..9e57041da0a 100644 --- a/docker-for-windows/troubleshoot.md +++ b/docker-for-windows/troubleshoot.md @@ -41,16 +41,16 @@ history of logs in your `AppData\Local` folder. If you encounter an issue and the suggested troubleshoot procedures outlined below don't fix it you can generate a diagnostics report. Click on the `Diagnose & Feedback` menu entry in the systray and then on the `Upload diagnostic...` link. -This will upload diagnostics to our server and provide you with a unique ID you +This uploads diagnostics to our server and provides you with a unique ID you can use in email or the forum to reference the upload. ## Troubleshooting ### Make sure certificates are set up correctly -Docker for Windows will ignore certificates listed under insecure registries, -and will not send client certificates to them. Commands like `docker run` that -attempt to pull from the registry will produce error messages on the command +Docker for Windows ignores certificates listed under insecure registries, +and does not send client certificates to them.
Commands like `docker run` that +attempt to pull from the registry produce error messages on the command line, like this: ``` @@ -77,7 +77,7 @@ volumes](/docker-for-windows/index.md#shared-drives) to a default value of [0755](http://permissions-calculator.org/decode/0755/) (`read`, `write`, `execute` permissions for `user`, `read` and `execute` for `group`). If you are working with applications that require permissions different than this default, -you will likely get errors similar to the following. +you may get errors similar to the following. ```none Data directory (/var/www/html/data) is readable by other users. Please change the permissions to 0755 so that the directory cannot be listed by other users. @@ -103,7 +103,7 @@ drives](https://github.com/docker/docker.github.io/issues/3298). ### inotify on shared drives does not work -Currently, `inotify` does not work on Docker for Windows. This will become +Currently, `inotify` does not work on Docker for Windows. This becomes evident, for example, when an application needs to read/write to a container across a mounted drive. Instead of relying on filesystem inotify, we recommend using polling features for your framework or programming language. @@ -116,9 +116,9 @@ applications](https://github.com/remy/nodemon#application-isnt-restarting) ### Volume mounting requires shared drives for Linux containers If you are using mounted volumes and get runtime errors indicating an -application file is not found, a volume mount is denied, or a service cannot -start (e.g., with [Docker Compose](/compose/gettingstarted.md)), you might need -to enable [shared drives](/docker-for-windows/index.md#shared-drives). +application file is not found, access is denied to a volume mount, or a service +cannot start, such as when using [Docker Compose](/compose/gettingstarted.md), +you might need to enable [shared drives](/docker-for-windows/index.md#shared-drives).
Volume mounting requires shared drives for Linux containers (not for Windows containers). Go to @@ -135,8 +135,8 @@ not Windows containers. Permissions to access shared drives are tied to the username and password you use to set up [shared drives](/docker-for-windows/index.md#shared-drives). If you run `docker` commands and tasks under a different username than the one used to set up shared -drives, your containers will not have permissions to access the mounted volumes. -The volumes will show as empty. +drives, your containers don't have permissions to access the mounted volumes. +The volumes show as empty. The solution to this is to switch to the domain user account and reset credentials on shared drives. @@ -206,12 +206,12 @@ Compose file documentation. ### Local security policies can block shared drives and cause login errors -You need permissions to mount shared drives in order to use the Docker for +You need permissions to mount shared drives to use the Docker for Windows [shared drives](/docker-for-windows/index.md#shared-drives) feature. -If local policy prevents this, you will get errors when you attempt to enable -shared drives on Docker. This is not something Docker can resolve, you do need -these permissions to use the feature. +If local policy prevents this, you get errors when you attempt to enable +shared drives on Docker. This is not something Docker can resolve, since you do +need these permissions to use the feature. Here are snip-its from example error messages: @@ -228,7 +228,7 @@ See also, Docker for Windo ### Understand symlinks limitations -Symlinks will work within and across containers. However, symlinks created outside of containers (for example, on the host) will not work. To learn more, see [Are symlinks supported?](faqs.md#are-symlinks-supported) in the FAQs. +Symlinks work within and across containers. However, symlinks created outside of containers (for example, on the host) do not work. 
To learn more, see [Are symlinks supported?](faqs.md#are-symlinks-supported) in the FAQs. ### Avoid unexpected syntax errors, use Unix style line endings for files in containers @@ -242,7 +242,7 @@ Keep this in mind when authoring files such as shell scripts using Windows tools, where the default is likely to be Windows style line endings. These commands ultimately get passed to Unix commands inside a Unix based container (for example, a shell script passed to `/bin/sh`). If Windows style line endings -are used, `docker run` will fail with syntax errors. +are used, `docker run` fails with syntax errors. For an example of this issue and the resolution, see this issue on GitHub: [Docker RUN fails to execute shell @@ -309,18 +309,18 @@ To fix existing containers, follow these steps. ### Hyper-V Docker for Windows requires a Hyper-V as well as the Hyper-V Module for Windows -Powershell to be installed and enabled. The Docker for Windows installer will -enable it for you. +Powershell to be installed and enabled. The Docker for Windows installer +enables it for you. See [these instructions](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) to install Hyper-V manually. A reboot is *required*. If you install Hyper-V -without the reboot, Docker for Windows will not work correctly. On some systems, +without the reboot, Docker for Windows does not work correctly. On some systems, Virtualization needs to be enabled in the BIOS. The steps to do so are Vendor specific, but typically the BIOS option is called `Virtualization Technology (VTx)` or similar. -Once Hyper-V is enabled, it will show up as such on "Turn Windows features on or +Once Hyper-V is enabled, it shows up as such on "Turn Windows features on or off". ![Hyper-V on Windows features](images/hyper-v-enable-status.png ) @@ -344,15 +344,15 @@ Machine driver example](/machine/drivers/hyper-v.md#example). In addition to [Hyper-V](#hyper-v), virtualization must be enabled. 
If, at some point, if you manually uninstall Hyper-V or disable virtualization, -Docker for Windows will not start. +Docker for Windows cannot start. Verify that virtualization is enabled by checking the Performance tab on the Task Manager. ![Task Manager](images/win-virtualization-enabled.png) -See also, the user reported issue [Unable to run Docker for Windows on Windows -10 Enterprise](https://github.com/docker/for-win/issues/74) +Also, see the user-reported issue +[Unable to run Docker for Windows on Windows 10 Enterprise](https://github.com/docker/for-win/issues/74). ### Networking and WiFi problems upon Docker for Windows install @@ -382,7 +382,7 @@ troubleshooting topic](#virtualization-must-be-enabled). ![Hyper-V manager](images/hyperv-manager.png) 4. Set up an external network switch. If you plan at any point to use [Docker -Machine](/machine/overview.md) to set up multiple local VMs, you will need this +Machine](/machine/overview.md) to set up multiple local VMs, you need this anyway, as described in the topic on the [Hyper-V driver for [Docker Machine](/machine/drivers/hyper-v.md#example). You can replace `DockerNAT` with this switch. @@ -439,10 +439,10 @@ docker run -d -p 80:80 --name webserver nginx ``` Using `curl http://localhost`, or pointing your web browser at -`http://localhost` will not display the `nginx` web page (as it would do with +`http://localhost` does not display the `nginx` web page (as it would do with Linux containers). -In order to reach a Windows container from the local host, you need to specify +To reach a Windows container from the local host, you need to specify the IP address and port for the container that is running the service. 
You can get the container IP address by using [`docker inspect`](/engine/reference/commandline/inspect.md) with some @@ -458,7 +458,7 @@ $ docker inspect \ {% endraw %} ``` -This will give you the IP address of the container, for example: +This gives you the IP address of the container, for example: ```bash {% raw %} @@ -497,10 +497,13 @@ to work with Linux containers. #### If you still want to use nested virtualization -* Make sure your VMWare or Parallels has nested virtualization support enabled. -The path in both apps should be similar, e.g., **Hardware -> CPU & Memory -> Advanced Options -> Enable nested virtualization**. +* Make sure nested virtualization support is enabled in VMWare or Parallels. +Check the settings in +**Hardware -> CPU & Memory -> Advanced Options -> Enable nested virtualization** +(the exact menu sequence might vary slightly). -* Configure your VM with at least 2 CPUs and sufficient memory (e.g., 6GB). +* Configure your VM with at least 2 CPUs and sufficient memory to run your +workloads. * Make sure your system is more or less idle. @@ -512,13 +515,13 @@ Nehalem based Mac Pros and so do newer generations of Intel processors. #### Typical failures we see with nested virtualization -* Slow boot time of the Linux VM. If you look in the logs, you'll see +* Slow boot time of the Linux VM. If you look in the logs, you find some entries prefixed with `Moby`. On real hardware, it takes 5-10 seconds to boot the Linux VM; roughly the time between the `Connected` log entry and the `* Starting Docker ... [ ok ]` log entry. If you boot the Linux VM inside a Windows VM, this may take considerably longer. We have a timeout of 60s or so. If the VM hasn't started by that time, we retry. If the retry fails we print an error. You -may be able to work around this by providing more resources to the Windows VM.
* Sometimes the VM fails to boot when Linux tries to calibrate the time stamp counter (TSC). This process is quite timing sensitive and may fail @@ -541,14 +544,14 @@ Here is an example command and error message: C:\Program Files\Docker\Docker\Resources\bin\docker.exe: Error while pulling image: Get https://index.docker.io/v1/repositories/library/hello-world/images: dial tcp: lookup index.docker.io on 10.0.75.1:53: no such host. See 'C:\Program Files\Docker\Docker\Resources\bin\docker.exe run --help'. -As an immediate workaround to this problem, reset the DNS server to use the Google DNS fixed address: `8.8.8.8`. You can configure this via the **Settings** -> **Network** dialog, as described in the topic [Network](/docker-for-windows/index.md#network). Docker will automatically restart when you apply this setting, which could take some time. +As an immediate workaround to this problem, reset the DNS server to use the Google DNS fixed address: `8.8.8.8`. You can configure this via the **Settings** -> **Network** dialog, as described in the topic [Network](/docker-for-windows/index.md#network). Docker automatically restarts when you apply this setting, which could take some time. We are currently investigating this issue. #### Networking issues on pre Beta 10 versions Docker for Windows Beta 10 and later fixed a number of issues around the networking setup. If you still experience networking issue, this may be related -to previous Docker for Windows installations. In this case, please quit Docker +to previous Docker for Windows installations. In this case, quit Docker for Windows and perform the following steps: ##### 1. Remove multiple `DockerNAT` VMswitches @@ -619,7 +622,7 @@ To create a larger NAT prefix, do the following. New-NetNat -Name DockerNAT -InternalIPInterfaceAddressPrefix 10.0.0.0/16 - The next time Docker for Windows starts, it will use the new, wider prefix. + The next time Docker for Windows starts, it uses the new, wider prefix. 
Alternatively, you can use a different NAT name and NAT prefix and adjust the NAT prefix Docker for Windows uses accordingly via the `Settings` panel. @@ -637,13 +640,13 @@ Restart your PC to stop / discard any vestige of the daemon running from the pre ### Unset `DOCKER_HOST` -You do not need `DOCKER_HOST` set, so unset as it may be pointing at -another Docker (e.g. VirtualBox). If you use bash, `unset ${!DOCKER_*}` -will unset existing `DOCKER` environment variables you have set. For other shells, unset each environment variable individually. +The `DOCKER_HOST` environment variable does not need to be set. +If you use bash, use the command `unset ${!DOCKER_*}` to unset it. +For other shells, consult the shell's documentation. ### Make sure Docker is running for webserver examples -For the `hello-world-nginx` example and others, Docker for Windows must be running in order to get to the webserver on `http://localhost/`. Make sure that the Docker whale is showing in the menu bar, and that you run the Docker commands in a shell that is connected to the Docker for Windows Engine (not Engine from Toolbox). Otherwise, you might start the webserver container but get a "web page not available" error when you go to `docker`. +For the `hello-world-nginx` example and others, Docker for Windows must be running to get to the webserver on `http://localhost/`. Make sure that the Docker whale is showing in the menu bar, and that you run the Docker commands in a shell that is connected to the Docker for Windows Engine (not Engine from Toolbox). Otherwise, you might start the webserver container but get a "web page not available" error when you go to `docker`. ### How to solve `port already allocated` errors @@ -660,7 +663,13 @@ docker app. ### Docker fails to start when firewall or anti-virus software is installed -**Some firewalls and anti-virus software might be incompatible with Microsoft Windows 10 builds** (e.g., Windows 10 Anniversary Update).
The conflict typically occurs after a Windows update or new install of the firewall, and manifests as an error response from the Docker daemon and a **Docker for Windows start failure**. The Comodo Firewall was one example of this problem, but users report that software has since been updated to work with these Windows 10 builds. +**Some firewalls and anti-virus software might be incompatible with Microsoft +Windows 10 builds**, such as Windows 10 Anniversary Update. The conflict +typically occurs after a Windows update or new install of the firewall, and +manifests as an error response from the Docker daemon and a **Docker for +Windows start failure**. The Comodo Firewall was one example of this problem, +but users report that software has since been updated to work with these +Windows 10 builds. See the Comodo forums topics [Comodo Firewall conflict with Hyper-V](https://forums.comodo.com/bug-reports-cis/comodo-firewall-began-conflict-with-hyperv-t116351.0.html) diff --git a/docker-hub/bitbucket.md b/docker-hub/bitbucket.md index 89bc186ca86..af76d41e55b 100644 --- a/docker-hub/bitbucket.md +++ b/docker-hub/bitbucket.md @@ -4,15 +4,14 @@ keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, doc title: Configure automated builds with Bitbucket --- -If you've previously linked Docker Hub to your Bitbucket account, you'll be able -to skip to [Creating an Automated -Build](bitbucket.md#creating-an-automated-build). +If you've previously linked Docker Hub to your Bitbucket account, skip to +[Creating an Automated Build](bitbucket.md#creating-an-automated-build). ## Link to your Bitbucket account -In order to set up an Automated Build of a repository on Bitbucket, you need to +To set up an Automated Build of a repository on Bitbucket, you need to link your [Docker Hub](https://hub.docker.com/account/authorized-services/) -account to a Bitbucket account. This will allow the registry to see your +account to a Bitbucket account. 
This allows the registry to see your Bitbucket repositories. To add, remove, or view your linked account, go to the **Linked Accounts & @@ -21,7 +20,7 @@ Services** section of your Hub profile **Settings**. ![authorized-services](images/authorized-services.png) Then follow the onscreen instructions to authorize and link your Bitbucket -account to Docker Hub. Once it is linked, you'll be able to create a Docker Hub +account to Docker Hub. Once it is linked, you can create a Docker Hub repository from which to create the Automatic Build. ## Create an Automated Build diff --git a/docker-hub/builds.md b/docker-hub/builds.md index 9cef418e768..fe8f0a31a14 100644 --- a/docker-hub/builds.md +++ b/docker-hub/builds.md @@ -33,7 +33,7 @@ To view your current connection settings, log in to Docker Hub and choose Currently Docker Hub does not support Git LFS (Large File Storage). If you have binaries in your build context that are managed by Git LFS, only the pointer -file will be present in the clone made during the automated build, which is not +file is present in the clone made during the automated build, which is not what you want. Subscribe to the [GitHub @@ -63,7 +63,7 @@ limitation. ## Create an automated build Automated build repositories rely on the integration with your code repository -in order to build. However, you can also push already-built images to these +to build. However, you can also push already-built images to these repositories using the `docker push` command. 1. Select **Create** > **Create Automated Build** from Docker Hub. @@ -142,8 +142,8 @@ can click **Cancel** to end them. The statuses are: -* **Queued**: You're in line and your image will be built soon. Queue time varies depending on number of concurrent builds available to you. -* **Building**: The image is being built. +* **Queued**: You're in line for your image to be built. Queue time varies depending on number of concurrent builds available to you. +* **Building**: The image is building. 
* **Success**: The image has been built with no issues. * **Error**: There was an issue with your image. Click the row to go to the Builds Details screen. The banner at the top of the page displays the last sentence of the log file, which indicates what the error was. If you need more information, scroll to the bottom of the screen to the logs section. diff --git a/docker-hub/github.md b/docker-hub/github.md index 91d83903fa1..7f9c3674fcc 100644 --- a/docker-hub/github.md +++ b/docker-hub/github.md @@ -4,7 +4,7 @@ keywords: Docker, docker, registry, accounts, plans, Dockerfile, Docker Hub, doc title: Configure automated builds from GitHub --- -If you've previously linked Docker Hub to your GitHub account, you'll be able to +If you've previously linked Docker Hub to your GitHub account, skip to [Creating an Automated Build](github.md#creating-an-automated-build). ## Linking Docker Hub to a GitHub account @@ -13,18 +13,18 @@ skip to [Creating an Automated Build](github.md#creating-an-automated-build). > Automated Builds currently require *read* and *write* access since > [Docker Hub](https://hub.docker.com) needs to set up a GitHub service > hook. We have no choice here, this is how GitHub manages permissions. -> We do guarantee nothing else will be touched in your account. +> We do guarantee nothing else is touched in your account. -In order to set up an Automated Build of a repository on GitHub, you need to +To set up an Automated Build of a repository on GitHub, you need to link [Docker Hub](https://hub.docker.com/account/authorized-services/) to your -GitHub account. This will allow the registry to see your GitHub repositories. +GitHub account. This allows the registry to see your GitHub repositories. To add, remove or view your linked account, go to the "Linked Accounts & Services" section of your Hub profile "Settings". 
![authorized-services](images/authorized-services.png) -When linking to GitHub, you'll need to select either "Public and Private", +When linking to GitHub, select either "Public and Private", or "Limited Access" linking. ![add-authorized-github-service.png](images/add-authorized-github-service.png) @@ -37,25 +37,25 @@ If you choose "Limited Access", Docker Hub only gets permission to access your public data and public repositories. Follow the onscreen instructions to authorize and link your GitHub account to -Docker Hub. Once it is linked, you'll be able to choose a source repository from +Docker Hub. Once it is linked, you can choose a source repository from which to create the Automatic Build. -You will be able to review and revoke Docker Hub's access by visiting the +You can review and revoke Docker Hub's access by visiting the [GitHub User's Applications settings](https://github.com/settings/applications). > **Note**: If you delete the GitHub account linkage that is used for one of your -> automated build repositories, the previously built images will still be available. +> automated build repositories, the previously built images are still available. > If you re-link to that GitHub account later, the automated build can be started > using the "Start Build" button on the Hub, or if the webhook on the GitHub repository -> still exists, it will be triggered by any subsequent commits. +> still exists, it is triggered by any subsequent commits. ## Auto builds and limited linked GitHub accounts. If you selected to link your GitHub account with only a "Limited Access" link, -then after creating your automated build, you will need to either manually +then after creating your automated build, you need to either manually trigger a Docker Hub build using the "Start a Build" button, or add the GitHub webhook manually, as described in [GitHub Service -Hooks](github.md#github-service-hooks). This will only work for repositories +Hooks](github.md#github-service-hooks). 
This only works for repositories under the user account, and adding an automated build to a public GitHub organization using a "Limited Access" link is not possible. @@ -72,9 +72,9 @@ You can now re-link your account at any time. ## GitHub organizations -GitHub organizations and private repositories forked from organizations will be +GitHub organizations and private repositories forked from organizations are made available to auto build using the "Docker Hub Registry" application, which -needs to be added to the organization - and then will apply to all users. +needs to be added to the organization - and then applies to all users. To check, or request access, go to your GitHub user's "Setting" page, select the "Applications" section from the left side bar, then click the "View" button for @@ -84,7 +84,7 @@ To check, or request access, go to your GitHub user's "Setting" page, select the The organization's administrators may need to go to the Organization's "Third party access" screen in "Settings" to grant or deny access to the Docker Hub -Registry application. This change will apply to all organization members. +Registry application. This change applies to all organization members. ![Check Docker Hub application access to Organization](images/gh-check-admin-org-dh-app-access.png) @@ -105,7 +105,7 @@ Once you've selected the source repository, you can then configure: - If the visibility of the Docker repository: "Public" or "Private" You can change the accessibility options after the repository has been created. If you add a Private repository to a Hub user namespace, then you can only add other users - as collaborators, and those users will be able to view and pull all images in that + as collaborators, and those users can view and pull all images in that repository. 
To configure more granular access permissions, such as using teams of users or allow different users access to different image tags, then you need to add the Private repository to a Hub organization for which your user has Administrator @@ -120,13 +120,12 @@ You can also select one or more: You can modify the description for the repository by clicking the "Description" section of the repository view. -Note that the "Full Description" will be over-written by the README.md file when the +The "Full Description" is over-written by the README.md file when the next build is triggered. ## GitHub private submodules -If your GitHub repository contains links to private submodules, you'll get an -error message in your build. +If your GitHub repository contains links to private submodules, your build fails. Normally, the Docker Hub sets up a deploy key in your GitHub repository. Unfortunately, GitHub only allows a repository deploy key to access a single @@ -188,7 +187,7 @@ When you create an Automated Build from a GitHub user that has full "Public and Private" linking, a Service Hook should get automatically added to your GitHub repository. -If your GitHub account link to the Docker Hub is "Limited Access", then you will +If your GitHub account link to the Docker Hub is "Limited Access", then you need to add the Service Hook manually. To add, confirm, or modify the service hook, log in to GitHub, then navigate to diff --git a/docker-hub/index.md b/docker-hub/index.md index 9c414196664..c447623ca08 100644 --- a/docker-hub/index.md +++ b/docker-hub/index.md @@ -36,7 +36,7 @@ Docker Hub provides the following major features: ## Create a Docker ID -To explore Docker Hub, you'll need to create an account by following the +To explore Docker Hub, you need to create an account by following the directions in [Your Docker ID](/docker-hub/accounts.md). 
> **Note**: You can search for and pull Docker images from Hub without logging diff --git a/docker-hub/official_repos.md b/docker-hub/official_repos.md index ae028f0e31c..d17ce54b0d1 100644 --- a/docker-hub/official_repos.md +++ b/docker-hub/official_repos.md @@ -90,8 +90,8 @@ questions should be directed to `#docker-library` on Freenode IRC. ## How do I create a new Official Repository? From a high level, an Official Repository starts out as a proposal in the form -of a set of GitHub pull requests. You'll find detailed and objective proposal -requirements in the following GitHub repositories: +of a set of GitHub pull requests. Detailed and objective proposal +requirements are documented in the following GitHub repositories: * [docker-library/official-images](https://github.com/docker-library/official-images) diff --git a/docker-hub/orgs.md b/docker-hub/orgs.md index a122bc264e5..4115d78f039 100644 --- a/docker-hub/orgs.md +++ b/docker-hub/orgs.md @@ -45,7 +45,7 @@ automatically have Read permissions: - `Write` access allows users to push to non-automated repositories on the Docker Hub. - `Admin` access allows users to modify the repositories "Description", "Collaborators" rights, "Public/Private" visibility and "Delete". -> **Note**: A User who has not yet verified their email address will only have +> **Note**: A User who has not yet verified their email address only has > `Read` access to the repository, regardless of the rights their team > membership has given them. diff --git a/docker-hub/repos.md b/docker-hub/repos.md index 234ad8a8066..9ac512246b3 100644 --- a/docker-hub/repos.md +++ b/docker-hub/repos.md @@ -63,17 +63,17 @@ when you `docker save` an image. ## Creating a new repository on Docker Hub -When you first create a Docker Hub user, you will have a "Get started with +When you first create a Docker Hub user, you see a "Get started with Docker Hub." screen, from which you can click directly into "Create Repository". 
You can also use the "Create ▼" menu to "Create Repository". When creating a new repository, you can choose to put it in your Docker ID namespace, or that of any [organization](/docker-hub/orgs.md) that you are in the "Owners" -team. The Repository Name will need to be unique in that namespace, can be two +team. The Repository Name needs to be unique in that namespace, can be two to 255 characters, and can only contain lowercase letters, numbers or `-` and `_`. -The "Short Description" of 100 characters will be used in the search results, +The "Short Description" of 100 characters is used in the search results, while the "Full Description" can be used as the Readme for the repository, and can use Markdown to add simple formatting. @@ -84,7 +84,7 @@ Hub based repository. ## Pushing a repository image to Docker Hub -In order to push a repository to the Docker Hub, you need to +To push a repository to the Docker Hub, you need to name your local image using your Docker Hub username, and the repository name that you created in the previous step. You can add multiple images to a repository, by adding a specific `:` to @@ -100,7 +100,7 @@ Now you can push this repository to the registry designated by its name or tag. $ docker push /: -The image will then be uploaded and available for use by your team-mates and/or +The image is then uploaded and available for use by your team-mates and/or the community. @@ -119,7 +119,7 @@ appropriate, you can flag them for review. ## Collaborators and their role A collaborator is someone you want to give access to a private repository. Once -designated, they can `push` and `pull` to your repositories. They will not be +designated, they can `push` and `pull` to your repositories. They are not allowed to perform any administrative tasks such as deleting the repository or changing its status from private to public. @@ -138,7 +138,7 @@ want to keep private, either to your own account or within an organization or team. 
To work with a private repository on [Docker Hub](https://hub.docker.com), you -will need to add one using the [Add Repository](https://hub.docker.com/add/repository/) button. You get one private +need to add one using the [Add Repository](https://hub.docker.com/add/repository/) button. You get one private repository for free with your Docker Hub user account (not usable for organizations you're a member of). If you need more accounts you can upgrade your [Docker Hub](https://hub.docker.com/account/billing-plans/) plan. @@ -153,11 +153,10 @@ Private repositories are just like public ones. However, it isn't possible to browse them or search their content on the public registry. They do not get cached the same way as a public repository either. -It is possible to give access to a private repository to those whom you -designate (i.e., collaborators) from its "Settings" page. From there, you can -also switch repository status (*public* to *private*, or vice-versa). You will -need to have an available private repository slot open before you can do such a -switch. If you don't have any available, you can always upgrade your +You can designate collaborators and manage their access to a private +repository from that repository's *Settings* page. You can also toggle the +repository's status between public and private, if you have an available +repository slot open. Otherwise, you can upgrade your [Docker Hub](https://hub.docker.com/account/billing-plans/) plan. ## Webhooks @@ -220,17 +219,17 @@ successfully tested, then update a separate Changelog once the deployment is complete. After clicking the "Add webhook" button, simply add as many URLs as necessary in your chain. -The first webhook in a chain will be called after a successful push. Subsequent -URLs will be contacted after the callback has been validated. +The first webhook in a chain is called after a successful push. Subsequent +URLs are contacted after the callback has been validated. 
### Validating a callback -In order to validate a callback in a webhook chain, you need to +To validate a callback in a webhook chain, you need to 1. Retrieve the `callback_url` value in the request's JSON payload. 1. Send a POST request to this URL containing a valid JSON body. -> **Note**: A chain request will only be considered complete once the last +> **Note**: A chain request is only considered complete once the last > callback has been validated. To help you debug or simply view the results of your webhook(s), view the @@ -241,9 +240,9 @@ To help you debug or simply view the results of your webhook(s), view the The following parameters are recognized in callback data: * `state` (required): Accepted values are `success`, `failure`, and `error`. - If the state isn't `success`, the webhook chain will be interrupted. -* `description`: A string containing miscellaneous information that will be - available on the Docker Hub. Maximum 255 characters. + If the state isn't `success`, the webhook chain is interrupted. +* `description`: A string containing miscellaneous information that is + available on Docker Hub. Maximum 255 characters. * `context`: A string containing the context of the operation. Can be retrieved from the Docker Hub. Maximum 100 characters. * `target_url`: The URL where the results of the operation can be found. Can be diff --git a/docker-hub/webhooks.md b/docker-hub/webhooks.md index dc06f1c3c22..9dcb4cbcb74 100644 --- a/docker-hub/webhooks.md +++ b/docker-hub/webhooks.md @@ -13,7 +13,7 @@ Configure webhooks on `https://hub.docker.com/r///~/settin ![Create Webhook](/docker-hub/images/webhooks.png) -With your webhook, you specify a target URL to POST to. Docker Hub will POST +With your webhook, you specify a target URL to POST to. 
Docker Hub POSTs the URL with the following payload: ```json diff --git a/docker-id/index.md b/docker-id/index.md index c5f9ff60627..a6ac8586758 100644 --- a/docker-id/index.md +++ b/docker-id/index.md @@ -19,7 +19,7 @@ Your Docker ID becomes your user namespace for hosted Docker services, and becom 1. Go to the [Docker Cloud sign up page](https://cloud.docker.com). -2. Enter a username that will become your Docker ID. +2. Enter a username that is also your Docker ID. Your Docker ID must be between 4 and 30 characters long, and can only contain numbers and lowercase letters. diff --git a/docker-store/byol.md b/docker-store/byol.md index 35276565c30..a58482aa329 100644 --- a/docker-store/byol.md +++ b/docker-store/byol.md @@ -57,13 +57,13 @@ To use Docker Store as your fulfillment service, an ISV must: - Apply and be approved to use the Gated BYOL feature. - Create a BYOL, "bring your own license" plans, in the Docker Store Publisher center. -Docker will provide: +Docker provides: - One or more authorization tokens to the publisher for making API requests to the Docker fulfillment service. - A partner identifier. ### Fulfillment and orders usage -Docker Store provides an API for ISVs to create product orders and generate access tokens (in the form of URLs) for customer subscriptions to their products. ISVs will also be able to access reports and usage information about their products on Docker Store. +Docker Store provides an API for ISVs to create product orders and generate access tokens (in the form of URLs) for customer subscriptions to their products. ISVs can also access reports and usage information about their products on Docker Store. The BYOL program at Docker Store helps ISVs control access to their products for multiple use cases, such as: - New customers purchasing software from the ISV for the first time @@ -87,14 +87,14 @@ the ISV can change or cancel the corresponding subscription by making requests to the Store billing service. 
The subscription that a user receives at the end of this process appears with -their other subscriptions in Docker Store. However, the user will not be able to +their other subscriptions in Docker Store. However, the user cannot directly change or cancel it, as it represents an entitlement under the control -of the ISV. The Store UI will direct the user to contact the ISV for any desired +of the ISV. The Store UI directs the user to contact the ISV for any desired changes. ### Product keys -If an ISV's software uses product keys, these may be supplied at the time of order creation. Once the order has been fulfilled, the product keys will be available to the user from the user's subscription detail page. The ISV's installation documentation should refer the user to look for the keys there. +If an ISV's software uses product keys, these may be supplied at the time of order creation. Once the order has been fulfilled, the product keys are available to the user from the user's subscription detail page. The ISV's installation documentation should refer the user to look for the keys there. ### API usage @@ -106,7 +106,7 @@ or look up information about the order after it has been created.  ### Gated BYOL plan setup Once a publisher has been onboarded to create an gated BYOL plan, the publisher -center will begin to allow them to set up a new, unpublished plan with the gated +center allows them to set up a new, unpublished plan with the gated BYOL option: ![byol plan](images/publish_byol.png) @@ -122,20 +122,20 @@ license from the publisher. All API requests should be made to: -For example, the full path for the "Create Order" API will POST to: https://store.docker.com/api/fulfillment/v1/orders +For example, the full path for the "Create Order" API POSTs to: https://store.docker.com/api/fulfillment/v1/orders All API requests to the fulfillment service must have an "Authorization: Bearer" -header with an authorization string that will be provided by Docker. 
An example +header with an authorization string provided by Docker. An example header is: -``` +```none Authorization: Bearer 9043ea5c-172a-4d4b-b255-a1dab96fb631 ``` ISVs should closely protect their authorization token as if it were a password, and alert Docker if it has been compromised or needs replacement. -All request and response bodies must/will be encoded with JSON using UTF-8. +All request and response bodies are encoded with JSON using UTF-8. ### Data structures @@ -155,7 +155,7 @@ All request and response bodies must/will be encoded with JSON using UTF-8. * order\_id: `ord-93b2dba2-79e3-11e6-8b77-86f30ca893d3` (string, required) - The order id. * token: `DOCKER-TOKEN-234` (string, required) - The access token created for this order by the fulfillment service. -* docker\_id: `a76808b87b6c11e68b7786f30ca893d3` (string, optional) - The docker id of the user that fulfilled the order. Note that this is not set unless the order is in a fulfilled state. +* docker\_id: `a76808b87b6c11e68b7786f30ca893d3` (string, optional) - The docker id of the user that fulfilled the order. This is not set unless the order is in a fulfilled state. * state: created (enum, required) - The order state. * created * fulfilled @@ -174,8 +174,8 @@ All request and response bodies must/will be encoded with JSON using UTF-8. * id: `390745e6-faba-11e6-bc64-92361f002671` (string, required) - The order item id. * product\_id: `bf8f7c15-0c3b-4dc5-b5b3-1595ba9b589e` (string, required) - The Store product id associated with the order item. * rate\_plan\_id: `85717ec8-6fcf-4fd9-9dbf-051af0ce1eb3` (string, required) - The Store rate plan id associated with the order item. -* subscription\_start\_date: `2016-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription start date. If not specified, the subscription will start at order fulfillment time. 
-* subscription\_end\_date: `2019-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription end date. If not specified, the subscription will end based on the plan duration period. +* subscription\_start\_date: `2016-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription start date. If not specified, the subscription starts at order fulfillment time. +* subscription\_end\_date: `2019-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription end date. If not specified, the subscription ends based on the plan duration period. * pricing\_components (array[PricingComponent], required) - One or more pricing components associated with the order item. * metadata (OrderItemMetadata, optional) - Any key/value strings given for this item in the order creation request. * product\_keys (array[ProductKey], optional) - Product keys associated with the order item. @@ -187,8 +187,8 @@ All request and response bodies must/will be encoded with JSON using UTF-8. * sku: ZZ456A (string, optional) - The order item SKU. * product\_id: `bf8f7c15-0c3b-4dc5-b5b3-1595ba9b589e` (string, optional) - The Store product id associated with the order item. * rate\_plan\_id: `85717ec8-6fcf-4fd9-9dbf-051af0ce1eb3` (string, optional) - The Store rate plan id associated with the order item. -* subscription\_start\_date: `2016-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription start date. If not specified, the subscription will start at order fulfillment time. -* subscription\_end\_date: `2019-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription end date. If not specified, the subscription will end based on the plan duration period. +* subscription\_start\_date: `2016-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription start date. 
If not specified, the subscription starts at order fulfillment time. +* subscription\_end\_date: `2019-06-02T05:10:54Z` (string, optional) - An ISO-8601 timestamp representing the subscription end date. If not specified, the subscription ends based on the plan duration period. * pricing\_components (array[PricingComponent], required) - One or more pricing components associated with the order item. * metadata (OrderItemMetadata, optional) - Mapping of key/value strings for this order item. * product\_keys (array[ProductKeyCreateRequest], optional) - Product keys associated with the order item. @@ -204,12 +204,12 @@ All request and response bodies must/will be encoded with JSON using UTF-8. #### Properties -* label: `Production` (string, required) - The human-readable label for the given product key that will be displayed to the customer. +* label: `Production` (string, required) - The human-readable label for the given product key that is displayed to the customer. * media\_type: `text/plain` (enum, required) - An accepted IANA Media Type for this product key. This suggests to the user interface how to display the product key for the customer to use. * text/plain * application/json * file\_name: `production-key.txt` (string, optional) - The file name for the downloaded file if the product key is a blob that requires or allows download by the customer. -* value: `AbKe13894Aksel` (string, required) - The contents of the product key as a string. If the value is blob that cannot be represented as a string, the contents will be encoded as a Base64 string. +* value: `AbKe13894Aksel` (string, required) - The contents of the product key as a string. If the value is blob that cannot be represented as a string, the contents are encoded as a Base64 string. ### ProductKey (object) @@ -217,12 +217,12 @@ All request and response bodies must/will be encoded with JSON using UTF-8. * id: `390745e6-faba-11e6-bc64-92361f002671` (string, required) - The product key id. 
* order\_item\_id: `85717ec8-6fcf-4fd9-9dbf-051af0ce1eb3` (string, required) - The id of the order item that this product key is associated with. -* label: `Production` (string, required) - The human-readable label for the given product key that will be displayed to the customer. +* label: `Production` (string, required) - The human-readable label for the given product key that is displayed to the customer. * media\_type: `text/plain` (enum, required) - An accepted IANA Media Type for this product key. This suggests to the user interface how to display the product key for the customer to use. * text/plain * application/json * file\_name: `production-key.txt` (string, optional) - The file name for the downloaded file if the product key is a blob that requires or allows download by the customer. -* value: `AbKe13894Aksel` (string, required) - The contents of the product key as a string. If the value is blob that cannot be represented as a string, the contents will be encoded as a Base64 string. +* value: `AbKe13894Aksel` (string, required) - The contents of the product key as a string. If the value is blob that cannot be represented as a string, the contents are encoded as a Base64 string. * created: `2016-06-02T05:10:54Z` (string, required) - An ISO-8601 product key creation timestamp. * updated: `2016-06-02T05:10:54Z` (string, required) - An ISO-8601 product key updated timestamp. @@ -252,7 +252,7 @@ Create an order. #### List orders by token [GET /orders{?token}] -Retrieve an order with the given token. An empty array will be returned if no orders for the given token are found. +Retrieve an order with the given token. An empty array is returned if no orders for the given token are found. * Parameters @@ -261,7 +261,7 @@ Retrieve an order with the given token. An empty array will be returned if no or #### List orders by partner [GET /orders{?partner\_id}] -List orders associated with the the given partner. 
An empty array will be returned if there are no orders associated with the partner. +List orders associated with the given partner. An empty array is returned if there are no orders associated with the partner. * Parameters @@ -288,7 +288,7 @@ Retrieve an order by id. A number of operations can be performed by `PATCH`ing an order: -**Fulfill** an order. Fulfilling an order will put it in a fulfilled state, and will kick off a process to create subscriptions for each order item associated with the order. +**Fulfill** an order. Fulfilling an order puts it in a fulfilled state, and kicks off a process to create subscriptions for each order item associated with the order. * Request (application/json) * Attributes (Order) -**Cancel** an order. Canceling an order will put it in a cancelled state. The order is frozen once cancelled (that is, no further changes may be made to it). +**Cancel** an order. Canceling an order puts it in a cancelled state. The order is frozen once cancelled (that is, no further changes may be made to it). * Request (application/json) * Attributes (OrderCancellationRequest) @@ -312,7 +312,7 @@ A number of operations can be performed by `PATCH`ing an order: #### List product keys for order item [GET] -Retrieve all product keys for an order item by id. An empty array will be returned if the order item does not have any product keys. +Retrieve all product keys for an order item by id. An empty array is returned if the order item does not have any product keys. * Request (application/json) * Response 200 (application/json) @@ -321,7 +321,7 @@ Retrieve all product keys for an order item by id. An empty array will be return #### Create product key for order item [POST] -Create a product key for an existing order item. Note that adding new product keys will not affect existing product keys. +Create a product key for an existing order item. 
Adding new product keys does not affect existing product keys. * Request (application/json) diff --git a/docker-store/customer_faq.md b/docker-store/customer_faq.md index 4a9cec62e98..acb9e6407b5 100644 --- a/docker-store/customer_faq.md +++ b/docker-store/customer_faq.md @@ -53,7 +53,7 @@ selected account on the **My Content** page. You can apply to become a Docker Store publisher by filling out the form [here](https://store.docker.com/publisher/signup). When you've been -accepted to the program, you'll set up a publisher profile and submit your +accepted to the program, you can set up a publisher profile and submit your images for review. Learn more about the publisher duties and requirements [here](https://success.docker.com/Store). @@ -73,7 +73,7 @@ pricing stable and consistent, rather than fluctuating with exchange rates. ### When do you charge my credit card? We automatically charge your credit card on the first day of your billing cycle -each month, and the charge will come from Docker, Inc. Your billing cycle is a +each month, and the charge comes from Docker, Inc. Your billing cycle is a 30 day period starting on the day you subscribe. ### What do I do if my payment fails? @@ -90,7 +90,7 @@ organization name from the **Account** menu before updating the information. ### How does cancellation work? Do you offer refunds? -You can cancel a subscription at any time, however you will still be billed +You can cancel a subscription at any time, however you are still billed for the full month, and have access to the subscription content until the end of that billing period. diff --git a/docker-store/index.md b/docker-store/index.md index ae30540328d..1fbf8bf251d 100644 --- a/docker-store/index.md +++ b/docker-store/index.md @@ -24,15 +24,15 @@ experience the following benefits: a) logged-in users, b) users who have purchased a license, or c) all Docker users. We’ll help you manage and control your distribution. 
-* **We'll handle checkout.** You don’t have to set up your own digital +* **We handle checkout.** You don’t need to set up your own digital e-commerce site when you sell your content through the Docker Store. Choose - how much you want to charge for your products and we'll handle the billing. + how much you want to charge for your products and we handle the billing. * **Seamless updates and upgrades for your customers.** We tell your customers when your content has upgrades or updates available. * **Become Docker Certified.** Publisher container images and plugins that meet - the quality, security, and support criteria of the program will display a + the quality, security, and support criteria of the program display a “Docker Certified” badge within the Docker Store (which can be used in external marketing). diff --git a/docker-store/publish.md b/docker-store/publish.md index 306e8828ab5..edaf87a5a1a 100644 --- a/docker-store/publish.md +++ b/docker-store/publish.md @@ -6,13 +6,13 @@ title: Publish content on Docker Store ## Permitted content and support options -* Content that runs on a Docker Enterprise Edition (i.e. Docker Certified +* Content that runs on a Docker Enterprise Edition (Docker Certified Infrastructure) may be published in the Store. This content may also qualify to become a Docker Certified Container or Plugin image and be backed by collaborative Docker/Publisher support * Content that runs on the Docker Community Edition may be published in the - Store, but will not be supported by Docker nor is it eligible for certification. + Store, but is not supported by Docker nor is it eligible for certification. * Content that requires a non Certified Infrastructure environment may not be published in the Store. @@ -51,7 +51,7 @@ Commercial content and other supported images may qualify for the Docker Certified Container or Plugins quality mark. 
The testing for this program goes beyond the vulnerability scan and also evaluates container images for Docker best practices developed over years of experience. Collaborative support -capability between Docker and the publisher is also established. Please refer +capability between Docker and the publisher is also established. Refer to the diagram below for a high-level summary: ![publishing workflow](images/publish-diagram.png) @@ -59,7 +59,7 @@ to the diagram below for a high-level summary: ## Create great content Create your content, and follow our best practices to Dockerize it. Keep your -images small, your layers few, and your components secure. Please refer to the +images small, your layers few, and your components secure. Refer to the links and guidelines listed below to build and deliver great content: * [Best practices for writing Dockerfiles](/engine/userguide/eng-image/dockerfile_best-practices/) @@ -111,7 +111,7 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/ ``` -> **Note**: Files introduced in one directive of your Dockerfile can only be +> **Note**: Files introduced in one directive of your Dockerfile can only be > removed in the same directive (and not in subsequent directives in your Dockerfile). ### Keep required components up-to-date @@ -151,7 +151,7 @@ story, and what you do. At the very minimum, we require: You must provide the namespace (including repository and tags) of a private repository on Docker Cloud or Hub that contains the source for your product. -This repository path will not be shown to users, but the repositories you choose +This repository path is not shown to users, but the repositories you choose determine the Product Tiers available for customers to download. The following content information helps us make your product look great and @@ -172,8 +172,8 @@ discoverable: ### How the manifest information is displayed in the UI -(Please note that this is an approximate representation. 
We frequently make -enhancements to the look and some elements might shift around.) +This is an approximate representation. We frequently make +enhancements to the look and some elements might shift around. ![manifest information displayed on store UI](images/subscribed.png) @@ -181,7 +181,7 @@ enhancements to the look and some elements might shift around.) Docker users who download your content from the Store might need your help later, so be prepared for questions! The information you provide with your -submission will save support time in the future. +submission saves support time in the future. ### Support information @@ -213,7 +213,7 @@ To interpret the results, refer to the #### Classification of issues -* All Scan results will include the CVE numbers and a CVSS (Common Vulnerability +* All Scan results include the CVE numbers and a CVSS (Common Vulnerability Scoring System) Score. * CVE Identifiers (also referred to by the community as "CVE names," "CVE @@ -258,14 +258,14 @@ To interpret the results, refer to the users who have downloaded and subscribed for notifications. * A Repo’s listing can stay in the "hold" state for a maximum of 1 month, after - which the listing will be revoked. + which the listing is revoked. ### Usage audit and reporting -Unless otherwise negotiated, an audit of activity on publisher content will be +Unless otherwise negotiated, an audit of activity on publisher content is retained for no less than 180 days. -A monthly report of said activity will be provided to the publisher with the +A monthly report of said activity is provided to the publisher with the following data: (1) report of content download by free and paid customers by date and time; (2) report of purchase, cancellations, refunds, tax payments, where applicable, and subscription length for paid customers of the content; and @@ -278,7 +278,7 @@ There are three types of certification that appear in Docker Store. 
![certified container badge](images/certified_container.png) Certifies that a container image on Docker Store has been tested; complies best -practices guidelines; will run on a Docker Certified Infrastructure; has proven +practices guidelines; runs on a Docker Certified Infrastructure; has proven provenance; been scanned for vulnerabilities; and is supported by Docker and the content publisher @@ -301,12 +301,12 @@ the partner. Docker Certified Container images and plugins are meant to differentiate high quality content on Docker Store. Customers can consume Certified Containers with -confidence knowing that both Docker and the publisher will stand behind the +confidence knowing that both Docker and the publisher stand behind the solution. Further details can be found in the [Docker Partner Program Guide](https://www.docker.com/partnerprogramguide){: target="_blank" class="_"}. #### What are the benefits of Docker Certified? -Docker Store will promote Docker Certified Containers and Plugins running on +Docker Store promotes Docker Certified Containers and Plugins running on Docker Certified Infrastructure trusted and high quality content. With over 8B image pulls and access to Docker’s large customer base, a publisher can differentiate their content by certifying their images and plugins. With a @@ -314,7 +314,7 @@ revenue share agreement, Docker can be a channel for your content. The Docker Certified badge can also be listed alongside external references to your product. -#### How will the Docker Certified Container image be listed on Docker Store? +#### How is the Docker Certified Container image listed on Docker Store? These images are differentiated from other images on store through a certification badge. A user can search specifically for CI’s by limiting their @@ -330,15 +330,15 @@ on Docker Store. ![certified content example](images/FAQ-types-of-certified-content.png) -#### How will support be handled? +#### How is support handled? 
All Docker Certified Container images and plugins running on Docker Certified Infrastructure come with SLA based support provided by the publisher and Docker. Normally, a customer contacts the publisher for container and application level -issues. Likewise, a customer will contact Docker for Docker Edition support. +issues. Likewise, a customer contacts Docker for Docker Edition support. In the case where a customer calls Docker (or vice versa) about an issue on the -application, Docker will advise the customer about the publisher support process -and will perform a handover directly to the publisher if required. TSAnet is +application, Docker advises the customer about the publisher support process +and performs a handover directly to the publisher if required. TSAnet is required for exchange of support tickets between the publisher and Docker. #### How does a publisher apply to the Docker Certified program? @@ -354,15 +354,15 @@ Partner](https://goto.docker.com/partners){: target="_blank" class="_"} * Test your image against the Docker CS Engine 1.12+ or on a Docker Certified Infrastructure version 17.03 and above (Plugins must run on 17.03 and above) -* Submit your image for Certification through the publisher portal. Docker will - scan the image and work with you to address vulnerabilities. Docker will also - conduct a best practices review of the image. +* Submit your image for Certification through the publisher portal. Docker + scans the image and works with you to address vulnerabilities. Docker also + conducts a best practices review of the image. * Be a [TSAnet](https://www.tsanet.org/){: target="_blank" class="_"} member or join the Docker Limited Group. * Upon completion of Certification criteria, and acceptance by - Docker, Publisher’s product page will be updated to reflect Certified status. + Docker, the Publisher’s product page is updated to reflect Certified status. #### Is there a fee to join the program? @@ -371,11 +371,11 @@ the initial period. 
#### What is the difference between Official Images and Docker Certified? -Many Official images will transition to the Docker Certified program and will be -maintained and updated by the original owner of the software. Docker will -continue to maintain some of the base OS images and language frameworks. +Many Official images transition to the Docker Certified program and are +maintained and updated by the original owner of the software. Docker +continues to maintain some of the base OS images and language frameworks. -#### How will certification of plugins be handled? +#### How is certification of plugins handled? Docker Certification program recognizes the need to apply special scrutiny and testing to containers that access system level interfaces like storage volumes diff --git a/docker-store/publisher_faq.md b/docker-store/publisher_faq.md index 6c8cc1b63c9..f6eebdc255b 100644 --- a/docker-store/publisher_faq.md +++ b/docker-store/publisher_faq.md @@ -28,12 +28,12 @@ Start by applying to be a Docker Technology Partner at https://goto.docker.com/p * Identify content that can be listed on Store and includes a support offering * Test your image against Docker Certified Infrastructure version 17.03 and above (Plugins must run on 17.03 and above). -* Submit your image for Certification through the publisher portal. Docker will -scan the image and work with you to address vulnerabilities. Docker will also -conduct a best practices review of the image. +* Submit your image for Certification through the publisher portal. Docker +scans the image and works with you to address vulnerabilities. Docker also +conducts a best practices review of the image. * Be a TSAnet member or join the Docker Limited Group. * Upon completion of Certification criteria, and acceptance by Docker, -Publisher’s product page will be updated to reflect Certified status. +Publisher’s product page is updated to reflect Certified status. 
### What is the Docker Store Publisher Program application timeline? @@ -53,10 +53,10 @@ Yes. You can submit your content as a team. Edit the same product and update with the newly tagged repos. -### On the Information page, organization details are required. Do we need to fill those in again for every product we publish, or will they be carried over? And if we change them for a later image publish, will they be updated for all images published by our organization? +### On the Information page, organization details are required. Do we need to fill those in again for every product we publish, or are they carried over? And if we change them for a later image publish, are they updated for all images published by our organization? Organization details need to be filled in only once. Updating organization info -once will update this for all images published by your organization. +once updates this for all images published by your organization. ### On the page for another vendor’s product on Docker store, I see the following chunks of data: How do these fields map to the following that are required in the publish process? @@ -87,9 +87,7 @@ once will update this for all images published by your organization. ### How can I remove a submission? I don’t want to currently have this image published as it is missing several information. If you would like your submission removed, let us know by contacting us at -publisher-support@docker.com. We are redesigning our publisher portal, and will -present this capability to you soon. ETA for ability to delete submissions that -are not yet live is Q4 2017. +publisher-support@docker.com. ### Can publishers publish multi-container apps? @@ -97,7 +95,7 @@ Yes. Publishers can provide multiple images and add a compose file in the install instructions to describe how the multi-container app can be used. 
For now, we recommend asking publishers to look at this example from Microsoft https://store.docker.com/images/mssql-server-linux where they have Supported -Tags listed in the Install instructions (you don't necessarily have to list it +Tags listed in the Install instructions (you don't necessarily need to list it in the readme). ### Regarding source repo tags: it says not to use “latest”. However, if we want users to be able to download the images without specifying a tag, then presumably an image tagged “latest” is required. So how do we go about that? @@ -131,7 +129,7 @@ We provide users the following options to access your software * all users (including users without Docker Identity) Here is a [screenshot](https://user-images.githubusercontent.com/2453622/32067299-00cf1210-ba83-11e7-89f8-15deed6fef62.png) to describe how publishers can update the options provided to customers. -### If something is published as a free tier, for subscribed users only, does a user have to explicitly click Accept on the license terms for which we provide the link before they can download the image? +### If something is published as a free tier, for subscribed users only, does a user need to explicitly click Accept on the license terms for which we provide the link before they can download the image? Yes ### Do you have a license enforcement system for docker images sold on store? How are they protected, once they have been downloaded? What happens if a customer stop paying for the image I am selling after, let's say, 2 months? @@ -143,10 +141,10 @@ The expectation is that the publisher would take care of License Keys within the container. The License Key itself can be presented to the customer via Docker Store. We expect the Publisher to build short circuits into the container, so the container stops running once the License Key expires. Once a customer -cancels, or if the customer subscription expires - the customer will not be able -to download updates from the Store. 
+cancels, or if the customer subscription expires - the customer cannot +download updates from the Store. -If a user cancels their subscription, they will not be able to download updates +If a user cancels their subscription, they cannot download updates from the Store. The container may continue running. If you have a licensing scheme built into the container, the licensing scheme can be a forcing function and stop the container. (_We do not build anything into the container, it is up to the publisher_). @@ -164,7 +162,7 @@ feedback about pricing, send us an email at publisher-support@docker.com ### As a publisher, I have not setup any payment account. How does money get to me if my commercial content gets purchased by customers? -We (Docker) will cut you a check post a revenue share. Your Docker Store Vendor +We (Docker) cut you a check post a revenue share. Your Docker Store Vendor Agreement should cover specifics. ### How does Docker handle Export control? Can individual countries be specified if differing from Docker's list of embargoed countries? @@ -183,10 +181,10 @@ Content. Go to https://store.docker.com/publisher/center and click on "Actions" for the product you'd like to view analytics for. Here is a [screenshot](https://user-images.githubusercontent.com/2453622/32352202-6e87ce6e-bfdd-11e7-8fb0-08fe5a3e8930.png). -### How will metrics differentiate between Free and Paid subscribers? +### How do metrics differentiate between Free and Paid subscribers? -The Analytics reports will contain information about the Subscriber and the -relevant product plan. You will be able to identify subscribers for each plan +The Analytics reports contain information about the Subscriber and the +relevant product plan. You can identify subscribers for each plan for each product. ### Can I preview my submission before publishing? 
diff --git a/docker-store/trustchain.md b/docker-store/trustchain.md index 01e3badf505..d48cc7c7d4d 100644 --- a/docker-store/trustchain.md +++ b/docker-store/trustchain.md @@ -32,7 +32,7 @@ verify completion of the process when pulling an image from Docker Store: The Docker Store has a thorough and well-defined certification process to ensure top-quality content from producers is delivered to consumers in a trusted -manner. As a producer of content, you will be required to sign your images so +manner. As a producer of content, you are required to sign your images so that Docker can verify that your content is not tampered with upon starting the image certification and publishing process as outlined below: diff --git a/edge/engine/reference/commandline/README.md b/edge/engine/reference/commandline/README.md index aff6f8f155a..e97d18a4861 100644 --- a/edge/engine/reference/commandline/README.md +++ b/edge/engine/reference/commandline/README.md @@ -16,7 +16,7 @@ The output files are composed from two sources: - The **Extended Description** and **Examples** sections are pulled into the YAML from the files in [https://github.com/moby/moby/tree/master/docs/reference/commandline](https://github.com/moby/moby/tree/master/docs/reference/commandline) Specifically, the Markdown inside the `## Description` and `## Examples` - headings are parsed. Please submit corrections to the text in that repository. + headings are parsed. Submit corrections to the text in that repository. # Updating the YAML files diff --git a/edge/index.md b/edge/index.md index 4250ae320ec..ac98f2774ed 100644 --- a/edge/index.md +++ b/edge/index.md @@ -7,7 +7,7 @@ keywords: engine, edge, installation The current Docker CE Edge release is {{ site.docker_ce_edge_version }}. The Docker CE Edge channel provides monthly releases which allow you to try new features of Docker and verify bug fixes quickly. 
Edge releases are only supported for one -month, and a given Edge release will not receive any updates once a new edge +month, and a given Edge release does not receive any updates once a new Edge release is available. Stable releases are not published to the Edge channel, so Linux repository users diff --git a/engine/admin/ambassador_pattern_linking.md b/engine/admin/ambassador_pattern_linking.md index 55b1366dc64..baaa85f669f 100644 --- a/engine/admin/ambassador_pattern_linking.md +++ b/engine/admin/ambassador_pattern_linking.md @@ -59,7 +59,7 @@ ambassador. The following example shows what the `svendowideit/ambassador` container does automatically (with a tiny amount of `sed`) -On the Docker host (192.168.1.52) that Redis will run on: +On the Docker host (192.168.1.52) that Redis runs on: # start actual redis server $ docker run -d --name redis crosbymichael/redis diff --git a/engine/admin/ansible.md b/engine/admin/ansible.md index 41c2a16ba84..58f1f7f2f7d 100644 --- a/engine/admin/ansible.md +++ b/engine/admin/ansible.md @@ -4,61 +4,6 @@ keywords: ansible, installation, usage, docker, documentation title: Use Ansible --- -> **Note**: -> Please note this is a community contributed installation path. - -## Requirements - -To use this guide you'll need a working installation of -[Ansible](https://www.ansible.com/) version 2.1.0 or later. - -Requirements on the host that will execute the module: - -``` -python >= 2.6 -docker-py >= 1.7.0 -Docker API >= 1.20 -``` - -## Installation - -The `docker_container` module is a core module, and will ship with -Ansible by default. - -## Usage - -Task example that pulls the latest version of the `nginx` image and -runs a container. Bind address and ports are in the example defined -as [a variable](https://docs.ansible.com/ansible/playbooks_variables.html). 
- -``` ---- -- name: nginx container - docker: - name: nginx - image: nginx - state: reloaded - ports: - - "{{ nginx_bind_address }}:{{ nginx_port }}:{{ nginx_port }}" - cap_drop: all - cap_add: - - setgid - - setuid - pull: always - restart_policy: on-failure - restart_policy_retry: 3 - volumes: - - /some/nginx.conf:/etc/nginx/nginx.conf:ro - tags: - - docker_container - - nginx -... -``` - -## Documentation - -The documentation for the `docker_container` module is present at -[docs.ansible.com](https://docs.ansible.com/ansible/docker_container_module.html). - -Documentation covering Docker images, networks, and services is also present -at [docs.ansible.com](https://docs.ansible.com/ansible/list_of_cloud_modules.html#docker). +Docker no longer maintains specific documentation about using Ansible from within +Docker. To use Ansible within Docker, see the +[Ansible documentation](https://www.ansible.com/integrations/containers/docker). diff --git a/engine/admin/b2d_volume_resize.md b/engine/admin/b2d_volume_resize.md index f1b2981a2c0..64e621b6554 100644 --- a/engine/admin/b2d_volume_resize.md +++ b/engine/admin/b2d_volume_resize.md @@ -81,7 +81,7 @@ as a bootable ISO, is a free download, and works well with VirtualBox. 2. Clone the VMDK image to a VDI image Boot2Docker ships with a VMDK image, which can't be resized by VirtualBox's - native tools. We will instead create a VDI volume and clone the VMDK volume to + native tools. We instead create a VDI volume and clone the VMDK volume to it. 3. Using the command line VirtualBox tools, clone the VMDK image to a VDI image: @@ -90,15 +90,15 @@ as a bootable ISO, is a free download, and works well with VirtualBox. 4. Resize the VDI volume - Choose a size that will be appropriate for your needs. If you're spinning up a - lot of containers, or your containers are particularly large, larger will be + Choose a size appropriate for your needs. 
If you're spinning up a + lot of containers, or your containers are particularly large, larger is better: $ vboxmanage modifyhd /full/path/to/.vdi --resize 5. Download a disk partitioning tool ISO - To resize the volume, we'll use [GParted](https://sourceforge.net/projects/gparted/files/). + To resize the volume, we use [GParted](https://sourceforge.net/projects/gparted/files/). Once you've downloaded the tool, add the ISO to the Boot2Docker VM IDE bus. You might need to create the bus before you can add the ISO. @@ -134,7 +134,7 @@ as a bootable ISO, is a free download, and works well with VirtualBox. Manually start the Boot2Docker VM in VirtualBox, and the disk partitioning ISO should start up. Using GParted, choose the **GParted Live (default settings)** option. Choose the default keyboard, language, and XWindows settings, and the - GParted tool will start up and display the VDI volume you created. Right click + GParted tool starts up and displays the VDI volume you created. Right click on the VDI and choose **Resize/Move**. diff --git a/engine/admin/chef.md b/engine/admin/chef.md index eb080b4b3e1..7ecbcb1ad63 100644 --- a/engine/admin/chef.md +++ b/engine/admin/chef.md @@ -6,65 +6,6 @@ redirect_from: title: Use Chef --- -> **Note**: -> Please note this is a community contributed installation path. - -## Requirements - -To use this guide you'll need a working installation of -[Chef](https://www.chef.io/). This cookbook supports a variety of -operating systems. - -## Installation - -The cookbook is available on the [Chef Supermarket](https://supermarket.chef.io/cookbooks/docker) and can be -installed using your favorite cookbook dependency manager. - -The source can be found on -[GitHub](https://github.com/someara/chef-docker). - -Usage ------ -- Add ```depends 'docker', '~> 2.0'``` to your cookbook's metadata.rb -- Use resources shipped in cookbook in a recipe, the same way you'd - use core Chef resources (file, template, directory, package, etc). 
- -```ruby -docker_service 'default' do - action [:create, :start] -end - -docker_image 'busybox' do - action :pull -end - -docker_container 'an echo server' do - repo 'busybox' - port '1234:1234' - command "nc -ll -p 1234 -e /bin/cat" -end -``` - -## Getting started -Here's a quick example of pulling the latest image and running a -container with exposed ports. - -```ruby -# Pull latest image -docker_image 'nginx' do - tag 'latest' - action :pull -end - -# Run container exposing ports -docker_container 'my_nginx' do - repo 'nginx' - tag 'latest' - port '80:80' - binds [ '/some/local/files/:/etc/nginx/conf.d' ] - host_name 'www' - domain_name 'computers.biz' - env 'FOO=bar' - subscribes :redeploy, 'docker_image[nginx]' -end -``` +Docker no longer maintains specific documentation about using Chef from within +Docker. To use Chef within Docker, see the +[Chef documentation](https://supermarket.chef.io/cookbooks/docker). diff --git a/engine/admin/dsc.md b/engine/admin/dsc.md index 2b75ee72a4a..691fdb4ba24 100644 --- a/engine/admin/dsc.md +++ b/engine/admin/dsc.md @@ -14,7 +14,7 @@ configured. More information about PowerShell DSC can be found at ## Requirements -To use this guide you'll need a Windows host with PowerShell v4.0 or newer. +To use this guide you need a Windows host with PowerShell v4.0 or newer. The included DSC configuration script also uses the official PPA so only an Ubuntu target is supported. The Ubuntu target must already have the diff --git a/engine/admin/index.md b/engine/admin/index.md index 0d3c05a3be2..1f300d7e256 100644 --- a/engine/admin/index.md +++ b/engine/admin/index.md @@ -87,7 +87,7 @@ documentation. 
Some places to go next include: If you use a `daemon.json` file and also pass options to the `dockerd` command manually or using start-up scripts, and these options conflict, -Docker will fail to start with an error such as: +Docker fails to start with an error such as: ```none unable to configure the Docker daemon with file /etc/docker/daemon.json: @@ -127,7 +127,7 @@ There are other times when you might need to configure `systemd` with Docker, su [configuring a HTTP or HTTPS proxy](https://docs.docker.com/engine/admin/systemd/#httphttps-proxy). > **Note**: If you override this option and then do not specify a `hosts` entry in the `daemon.json` -> or a `-H` flag when starting Docker manually, Docker will fail to start. +> or a `-H` flag when starting Docker manually, Docker fails to start. Run `sudo systemctl daemon-reload` before attempting to start Docker. If Docker starts successfully, it is now listening on the IP address specified in the `hosts` key of the @@ -209,7 +209,7 @@ Docker platform. Instead of following this procedure, you can also stop the Docker daemon and restart it manually with the `-D` flag. However, this may result in Docker restarting with a different environment than the one the hosts' startup scripts -will create, and this may make debugging more difficult. +create, and this may make debugging more difficult. ### Force a stack trace to be logged @@ -228,11 +228,11 @@ by sending a `SIGUSR1` signal to the daemon. Run the executable with the flag `--pid=`. -This will force a stack trace to be logged but will not stop the daemon. -Daemon logs will show the stack trace or the path to a file containing the +This forces a stack trace to be logged but does not stop the daemon. +Daemon logs show the stack trace or the path to a file containing the stack trace if it was logged to a file. 
-The daemon will continue operating after handling the `SIGUSR1` signal and +The daemon continues operating after handling the `SIGUSR1` signal and dumping the stack traces to the log. The stack traces can be used to determine the state of all goroutines and threads within the daemon. @@ -258,7 +258,7 @@ Look in the Docker logs for a message like the following: ``` The locations where Docker saves these stack traces and dumps depends on your -operating system and configuration. You may be able to get useful diagnostic +operating system and configuration. You can sometimes get useful diagnostic information straight from the stack traces and dumps. Otherwise, you can provide this information to Docker for help diagnosing the problem. diff --git a/engine/admin/live-restore.md b/engine/admin/live-restore.md index 5501597ce85..6ef0258bd6d 100644 --- a/engine/admin/live-restore.md +++ b/engine/admin/live-restore.md @@ -31,7 +31,7 @@ Use your favorite editor to enable the `live-restore` option in the } ``` -You have to send a `SIGHUP` signal to the daemon process for it to reload the +You need to send a `SIGHUP` signal to the daemon process for it to reload the configuration. For more information on how to configure the Docker daemon using `daemon.json`, see [daemon configuration file](../reference/commandline/dockerd.md#daemon-configuration-file). @@ -47,7 +47,7 @@ The live restore feature supports restoring containers to the daemon for upgrades from one minor release to the next. For example from Docker Engine 1.12.1 to 1.12.2. -If you skip releases during an upgrade, the daemon may not restore its connection to the containers. If the daemon is unable to restore the connection, it ignores the running containers and you must manage them manually. +If you skip releases during an upgrade, the daemon may not restore its connection to the containers. If the daemon can't restore the connection, it ignores the running containers and you must manage them manually. 
## Live restore upon restart @@ -59,7 +59,7 @@ the daemon restarts with a different bridge IP or a different graphdriver. A lengthy absence of the daemon can impact running containers. The containers process writes to FIFO logs for daemon consumption. If the daemon is unavailable -to consume the output, the buffer will fill up and block further writes to the +to consume the output, the buffer fills up and blocks further writes to the log. A full log blocks the process until further space is available. The default buffer size is typically 64K. diff --git a/engine/admin/logging/awslogs.md b/engine/admin/logging/awslogs.md index b2e1bef5a48..2286fb1a1c0 100644 --- a/engine/admin/logging/awslogs.md +++ b/engine/admin/logging/awslogs.md @@ -77,7 +77,7 @@ specified, the container ID is used as the log stream. ### awslogs-create-group -Log driver will return an error by default if the log group does not exist. However, you can set the +The log driver returns an error by default if the log group does not exist. However, you can set the `awslogs-create-group` to `true` to automatically create the log group as needed. The `awslogs-create-group` option defaults to `false`. @@ -135,7 +135,7 @@ $ docker run --log-driver=awslogs \ ... ``` -This will parse the logs into the following CloudWatch log events: +This parses the logs into the following CloudWatch log events: ```none # First event @@ -209,7 +209,7 @@ $ docker run --log-driver=awslogs \ ... ``` -This will parse the logs into the following CloudWatch log events: +This parses the logs into the following CloudWatch log events: ```none # First event @@ -225,10 +225,10 @@ INFO Another message was logged ### tag -Specify `tag` as an alternative to the `awslogs-stream` option. `tag` interprets template markup (e.g., `{% raw %}{{.ID}}{% endraw %}`, `{% raw %}{{.FullID}}{% endraw %}` or `{% raw %}{{.Name}}{% endraw %}` `{% raw %}docker.{{.ID}}{% endraw %}`). +Specify `tag` as an alternative to the `awslogs-stream` option. 
`tag` interprets Go template markup, such as `{% raw %}{{.ID}}{% endraw %}`, `{% raw %}{{.FullID}}{% endraw %}`, or `{% raw %}{{.Name}}{% endraw %}`, or a combination of these, such as `{% raw %}docker.{{.ID}}{% endraw %}`. See the [tag option documentation](log_tags.md) for details on all supported template substitutions. -When both `awslogs-stream` and `tag` are specified, the value supplied for `awslogs-stream` will override the template specified with `tag`. +When both `awslogs-stream` and `tag` are specified, the value supplied for `awslogs-stream` overrides the template specified with `tag`. If not specified, the container ID is used as the log stream. @@ -237,7 +237,7 @@ If not specified, the container ID is used as the log stream. > The CloudWatch log API doesn't support `:` in the log name. This can cause some issues when using the `{{ .ImageName }}` as a tag, since a docker image has a format of `IMAGE:TAG`, such as `alpine:latest`. > Template markup can be used to get the proper format. > To get the image name and the first 12 characters of the container ID, you can use: `--log-opt tag='{{ with split .ImageName ":" }}{{join . "_"}}{{end}}-{{.ID}}'` -> the output will be something like: `alpine_latest-bf0072049c76` +> the output is something like: `alpine_latest-bf0072049c76` {% endraw %} diff --git a/engine/admin/logging/etwlogs.md b/engine/admin/logging/etwlogs.md index 87bb7e09610..d5975e10226 100644 --- a/engine/admin/logging/etwlogs.md +++ b/engine/admin/logging/etwlogs.md @@ -26,10 +26,10 @@ included in most installations of Windows: 2. Run your container(s) with the etwlogs driver, by adding `--log-driver=etwlogs` to the Docker run command, and generate log messages. 3. `logman stop -ets DockerContainerLogs` - 4. This will generate an etl file that contains the events. One way to convert this file into + 4. This generates an etl file that contains the events. One way to convert this file into human-readable form is to run: `tracerpt -y trace.etl`. 
-Each ETW event will contain a structured message string in this format: +Each ETW event contains a structured message string in this format: container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: [stdout | stderr], log: %s @@ -54,7 +54,7 @@ Here is an example event message: log: Hello world! A client can parse this message string to get both the log message, as well as its -context information. Note that the time stamp is also available within the ETW event. +context information. The timestamp is also available within the ETW event. > **Note**: This ETW provider emits only a message string, and not a specially > structured ETW event. Therefore, it is not required to register a manifest file diff --git a/engine/admin/logging/gcplogs.md b/engine/admin/logging/gcplogs.md index f0c1c4ebd1c..924839e7ba1 100644 --- a/engine/admin/logging/gcplogs.md +++ b/engine/admin/logging/gcplogs.md @@ -39,12 +39,12 @@ You can set the logging driver for a specific container by using the This log driver does not implement a reader so it is incompatible with `docker logs`. -If Docker detects that it is running in a Google Cloud Project, it will discover +If Docker detects that it is running in a Google Cloud Project, it discovers configuration from the [instance metadata service](https://cloud.google.com/compute/docs/metadata){: target="_blank" class="_"}. Otherwise, the user must specify -which project to log to using the `--gcp-project` log option and Docker will -attempt to obtain credentials from the +which project to log to using the `--gcp-project` log option and Docker +attempts to obtain credentials from the [Google Application Default Credential](https://developers.google.com/identity/protocols/application-default-credentials){: target="_blank" class="_"}. 
The `--gcp-project` flag takes precedence over information discovered from the metadata server so a Docker daemon running in a Google Cloud Project can be @@ -52,7 +52,7 @@ overridden to log to a different Google Cloud Project using `--gcp-project`. Docker fetches the values for zone, instance name and instance ID from Google Cloud metadata server. Those values can be provided via options if metadata -server is not available. They will not override the values from metadata server. +server is not available. They do not override the values from metadata server. ## gcplogs options diff --git a/engine/admin/logging/gelf.md b/engine/admin/logging/gelf.md index e91f3018df3..98b80bd27b9 100644 --- a/engine/admin/logging/gelf.md +++ b/engine/admin/logging/gelf.md @@ -80,8 +80,8 @@ The `gelf` logging driver supports the following options: | `gelf-tcp-max-reconnect` | optional | `TCP Only` The maximum number of reconnection attempts when the connection drop. An positive integer. Default value is 3. | `--log-opt gelf-tcp-max-reconnect=3` | | `gelf-tcp-reconnect-delay` | optional | `TCP Only` The number of seconds to wait between reconnection attempts. A positive integer. Default value is 1. | `--log-opt gelf-tcp-reconnect-delay=1` | | `tag` | optional | A string that is appended to the `APP-NAME` in the `gelf` message. By default, Docker uses the first 12 characters of the container ID to tag log messages. Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. | `--log-opt tag=mailer` | -| `labels` | optional | Applies when starting the Docker daemon. A comma-separated list of logging-related labels this daemon will accept. Adds additional key on the `extra` fields, prefixed by an underscore (`_`). Used for advanced [log tag options](log_tags.md). | `--log-opt labels=production_status,geo` | -| `env` | optional | Applies when starting the Docker daemon. 
A comma-separated list of logging-related environment variables this daemon will accept. Adds additional key on the `extra` fields, prefixed by an underscore (`_`). Used for advanced [log tag options](log_tags.md). | `--log-opt env=os,customer` | +| `labels` | optional | Applies when starting the Docker daemon. A comma-separated list of logging-related labels this daemon accepts. Adds additional key on the `extra` fields, prefixed by an underscore (`_`). Used for advanced [log tag options](log_tags.md). | `--log-opt labels=production_status,geo` | +| `env` | optional | Applies when starting the Docker daemon. A comma-separated list of logging-related environment variables this daemon accepts. Adds additional key on the `extra` fields, prefixed by an underscore (`_`). Used for advanced [log tag options](log_tags.md). | `--log-opt env=os,customer` | | `env-regex` | optional | Similar to and compatible with `env`. A regular expression to match logging-related environment variables. Used for advanced [log tag options](log_tags.md). | `--log-opt env-regex=^(os | customer).` | ### Examples diff --git a/engine/admin/logging/journald.md b/engine/admin/logging/journald.md index 53341d44079..b9df450ef0c 100644 --- a/engine/admin/logging/journald.md +++ b/engine/admin/logging/journald.md @@ -78,7 +78,7 @@ This configuration also directs the driver to include in the payload the label l The value logged in the `CONTAINER_NAME` field is the name of the container that was set at startup. If you use `docker rename` to rename a container, the new -name **is not reflected** in the journal entries. Journal entries will continue +name **is not reflected** in the journal entries. Journal entries continue to use the original name. 
## Retrieve log messages with `journalctl` diff --git a/engine/admin/logging/json-file.md b/engine/admin/logging/json-file.md index 2ee826ae1ee..6a22baf8aea 100644 --- a/engine/admin/logging/json-file.md +++ b/engine/admin/logging/json-file.md @@ -33,7 +33,7 @@ option. } ``` -Restart Docker for the changes to take effect for newly created containers. Existing containers will not use the new logging configuration. +Restart Docker for the changes to take effect for newly created containers. Existing containers do not use the new logging configuration. You can set the logging driver for a specific container by using the `--log-driver` flag to `docker create` or `docker run`: @@ -52,8 +52,8 @@ The `json-file` logging driver supports the following logging options: |:------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------| | `max-size` | The maximum size of the log before it is rolled. A positive integer plus a modifier representing the unit of measure (`k`, `m`, or `g`). Defaults to -1 (unlimited). | `--log-opt max-size=10m` | | `max-file` | The maximum number of log files that can be present. If rolling the logs creates excess files, the oldest file is removed. **Only effective when `max-size` is also set.** A positive integer. Defaults to 1. | `--log-opt max-file=3` | -| `labels` | Applies when starting the Docker daemon. A comma-separated list of logging-related labels this daemon will accept. Used for advanced [log tag options](log_tags.md). | `--log-opt labels=production_status,geo` | -| `env` | Applies when starting the Docker daemon. A comma-separated list of logging-related environment variables this daemon will accept. Used for advanced [log tag options](log_tags.md). | `--log-opt env=os,customer` | +| `labels` | Applies when starting the Docker daemon. 
A comma-separated list of logging-related labels this daemon accepts. Used for advanced [log tag options](log_tags.md). | `--log-opt labels=production_status,geo` | +| `env` | Applies when starting the Docker daemon. A comma-separated list of logging-related environment variables this daemon accepts. Used for advanced [log tag options](log_tags.md). | `--log-opt env=os,customer` | | `env-regex` | Similar to and compatible with `env`. A regular expression to match logging-related environment variables. Used for advanced [log tag options](log_tags.md). | `--log-opt env-regex=^(os|customer).` | diff --git a/engine/admin/logging/logentries.md b/engine/admin/logging/logentries.md index e37171f6a62..033e3d9ac01 100644 --- a/engine/admin/logging/logentries.md +++ b/engine/admin/logging/logentries.md @@ -28,9 +28,8 @@ To set the logging driver for a specific container, pass the $ docker run --log-driver=logentries ... ``` -Before using this logging driver, you'll need to create a new Log Set in the -Logentries web interface. Then, you'll need to pass the token of that log set -to Docker: +Before using this logging driver, you need to create a new Log Set in the +Logentries web interface and pass the token of that log set to Docker: ```bash $ docker run --log-driver=logentries --log-opt logentries-token=abcd1234-12ab-34cd-5678-0123456789ab diff --git a/engine/admin/logging/overview.md b/engine/admin/logging/overview.md index c7dd2e90d80..59b256d0a58 100644 --- a/engine/admin/logging/overview.md +++ b/engine/admin/logging/overview.md @@ -96,7 +96,7 @@ Docker provides two modes for delivering messages from the container to the log * (default) direct, blocking delivery from container to driver * non-blocking delivery that stores log messages in an intermediate per-container ring buffer for consumption by driver -The `non-blocking` message delivery mode prevents applications from blocking due to logging back pressure. 
Applications will likely fail in unexpected ways when STDERR or STDOUT streams block. +The `non-blocking` message delivery mode prevents applications from blocking due to logging back pressure. Applications are likely to fail in unexpected ways when STDERR or STDOUT streams block. > **WARNING**: When the buffer is full and a new message is enqueued, the oldest message in memory is dropped. Dropping messages is often preferred to blocking the log-writing process of an application. {: .warning} @@ -138,7 +138,7 @@ see more options. | Driver | Description | |:------------------------------|:--------------------------------------------------------------------------------------------------------------| -| `none` | No logs will be available for the container and `docker logs` will not return any output. | +| `none` | No logs are available for the container and `docker logs` does not return any output. | | [`json-file`](json-file.md) | The logs are formatted as JSON. The default logging driver for Docker. | | [`syslog`](syslog.md) | Writes logging messages to the `syslog` facility. The `syslog` daemon must be running on the host machine. | | [`journald`](journald.md) | Writes log messages to `journald`. The `journald` daemon must be running on the host machine. | diff --git a/engine/admin/logging/splunk.md b/engine/admin/logging/splunk.md index 9fc0330302a..b0cfe8a951e 100644 --- a/engine/admin/logging/splunk.md +++ b/engine/admin/logging/splunk.md @@ -59,7 +59,7 @@ The following properties let you configure the splunk logging driver. | `splunk-sourcetype` | optional | Event source type. | | `splunk-index` | optional | Event index. | | `splunk-capath` | optional | Path to root certificate. | -| `splunk-caname` | optional | Name to use for validating server certificate; by default the hostname of the `splunk-url` will be used. | +| `splunk-caname` | optional | Name to use for validating server certificate; by default the hostname of the `splunk-url` is used. 
| | `splunk-insecureskipverify` | optional | Ignore server certificate validation. | | `splunk-format` | optional | Message format. Can be `inline`, `json` or `raw`. Defaults to `inline`. | | `splunk-verify-connection` | optional | Verify on start, that docker can connect to Splunk server. Defaults to true. | @@ -176,7 +176,7 @@ Splunk Logging Driver allows you to configure few advanced options by specifying | Environment variable name | Default value | Description | |:-------------------------------------------------|:--------------|:---------------------------------------------------------------------------------------------------------------------------------------------------| -| `SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY` | `5s` | If there is nothing to batch how often driver will post messages. You can think about this as the maximum time to wait for more messages to batch. | +| `SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY` | `5s` | If there is nothing to batch how often driver posts messages. You can think about this as the maximum time to wait for more messages to batch. | | `SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE` | `1000` | How many messages driver should wait before sending them in one batch. | | `SPLUNK_LOGGING_DRIVER_BUFFER_MAX` | `10 * 1000` | If driver cannot connect to remote server, what is the maximum amount of messages it can hold in buffer for retries. | | `SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE` | `4 * 1000` | How many pending messages can be in the channel which is used to send messages to background logger worker, which batches them. | diff --git a/engine/admin/logging/syslog.md b/engine/admin/logging/syslog.md index 6d96b0fafc9..9cba309365e 100644 --- a/engine/admin/logging/syslog.md +++ b/engine/admin/logging/syslog.md @@ -82,6 +82,6 @@ starting the container. | `syslog-tls-skip-verify` | If set to `true`, TLS verification is skipped when connecting to the `syslog` daemon. Defaults to `false`. 
**Ignored if the address protocol is not `tcp+tls`.** | `--log-opt syslog-tls-skip-verify=true` | | `tag` | A string that is appended to the `APP-NAME` in the `syslog` message. By default, Docker uses the first 12 characters of the container ID to tag log messages. Refer to the [log tag option documentation](log_tags.md) for customizing the log tag format. | `--log-opt tag=mailer` | | `syslog-format` | The `syslog` message format to use. If not specified the local UNIX syslog format is used, without a specified hostname. Specify `rfc3164` for the RFC-3164 compatible format, `rfc5424` for RFC-5424 compatible format, or `rfc5424micro` for RFC-5424 compatible format with microsecond timestamp resolution. | `--log-opt syslog-format=rfc5424micro` | -| `labels` | Applies when starting the Docker daemon. A comma-separated list of logging-related labels this daemon will accept. Used for advanced [log tag options](log_tags.md). | `--log-opt labels=production_status,geo` | -| `env` | Applies when starting the Docker daemon. A comma-separated list of logging-related environment variables this daemon will accept. Used for advanced [log tag options](log_tags.md). | `--log-opt env=os,customer` | +| `labels` | Applies when starting the Docker daemon. A comma-separated list of logging-related labels this daemon accepts. Used for advanced [log tag options](log_tags.md). | `--log-opt labels=production_status,geo` | +| `env` | Applies when starting the Docker daemon. A comma-separated list of logging-related environment variables this daemon accepts. Used for advanced [log tag options](log_tags.md). | `--log-opt env=os,customer` | | `env-regex` | Applies when starting the Docker daemon. Similar to and compatible with `env`. A regular expression to match logging-related environment variables. Used for advanced [log tag options](log_tags.md). 
| `--log-opt env-regex=^(os\|customer)` | diff --git a/engine/admin/multi-service_container.md b/engine/admin/multi-service_container.md index 9670562bf70..1ab3d1c71eb 100644 --- a/engine/admin/multi-service_container.md +++ b/engine/admin/multi-service_container.md @@ -54,16 +54,16 @@ this in a few different ways. # Naive check runs checks once a minute to see if either of the processes exited. # This illustrates part of the heavy lifting you need to do if you want to run - # more than one service in a container. The container will exit with an error + # more than one service in a container. The container exits with an error # if it detects that either of the processes has exited. - # Otherwise it will loop forever, waking up every 60 seconds + # Otherwise it loops forever, waking up every 60 seconds while sleep 60; do ps aux |grep my_first_process |grep -q -v grep PROCESS_1_STATUS=$? ps aux |grep my_second_process |grep -q -v grep PROCESS_2_STATUS=$? - # If the greps above find anything, they will exit with 0 status + # If the greps above find anything, they exit with 0 status # If they are not both 0, then something is wrong if [ $PROCESS_1_STATUS -ne 0 -o $PROCESS_2_STATUS -ne 0 ]; then echo "One of the processes has already exited." @@ -85,7 +85,7 @@ this in a few different ways. - Use a process manager like `supervisord`. This is a moderately heavy-weight approach that requires you to package `supervisord` and its configuration in your image (or base your image on one that includes `supervisord`), along with - the different applications it will manage. Then you start `supervisord`, which + the different applications it manages. Then you start `supervisord`, which manages your processes for you. 
Here is an example Dockerfile using this approach, that assumes the pre-written `supervisord.conf`, `my_first_process`, and `my_second_process` files all exist in the same directory as your diff --git a/engine/admin/prometheus.md b/engine/admin/prometheus.md index db1dedacc52..092a72b0e57 100644 --- a/engine/admin/prometheus.md +++ b/engine/admin/prometheus.md @@ -231,7 +231,7 @@ Verify that the Docker target is listed at http://localhost:9090/targets/. ![Prometheus targets page](images/prometheus-targets.png) -You will not be able to access the endpoint URLs directly if you use Docker +You can't access the endpoint URLs directly if you use Docker for Mac or Docker for Windows. ## Use Prometheus diff --git a/engine/admin/puppet.md b/engine/admin/puppet.md index 7629a60ddee..b6166e3751d 100644 --- a/engine/admin/puppet.md +++ b/engine/admin/puppet.md @@ -6,90 +6,6 @@ redirect_from: title: Use Puppet --- -> **Note**: Please note this is a community contributed installation path. The -> only `official` installation is using the -> [*Ubuntu*](../installation/linux/ubuntulinux.md) installation -> path. This version may sometimes be out of date. - -## Requirements - -To use this guide you'll need a working installation of Puppet from -[Puppet Labs](https://puppetlabs.com) . - -The module also currently uses the official PPA so only works with -Ubuntu. - -## Installation - -The module is available on the [Puppet -Forge](https://forge.puppetlabs.com/garethr/docker/) and can be -installed using the built-in module tool. - - $ puppet module install garethr/docker - -It can also be found on -[GitHub](https://github.com/garethr/garethr-docker) if you would rather -download the source. - -## Usage - -The module provides a puppet class for installing Docker and two defined -types for managing images and containers. - -### Installation - - include 'docker' - -### Images - -The next step is probably to install a Docker image. 
For this, we have a -defined type which can be used like so: - - docker::image { 'ubuntu': } - -This is equivalent to running: - - $ docker pull ubuntu - -Note that it will only be downloaded if an image of that name does not -already exist. This is downloading a large binary so on first run can -take a while. For that reason this define turns off the default 5 minute -timeout for the exec type. Note that you can also remove images you no -longer need with: - - docker::image { 'ubuntu': - ensure => 'absent', - } - -### Containers - -Now you have an image where you can run commands within a container -managed by Docker. - - docker::run { 'helloworld': - image => 'ubuntu', - command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"', - } - -This is equivalent to running the following command, but under upstart: - - $ docker run -d ubuntu /bin/sh -c "while true; do echo hello world; sleep 1; done" - -Run also contains a number of optional parameters: - - docker::run { 'helloworld': - image => 'ubuntu', - command => '/bin/sh -c "while true; do echo hello world; sleep 1; done"', - ports => ['4444', '4555'], - volumes => ['/var/lib/couchdb', '/var/log'], - volumes_from => '6446ea52fbc9', - memory_limit => 10485760, # bytes - username => 'example', - hostname => 'example.com', - env => ['FOO=BAR', 'FOO2=BAR2'], - dns => ['8.8.8.8', '8.8.4.4'], - } - -> **Note**: -> The `ports`, `env`, `dns`, and `volumes` attributes can be set with either a single -> string or as above with an array of values. +Docker no longer maintains documentation specific to using Puppet within +Docker. To use Puppet within Docker, see +[Puppet's documentation](https://puppet.com/blog/puppet-docker-running-puppet-container-centric-infrastructure). 
diff --git a/engine/admin/resource_constraints.md b/engine/admin/resource_constraints.md index fdd886c8db3..b498852761e 100644 --- a/engine/admin/resource_constraints.md +++ b/engine/admin/resource_constraints.md @@ -7,7 +7,7 @@ keywords: "docker, daemon, configuration" --- By default, a container has no resource constraints and can use as much of a -given resource as the host's kernel scheduler will allow. Docker provides ways +given resource as the host's kernel scheduler allows. Docker provides ways to control how much memory, CPU, or block IO a container can use, setting runtime configuration flags of the `docker run` command. This section provides details on when you should set such limits and the possible implications of setting them. @@ -32,16 +32,16 @@ Consult your operating system's documentation for enabling them. It is important not to allow a running container to consume too much of the host machine's memory. On Linux hosts, if the kernel detects that there is not enough memory to perform important system functions, it throws an `OOME`, or -`Out Of Memory Exception`, and starts killing processes in order to free up +`Out Of Memory Exception`, and starts killing processes to free up memory. Any process is subject to killing, including Docker and other important applications. This can effectively bring the entire system down if the wrong process is killed. Docker attempts to mitigate these risks by adjusting the OOM priority on the -Docker daemon so that it will be less likely to be killed than other processes +Docker daemon so that it is less likely to be killed than other processes on the system. The OOM priority on containers is not adjusted. This makes it more -likely that an individual container will be killed than that the Docker daemon -or other system processes will be killed. You should not try to circumvent +likely for an individual container to be killed than for the Docker daemon +or other system processes to be killed. 
You should not try to circumvent these safeguards by manually setting `--oom-score-adj` to an extreme negative number on the daemon or a container, or by setting `--oom-disable-kill` on a container. @@ -79,7 +79,7 @@ Most of these options take a positive integer, followed by a suffix of `b`, `k`, | `-m` or `--memory=` | The maximum amount of memory the container can use. If you set this option, the minimum allowed value is `4m` (4 megabyte). | | `--memory-swap`* | The amount of memory this container is allowed to swap to disk. See [`--memory-swap` details](resource_constraints.md#--memory-swap-details). | | `--memory-swappiness` | By default, the host kernel can swap out a percentage of anonymous pages used by a container. You can set `--memory-swappiness` to a value between 0 and 100, to tune this percentage. See [`--memory-swappiness` details](resource_constraints.md#--memory-swappiness-details). | -| `--memory-reservation` | Allows you to specify a soft limit smaller than `--memory` which is activated when Docker detects contention or low memory on the host machine. If you use `--memory-reservation`, it must be set lower than `--memory` in order for it to take precedence. Because it is a soft limit, it does not guarantee that the container will not exceed the limit. | +| `--memory-reservation` | Allows you to specify a soft limit smaller than `--memory` which is activated when Docker detects contention or low memory on the host machine. If you use `--memory-reservation`, it must be set lower than `--memory` for it to take precedence. Because it is a soft limit, it does not guarantee that the container doesn't exceed the limit. | | `--kernel-memory` | The maximum amount of kernel memory the container can use. The minimum allowed value is `4m`. Because kernel memory cannot be swapped out, a container which is starved of kernel memory may block host machine resources, which can have side effects on the host machine and on other containers. 
See [`--kernel-memory` details](resource_constraints.md#--kernel-memory-details). | | `--oom-kill-disable` | By default, if an out-of-memory (OOM) error occurs, the kernel kills processes in a container. To change this behavior, use the `--oom-kill-disable` option. Only disable the OOM killer on containers where you have also set the `-m/--memory` option. If the `-m` flag is not set, the host can run out of memory and the kernel may need to kill the host system's processes to free memory. | @@ -105,7 +105,7 @@ Its setting can have complicated effects: treated as unset. - If `--memory-swap` is set to the same value as `--memory`, and `--memory` is - set to a positive integer, **the container will not have access to swap**. + set to a positive integer, **the container does not have access to swap**. See [Prevent a container from using swap](#prevent-a-container-from-using-swap). @@ -119,7 +119,7 @@ Its setting can have complicated effects: #### Prevent a container from using swap -If `--memory` and `--memory-swap` are set to the same value, this will prevent +If `--memory` and `--memory-swap` are set to the same value, this prevents containers from using any swap. This is because `--memory-swap` is the amount of combined memory and swap that can be used, while `--memory` is only the amount of physical memory that can be used. @@ -147,12 +147,12 @@ a container. Consider the following scenarios: limited, but the kernel memory is not. - **Limited memory, limited kernel memory**: Limiting both user and kernel memory can be useful for debugging memory-related problems. If a container - is using an unexpected amount of either type of memory, it will run out + is using an unexpected amount of either type of memory, it runs out of memory without affecting other containers or the host machine. 
Within this setting, if the kernel memory limit is lower than the user memory - limit, running out of kernel memory will cause the container to experience + limit, running out of kernel memory causes the container to experience an OOM error. If the kernel memory limit is higher than the user memory - limit, the kernel limit will not cause the container to experience an OOM. + limit, the kernel limit does not cause the container to experience an OOM. When you turn on any kernel memory limits, the host machine tracks "high water mark" statistics on a per-process basis, so you can track which processes (in @@ -163,7 +163,7 @@ by viewing `/proc//status` on the host machine. By default, each container's access to the host machine's CPU cycles is unlimited. You can set various constraints to limit a given container's access to the host -machine's CPU cycles. Most users will use and configure the +machine's CPU cycles. Most users use and configure the [default CFS scheduler](#configure-the-default-cfs-scheduler). In Docker 1.13 and higher, you can also configure the [realtime scheduler](#configure-the-realtime-scheduler). @@ -177,13 +177,13 @@ the container's cgroup on the host machine. | Option | Description | |:-----------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `--cpus=` | Specify how much of the available CPU resources a container can use. 
For instance, if the host machine has two CPUs and you set `--cpus="1.5"`, the container will be guaranteed to be able to access at most one and a half of the CPUs. This is the equivalent of setting `--cpu-period="100000"` and `--cpu-quota="150000"`. Available in Docker 1.13 and higher. | +| `--cpus=` | Specify how much of the available CPU resources a container can use. For instance, if the host machine has two CPUs and you set `--cpus="1.5"`, the container is guaranteed at most one and a half of the CPUs. This is the equivalent of setting `--cpu-period="100000"` and `--cpu-quota="150000"`. Available in Docker 1.13 and higher. | | `--cpu-period=` | Specify the CPU CFS scheduler period, which is used alongside `--cpu-quota`. Defaults to 100 micro-seconds. Most users do not change this from the default. If you use Docker 1.13 or higher, use `--cpus` instead. | | `--cpu-quota=` | Impose a CPU CFS quota on the container. The number of microseconds per `--cpu-period` that the container is guaranteed CPU access. In other words, `cpu-quota / cpu-period`. If you use Docker 1.13 or higher, use `--cpus` instead. | | `--cpuset-cpus` | Limit the specific CPUs or cores a container can use. A comma-separated list or hyphen-separated range of CPUs a container can use, if you have more than one CPU. The first CPU is numbered 0. A valid value might be `0-3` (to use the first, second, third, and fourth CPU) or `1,3` (to use the second and fourth CPU). | | `--cpu-shares` | Set this flag to a value greater or less than the default of 1024 to increase or reduce the container's weight, and give it access to a greater or lesser proportion of the host machine's CPU cycles. This is only enforced when CPU cycles are constrained. When plenty of CPU cycles are available, all containers use as much CPU as they need. In that way, this is a soft limit. `--cpu-shares` does not prevent containers from being scheduled in swarm mode. 
It prioritizes container CPU resources for the available CPU cycles. It does not guarantee or reserve any specific CPU access. | -If you have 1 CPU, each of the following commands will guarantee the container at +If you have 1 CPU, each of the following commands guarantees the container at most 50% of the CPU every second. **Docker 1.13 and higher**: @@ -254,4 +254,4 @@ $ docker run --it --cpu-rt-runtime=950000 \ debian:jessie ``` -If the kernel or Docker daemon is not configured correctly, an error will occur. +If the kernel or Docker daemon is not configured correctly, an error occurs. diff --git a/engine/admin/runmetrics.md b/engine/admin/runmetrics.md index 57502f328dc..174dc0bf409 100644 --- a/engine/admin/runmetrics.md +++ b/engine/admin/runmetrics.md @@ -37,13 +37,13 @@ containers, as well as for Docker containers. Control groups are exposed through a pseudo-filesystem. In recent distros, you should find this filesystem under `/sys/fs/cgroup`. Under -that directory, you will see multiple sub-directories, called devices, +that directory, you see multiple sub-directories, called devices, freezer, blkio, etc.; each sub-directory actually corresponds to a different cgroup hierarchy. On older systems, the control groups might be mounted on `/cgroup`, without distinct hierarchies. In that case, instead of seeing the sub-directories, -you will see a bunch of files in that directory, and possibly some directories +you see a bunch of files in that directory, and possibly some directories corresponding to existing containers. To figure out where your control groups are mounted, you can run: @@ -58,19 +58,19 @@ You can look into `/proc/cgroups` to see the different control group subsystems known to the system, the hierarchy they belong to, and how many groups they contain. You can also look at `/proc//cgroup` to see which control groups a process -belongs to. 
The control group will be shown as a path relative to the root of -the hierarchy mountpoint; e.g., `/` means "this process has not been assigned into -a particular group", while `/lxc/pumpkin` means that the process is likely to be -a member of a container named `pumpkin`. +belongs to. The control group is shown as a path relative to the root of +the hierarchy mountpoint. `/` means the process has not been assigned to a +group, while `/lxc/pumpkin` indicates that the process is a member of a +container named `pumpkin`. ## Finding the cgroup for a given container -For each container, one cgroup will be created in each hierarchy. On +For each container, one cgroup is created in each hierarchy. On older systems with older versions of the LXC userland tools, the name of -the cgroup will be the name of the container. With more recent versions -of the LXC tools, the cgroup will be `lxc/.` +the cgroup is the name of the container. With more recent versions +of the LXC tools, the cgroup is `lxc/.` -For Docker containers using cgroups, the container name will be the full +For Docker containers using cgroups, the container name is the full ID or long ID of the container. If a container shows up as ae836c95b4c3 in `docker ps`, its long ID might be something like `ae836c95b4c3c9e9179e0e91015512da89fdec91612f63cebae57df9a5444c79`. You can @@ -81,12 +81,12 @@ container, take a look at `/sys/fs/cgroup/memory/docker//`. ## Metrics from cgroups: memory, CPU, block I/O -For each subsystem (memory, CPU, and block I/O), you will find one or -more pseudo-files containing statistics. +For each subsystem (memory, CPU, and block I/O), one or +more pseudo-files exist and contain statistics. ### Memory metrics: `memory.stat` -Memory metrics are found in the "memory" cgroup. Note that the memory +Memory metrics are found in the "memory" cgroup. The memory control group adds a little overhead, because it does very fine-grained accounting of the memory usage on your host. 
Therefore, many distros chose to not enable it by default. Generally, to enable it, all you have @@ -94,7 +94,7 @@ to do is to add some kernel command-line parameters: `cgroup_enable=memory swapaccount=1`. The metrics are in the pseudo-file `memory.stat`. -Here is what it will look like: +Here is what it looks like: cache 11492564992 rss 1930993664 @@ -129,58 +129,55 @@ The first half (without the `total_` prefix) contains statistics relevant to the processes within the cgroup, excluding sub-cgroups. The second half (with the `total_` prefix) includes sub-cgroups as well. -Some metrics are "gauges", i.e., values that can increase or decrease -(e.g., swap, the amount of swap space used by the members of the cgroup). -Some others are "counters", i.e., values that can only go up, because -they represent occurrences of a specific event (e.g., pgfault, which -indicates the number of page faults which happened since the creation of -the cgroup; this number can never decrease). +Some metrics are "gauges", or values that can increase or decrease. For instance, +`swap` is the amount of swap space used by the members of the cgroup. +Some others are "counters", or values that can only go up, because +they represent occurrences of a specific event. For instance, `pgfault` +indicates the number of page faults since the creation of the cgroup. Metric | Description --------------------------------------|----------------------------------------------------------- -**cache** | The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device. When you read from and write to files on disk, this amount will increase. This will be the case if you use "conventional" I/O (`open`, `read`, `write` syscalls) as well as mapped files (with `mmap`). It also accounts for the memory used by `tmpfs` mounts, though the reasons are unclear. 
+**cache** | The amount of memory used by the processes of this control group that can be associated precisely with a block on a block device. When you read from and write to files on disk, this amount increases. This is the case if you use "conventional" I/O (`open`, `read`, `write` syscalls) as well as mapped files (with `mmap`). It also accounts for the memory used by `tmpfs` mounts, though the reasons are unclear. **rss** | The amount of memory that *doesn't* correspond to anything on disk: stacks, heaps, and anonymous memory maps. **mapped_file** | Indicates the amount of memory mapped by the processes in the control group. It doesn't give you information about *how much* memory is used; it rather tells you *how* it is used. -**pgfault**, **pgmajfault** | Indicate the number of times that a process of the cgroup triggered a "page fault" and a "major fault", respectively. A page fault happens when a process accesses a part of its virtual memory space which is nonexistent or protected. The former can happen if the process is buggy and tries to access an invalid address (it will then be sent a `SIGSEGV` signal, typically killing it with the famous `Segmentation fault` message). The latter can happen when the process reads from a memory zone which has been swapped out, or which corresponds to a mapped file: in that case, the kernel will load the page from disk, and let the CPU complete the memory access. It can also happen when the process writes to a copy-on-write memory zone: likewise, the kernel will preempt the process, duplicate the memory page, and resume the write operation on the process` own copy of the page. "Major" faults happen when the kernel actually has to read the data from disk. When it just has to duplicate an existing page, or allocate an empty page, it's a regular (or "minor") fault. +**pgfault**, **pgmajfault** | Indicate the number of times that a process of the cgroup triggered a "page fault" and a "major fault", respectively. 
A page fault happens when a process accesses a part of its virtual memory space which is nonexistent or protected. The former can happen if the process is buggy and tries to access an invalid address (it is sent a `SIGSEGV` signal, typically killing it with the famous `Segmentation fault` message). The latter can happen when the process reads from a memory zone which has been swapped out, or which corresponds to a mapped file: in that case, the kernel loads the page from disk, and let the CPU complete the memory access. It can also happen when the process writes to a copy-on-write memory zone: likewise, the kernel preempts the process, duplicate the memory page, and resume the write operation on the process` own copy of the page. "Major" faults happen when the kernel actually needs to read the data from disk. When it just duplicates an existing page, or allocate an empty page, it's a regular (or "minor") fault. **swap** | The amount of swap currently used by the processes in this cgroup. -**active_anon**, **inactive_anon** | The amount of *anonymous* memory that has been identified has respectively *active* and *inactive* by the kernel. "Anonymous" memory is the memory that is *not* linked to disk pages. In other words, that's the equivalent of the rss counter described above. In fact, the very definition of the rss counter is **active_anon** + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory used up by `tmpfs` filesystems mounted by this control group). Now, what's the difference between "active" and "inactive"? Pages are initially "active"; and at regular intervals, the kernel sweeps over the memory, and tags some pages as "inactive". Whenever they are accessed again, they are immediately retagged "active". When the kernel is almost out of memory, and time comes to swap out to disk, the kernel will swap "inactive" pages. -**active_file**, **inactive_file** | Cache memory, with *active* and *inactive* similar to the *anon* memory above. 
The exact formula is **cache** = **active_file** + **inactive_file** + **tmpfs**. The exact rules used by the kernel to move memory pages between active and inactive sets are different from the ones used for anonymous memory, but the general principle is the same. Note that when the kernel needs to reclaim memory, it is cheaper to reclaim a clean (=non modified) page from this pool, since it can be reclaimed immediately (while anonymous pages and dirty/modified pages have to be written to disk first). -**unevictable** | The amount of memory that cannot be reclaimed; generally, it will account for memory that has been "locked" with `mlock`. It is often used by crypto frameworks to make sure that secret keys and other sensitive material never gets swapped out to disk. +**active_anon**, **inactive_anon** | The amount of *anonymous* memory that has been identified as respectively *active* and *inactive* by the kernel. "Anonymous" memory is the memory that is *not* linked to disk pages. In other words, that's the equivalent of the rss counter described above. In fact, the very definition of the rss counter is **active_anon** + **inactive_anon** - **tmpfs** (where tmpfs is the amount of memory used up by `tmpfs` filesystems mounted by this control group). Now, what's the difference between "active" and "inactive"? Pages are initially "active"; and at regular intervals, the kernel sweeps over the memory, and tags some pages as "inactive". Whenever they are accessed again, they are immediately retagged "active". When the kernel is almost out of memory, and time comes to swap out to disk, the kernel swaps "inactive" pages. +**active_file**, **inactive_file** | Cache memory, with *active* and *inactive* similar to the *anon* memory above. The exact formula is **cache** = **active_file** + **inactive_file** + **tmpfs**. 
The exact rules used by the kernel to move memory pages between active and inactive sets are different from the ones used for anonymous memory, but the general principle is the same. When the kernel needs to reclaim memory, it is cheaper to reclaim a clean (=non modified) page from this pool, since it can be reclaimed immediately (while anonymous pages and dirty/modified pages need to be written to disk first). +**unevictable** | The amount of memory that cannot be reclaimed; generally, it accounts for memory that has been "locked" with `mlock`. It is often used by crypto frameworks to make sure that secret keys and other sensitive material never gets swapped out to disk. **memory_limit**, **memsw_limit** | These are not really metrics, but a reminder of the limits applied to this cgroup. The first one indicates the maximum amount of physical memory that can be used by the processes of this control group; the second one indicates the maximum amount of RAM+swap. Accounting for memory in the page cache is very complex. If two processes in different control groups both read the same file (ultimately relying on the same blocks on disk), the corresponding -memory charge will be split between the control groups. It's nice, but +memory charge is split between the control groups. It's nice, but it also means that when a cgroup is terminated, it could increase the memory usage of another cgroup, because they are not splitting the cost anymore for those memory pages. ### CPU metrics: `cpuacct.stat` -Now that we've covered memory metrics, everything else will look very -simple in comparison. CPU metrics will be found in the +Now that we've covered memory metrics, everything else is +simple in comparison. CPU metrics are in the `cpuacct` controller. -For each container, you will find a pseudo-file `cpuacct.stat`, -containing the CPU usage accumulated by the processes of the container, -broken down between `user` and `system` time. 
If you're not familiar -with the distinction, `user` is the time during which the processes were -in direct control of the CPU (i.e., executing process code), and `system` -is the time during which the CPU was executing system calls on behalf of -those processes. - -Those times are expressed in ticks of 1/100th of a second. Actually, -they are expressed in "user jiffies". There are `USER_HZ` -*"jiffies"* per second, and on x86 systems, -`USER_HZ` is 100. This used to map exactly to the -number of scheduler "ticks" per second; but with the advent of higher -frequency scheduling, as well as [tickless kernels]( -http://lwn.net/Articles/549580/), the number of kernel ticks -wasn't relevant anymore. It stuck around anyway, mainly for legacy and -compatibility reasons. +For each container, a pseudo-file `cpuacct.stat` contains the CPU usage +accumulated by the processes of the container, broken down into `user` and +`system` time. The distinction is: + +- `user` time is the amount of time a process has direct control of the CPU, + executing process code. +- `system` time is the time the kernel is executing system calls on behalf of + the process. + +Those times are expressed in ticks of 1/100th of a second, also called "user +jiffies". There are `USER_HZ` *"jiffies"* per second, and on x86 systems, +`USER_HZ` is 100. Historically, this mapped exactly to the number of scheduler +"ticks" per second, but higher frequency scheduling and +[tickless kernels]( http://lwn.net/Articles/549580/) have made the number of +ticks irrelevant. ### Block I/O metrics @@ -197,7 +194,7 @@ Metric | Description **blkio.sectors** | Contains the number of 512-bytes sectors read and written by the processes member of the cgroup, device by device. Reads and writes are merged in a single counter. **blkio.io_service_bytes** | Indicates the number of bytes read and written by the cgroup. It has 4 counters per device, because for each device, it differentiates between synchronous vs. 
asynchronous I/O, and reads vs. writes. **blkio.io_serviced** | The number of I/O operations performed, regardless of their size. It also has 4 counters per device. -**blkio.io_queued** | Indicates the number of I/O operations currently queued for this cgroup. In other words, if the cgroup isn't doing any I/O, this will be zero. Note that the opposite is not true. In other words, if there is no I/O queued, it does not mean that the cgroup is idle (I/O-wise). It could be doing purely synchronous reads on an otherwise quiescent device, which is therefore able to handle them immediately, without queuing. Also, while it is helpful to figure out which cgroup is putting stress on the I/O subsystem, keep in mind that it is a relative quantity. Even if a process group does not perform more I/O, its queue size can increase just because the device load increases because of other devices. +**blkio.io_queued** | Indicates the number of I/O operations currently queued for this cgroup. In other words, if the cgroup isn't doing any I/O, this is zero. The opposite is not true. In other words, if there is no I/O queued, it does not mean that the cgroup is idle (I/O-wise). It could be doing purely synchronous reads on an otherwise quiescent device, which can therefore handle them immediately, without queuing. Also, while it is helpful to figure out which cgroup is putting stress on the I/O subsystem, keep in mind that it is a relative quantity. Even if a process group does not perform more I/O, its queue size can increase just because the device load increases because of other devices. ## Network metrics @@ -229,7 +226,7 @@ $ iptables -I OUTPUT -p tcp --sport 80 ``` There is no `-j` or `-g` flag, -so the rule will just count matched packets and go to the following +so the rule just counts matched packets and goes to the following rule. 
Later, you can check the values of the counters, with: @@ -238,29 +235,28 @@ Later, you can check the values of the counters, with: $ iptables -nxvL OUTPUT ``` -Technically, `-n` is not required, but it will -prevent iptables from doing DNS reverse lookups, which are probably +Technically, `-n` is not required, but it +prevents iptables from doing DNS reverse lookups, which are probably useless in this scenario. Counters include packets and bytes. If you want to setup metrics for container traffic like this, you could execute a `for` loop to add two `iptables` rules per container IP address (one in each direction), in the `FORWARD` -chain. This will only meter traffic going through the NAT -layer; you will also have to add traffic going through the userland +chain. This only meters traffic going through the NAT +layer; you also need to add traffic going through the userland proxy. -Then, you will need to check those counters on a regular basis. If you +Then, you need to check those counters on a regular basis. If you happen to use `collectd`, there is a [nice plugin](https://collectd.org/wiki/index.php/Table_of_Plugins) to automate iptables counters collection. ### Interface-level counters -Since each container has a virtual Ethernet interface, you might want to -check directly the TX and RX counters of this interface. You will notice -that each container is associated to a virtual Ethernet interface in -your host, with a name like `vethKk8Zqi`. Figuring -out which interface corresponds to which container is, unfortunately, +Since each container has a virtual Ethernet interface, you might want to check +directly the TX and RX counters of this interface. Each container is associated +to a virtual Ethernet interface in your host, with a name like `vethKk8Zqi`. +Figuring out which interface corresponds to which container is, unfortunately, difficult. But for now, the best way is to check the metrics *from within the @@ -268,13 +264,12 @@ containers*. 
To accomplish this, you can run an executable from the host environment within the network namespace of a container using **ip-netns magic**. -The `ip-netns exec` command will let you execute any +The `ip-netns exec` command allows you to execute any program (present in the host system) within any network namespace -visible to the current process. This means that your host will be able -to enter the network namespace of your containers, but your containers -won't be able to access the host, nor their sibling containers. -Containers will be able to "see" and affect their sub-containers, -though. +visible to the current process. This means that your host can +enter the network namespace of your containers, but your containers +can't access the host or other peer containers. +Containers can interact with their sub-containers, though. The exact format of the command is: @@ -307,11 +302,11 @@ container, we need to: - Create a symlink from `/var/run/netns/` to `/proc//ns/net` - Execute `ip netns exec ....` -Please review [Enumerating Cgroups](#enumerating-cgroups) to learn how to find -the cgroup of a process running in the container of which you want to -measure network usage. From there, you can examine the pseudo-file named -`tasks`, which contains the PIDs that are in the -control group (i.e., in the container). Pick any one of them. +Review [Enumerating Cgroups](#enumerating-cgroups) for how to find +the cgroup of an in-container process whose network usage you want to measure. +From there, you can examine the pseudo-file named +`tasks`, which contains all the PIDs in the +cgroup (and thus, in the container). Pick any one of the PIDs. 
Putting everything together, if the "short ID" of a container is held in the environment variable `$CID`, then you can do this: @@ -326,13 +321,13 @@ $ ip netns exec $CID netstat -i ## Tips for high-performance metric collection -Note that running a new process each time you want to update metrics is +Running a new process each time you want to update metrics is (relatively) expensive. If you want to collect metrics at high resolutions, and/or over a large number of containers (think 1000 containers on a single host), you do not want to fork a new process each time. -Here is how to collect metrics from a single process. You will have to +Here is how to collect metrics from a single process. You need to write your metric collector in C (or any language that lets you do low-level system calls). You need to use a special system call, `setns()`, which lets the current process enter any @@ -342,8 +337,8 @@ the namespace pseudo-file (remember: that's the pseudo-file in However, there is a catch: you must not keep this file descriptor open. If you do, when the last process of the control group exits, the -namespace will not be destroyed, and its network resources (like the -virtual interface of the container) will stay around for ever (or until +namespace is not destroyed, and its network resources (like the +virtual interface of the container) stay around forever (or until you close that file descriptor). The right approach would be to keep track of the first PID of each @@ -355,10 +350,9 @@ Sometimes, you do not care about real time metric collection, but when a container exits, you want to know how much CPU, memory, etc. it has used. 
+Docker makes this difficult because it relies on `lxc-start`, which carefully +cleans up after itself. It is usually easier to collect metrics at regular +intervals, and this is the way the `collectd` LXC plugin works. But, if you'd still like to gather the stats when a container stops, here is how: @@ -371,8 +365,8 @@ the tasks file to check if it's the last process of the control group. previous section, you should also move the process to the appropriate network namespace.) -When the container exits, `lxc-start` will try to -delete the control groups. It will fail, since the control group is +When the container exits, `lxc-start` attempts to +delete the control groups. It fails, since the control group is still in use; but that's fine. Your process should now detect that it is the only one remaining in the group. Now is the right time to collect all the metrics you need! diff --git a/engine/admin/start-containers-automatically.md b/engine/admin/start-containers-automatically.md index 62ef6ec0a4c..a19d7930de7 100644 --- a/engine/admin/start-containers-automatically.md +++ b/engine/admin/start-containers-automatically.md @@ -15,7 +15,7 @@ using process managers to start containers. Restart policies are different from the `--live-restore` flag of the `dockerd` command. Using `--live-restore` allows you to keep your containers running -during a Docker upgrade, though networking and user input will be interrupted. +during a Docker upgrade, though networking and user input are interrupted. ## Use a restart policy @@ -64,7 +64,7 @@ Docker depend on Docker containers, you can use a process manager such as [supervisor](http://supervisord.org/) instead. > **Warning**: Do not try to combine Docker restart policies with host-level -> process managers, because the two will conflict. +> process managers, because this creates conflicts. 
To use a process manager, configure it to start your container or service using the same `docker start` or `docker service` command you would normally use to @@ -74,7 +74,7 @@ process manager for more details. ### Using a process manager inside containers Process managers can also run within the container to check whether a process is -running and starts/restart it if not. +running and starts/restart it if not. > **Warning**: These are not Docker-aware and just monitor operating system processes within the container. > diff --git a/engine/admin/systemd.md b/engine/admin/systemd.md index 005f4dc8eeb..fbfa7e6ce28 100644 --- a/engine/admin/systemd.md +++ b/engine/admin/systemd.md @@ -14,7 +14,7 @@ shows a few examples of how to customize Docker's settings. ### Start manually -Once Docker is installed, you will need to start the Docker daemon. +Once Docker is installed, you need to start the Docker daemon. Most Linux distributions use `systemctl` to start services. If you do not have `systemctl`, use the `service` command. @@ -69,7 +69,7 @@ these environment variables using the `daemon.json` file. This example overrides the default `docker.service` file. If you are behind an HTTP or HTTPS proxy server, for example in corporate settings, -you will need to add this configuration in the Docker systemd service file. +you need to add this configuration in the Docker systemd service file. 1. Create a systemd drop-in directory for the docker service: diff --git a/engine/admin/volumes/bind-mounts.md b/engine/admin/volumes/bind-mounts.md index f375836e264..7587c143148 100644 --- a/engine/admin/volumes/bind-mounts.md +++ b/engine/admin/volumes/bind-mounts.md @@ -39,7 +39,7 @@ syntax separates them. Here is a comparison of the syntax for each flag. is not immediately obvious. - In the case of bind mounts, the first field is the path to the file or directory on the **host machine**. 
- - The second field is the path where the file or directory will be mounted in + - The second field is the path where the file or directory is mounted in the container. - The third field is optional, and is a comma-separated list of options, such as `ro`, `consistent`, `delegated`, `cached`, `z`, and `Z`. These options @@ -50,12 +50,12 @@ syntax separates them. Here is a comparison of the syntax for each flag. than `-v` or `--volume`, but the order of the keys is not significant, and the value of the flag is easier to understand. - The `type` of the mount, which can be `bind`, `volume`, or `tmpfs`. This - topic discusses bind mounts, so the type will always be `bind`. + topic discusses bind mounts, so the type is always `bind`. - The `source` of the mount. For bind mounts, this is the path to the file or directory on the Docker daemon host. May be specified as `source` or `src`. - The `destination` takes as its value the path where the file or directory - will be mounted in the container. May be specified as `destination`, `dst`, + is mounted in the container. May be specified as `destination`, `dst`, or `target`. - The `readonly` option, if present, causes the bind mount to be [mounted into the container as read-only](#use-a-read-only-bind-mount). @@ -78,7 +78,7 @@ time, their behavior cannot be changed. This means that **there is one behavior that is different between `-v` and `--mount`.** If you use `-v` or `--volume` to bind-mount a file or directory that does not -yet exist on the Docker host, `-v` will create the endpoint for you. **It is +yet exist on the Docker host, `-v` creates the endpoint for you. **It is always created as a directory.** If you use `--mount` to bind-mount a file or directory that does not @@ -160,12 +160,12 @@ $ docker container rm devtest ### Mounting into a non-empty directory on the container If you bind-mount into a non-empty directory on the container, the directory's -existing contents will be obscured by the bind mount. 
This can be beneficial, +existing contents are obscured by the bind mount. This can be beneficial, such as when you want to test a new version of your application without building a new image. However, it can also be surprising and this behavior differs from that of [docker volumes](volumes.md). -This example is contrived to be extreme, but will replace the contents of the +This example is contrived to be extreme, but replaces the contents of the container's `/usr/` directory with the `/tmp/` directory on the host machine. In most cases, this would result in a non-functioning container. @@ -214,10 +214,9 @@ $ docker container rm broken-container ## Use a read-only bind mount -For some development applications, it is useful for the container to be able to -write into the bind mount, in order for changes to be propagated back to the -Docker host. At other times, the container should only be able to read the -data and not modify it. +For some development applications, the container needs to +write into the bind mount, so changes are propagated back to the +Docker host. At other times, the container only needs read access. This example modifies the one above but mounts the directory as a read-only bind mount, by adding `ro` to the (empty by default) list of options, after the @@ -345,7 +344,7 @@ $ docker run -d \

-Now if you create `/app/foo/`, `/app2/foo/` will also exist. +Now if you create `/app/foo/`, `/app2/foo/` also exists. ## Configure the selinux label @@ -359,7 +358,7 @@ consequences outside of the scope of Docker. - The `Z` option indicates that the bind mount content is private and unshared. Use **extreme** caution with these options. Bind-mounting a system directory -such as `/home` or `/usr` with the `Z` option will render your host machine +such as `/home` or `/usr` with the `Z` option renders your host machine inoperable and you may need to relabel the host machine files by hand. > **Important**: When using bind mounts with services, selinux labels diff --git a/engine/admin/volumes/index.md b/engine/admin/volumes/index.md index db6bd03b5e0..e48747985b2 100644 --- a/engine/admin/volumes/index.md +++ b/engine/admin/volumes/index.md @@ -7,7 +7,7 @@ keywords: storage, persistence, data persistence, volumes, mounts, bind mounts It is possible to store data within the writable layer of a container, but there are some downsides: -- The data won't persist when that container is no longer running, and it can be +- The data doesn't persist when that container is no longer running, and it can be difficult to get the data out of the container if another process needs it. - A container's writable layer is tightly coupled to the host machine where the container is running. You can't easily move the data somewhere else. @@ -119,7 +119,7 @@ Some use cases for volumes include: - When you want to store your container's data on a remote host or a cloud provider, rather than locally. -- When you need to be able to back up, restore, or migrate data from one Docker +- When you need to back up, restore, or migrate data from one Docker host to another, volumes are a better choice. You can stop containers using the volume, then back up the volume's directory (such as `/var/lib/docker/volumes/`). @@ -157,12 +157,12 @@ needs to write a large volume of non-persistent state data. 
If you use either bind mounts or volumes, keep the following in mind: - If you mount an **empty volume** into a directory in the container in which files - or directories exist, these files or directories will be propagated (copied) + or directories exist, these files or directories are propagated (copied) into the volume. Similarly, if you start a container and specify a volume which does not already exist, an empty volume is created for you. This is a good way to pre-populate data that another container needs. -- If you mount a **bind mount or non-empty volume** into a directory in the container +- If you mount a **bind mount or non-empty volume** into a directory in the container in which some files or directories exist, these files or directories are obscured by the mount, just as if you saved files into `/mnt` on a Linux host and then mounted a USB drive into `/mnt`. The contents of `/mnt` would be diff --git a/engine/admin/volumes/tmpfs.md b/engine/admin/volumes/tmpfs.md index 07d5619da17..a71021ace96 100644 --- a/engine/admin/volumes/tmpfs.md +++ b/engine/admin/volumes/tmpfs.md @@ -42,10 +42,10 @@ the `--mount` flag was used for swarm services. However, starting with Docker than `-v` or `--volume`, but the order of the keys is not significant, and the value of the flag is easier to understand. - The `type` of the mount, which can be [`bind`](bind-mounts-md), `volume`, or - [`tmpfs`](tmpfs.md). This topic discusses `tmpfs`, so the type will always - be `tmpfs`. + [`tmpfs`](tmpfs.md). This topic discusses `tmpfs`, so the type is always + `tmpfs`. - The `destination` takes as its value the path where the `tmpfs` mount - will be mounted in the container. May be specified as `destination`, `dst`, + is mounted in the container. May be specified as `destination`, `dst`, or `target`. - The `tmpfs-type` and `tmpfs-mode` options. See [tmpfs options](#tmpfs-options). 
diff --git a/engine/admin/volumes/volumes.md b/engine/admin/volumes/volumes.md index d9722269005..85addaa4420 100644 --- a/engine/admin/volumes/volumes.md +++ b/engine/admin/volumes/volumes.md @@ -56,7 +56,7 @@ If you need to specify volume driver options, you must use `--mount`. - In the case of named volumes, the first field is the name of the volume, and is unique on a given host machine. For anonymous volumes, the first field is omitted. - - The second field is the path where the file or directory will be mounted in + - The second field is the path where the file or directory is mounted in the container. - The third field is optional, and is a comma-separated list of options, such as `ro`. These options are discussed below. @@ -66,13 +66,13 @@ If you need to specify volume driver options, you must use `--mount`. than `-v` or `--volume`, but the order of the keys is not significant, and the value of the flag is easier to understand. - The `type` of the mount, which can be [`bind`](bind-mounts.md), `volume`, or - [`tmpfs`](tmpfs.md). This topic discusses volumes, so the type will always - be `volume`. + [`tmpfs`](tmpfs.md). This topic discusses volumes, so the type is always + `volume`. - The `source` of the mount. For named volumes, this is the name of the volume. For anonymous volumes, this field is omitted. May be specified as `source` or `src`. - The `destination` takes as its value the path where the file or directory - will be mounted in the container. May be specified as `destination`, `dst`, + is mounted in the container. May be specified as `destination`, `dst`, or `target`. - The `readonly` option, if present, causes the bind mount to be [mounted into the container as read-only](#use-a-read-only-volume). 
@@ -200,7 +200,7 @@ $ docker volume rm myvol2 ### Start a service with volumes -When you start a service and define a volume, each service container will use its own +When you start a service and define a volume, each service container uses its own local volume. None of the containers can share this data if you use the `local` volume driver, but some volume drivers do support shared storage. Docker for AWS and Docker for Azure both support persistent storage using the Cloudstor plugin. @@ -241,8 +241,8 @@ flag. If you start a container which creates a new volume, as above, and the container has files or directories in the directory to be mounted (such as `/app/` above), -the directory's contents will be copied into the volume. The container will then -mount and use the volume, and other containers which use the volume will also +the directory's contents are copied into the volume. The container then +mounts and uses the volume, and other containers which use the volume also have access to the pre-populated content. To illustrate this, this example starts an `nginx` container and populates the @@ -292,12 +292,11 @@ $ docker volume rm nginx-vol ## Use a read-only volume -For some development applications, it is useful for the container to be able to -write into the bind mount, in order for changes to be propagated back to the -Docker host. At other times, the container should only be able to read the -data and not modify it. Remember that multiple containers can mount the same -volume, and it can be mounted read-write for some of them and read-only for -others, simultaneously. +For some development applications, the container needs to write into the bind +mount so that changes are propagated back to the Docker host. At other times, +the container only needs read access to the data. Remember that multiple +containers can mount the same volume, and it can be mounted read-write for some +of them and read-only for others, at the same time. 
This example modifies the one above but mounts the directory as a read-only volume, by adding `ro` to the (empty by default) list of options, after the @@ -366,7 +365,7 @@ $ docker volume rm nginx-vol When you create a volume using `docker volume create`, or when you start a container which uses a not-yet-created volume, you can specify a volume driver. The following examples use the `vieux/sshfs` volume driver, first when creating -a standalone volume, and then when starting a container which will create a new +a standalone volume, and then when starting a container which creates a new volume. ### Initial set-up diff --git a/engine/examples/apt-cacher-ng.md b/engine/examples/apt-cacher-ng.md index ec186133b89..4c0045da18e 100644 --- a/engine/examples/apt-cacher-ng.md +++ b/engine/examples/apt-cacher-ng.md @@ -48,7 +48,7 @@ use: $ docker logs -f test_apt_cacher_ng To get your Debian-based containers to use the proxy, you have -following options. Note that you must replace `dockerhost` with the +following options. Replace `dockerhost` with the IP address or FQDN of the host running the `test_apt_cacher_ng` container. @@ -70,14 +70,13 @@ a local version of a common base: # docker build -t my_ubuntu . -**Option 2** is good for testing, but will break other HTTP clients +**Option 2** is good for testing, but breaks other HTTP clients which obey `http_proxy`, such as `curl`, `wget` and others: $ docker run --rm -t -i -e http_proxy=http://dockerhost:3142/ debian bash -**Option 3** is the least portable, but there will be times when you -might need to do it and you can do it from your `Dockerfile` -too. +**Option 3** is the least portable, but you might need to do it and you can do it +from your `Dockerfile` too. 
**Option 4** links Debian-containers to the proxy server using following command: diff --git a/engine/examples/couchdb_data_volumes.md b/engine/examples/couchdb_data_volumes.md index 37f8bbe95ee..6c6ed5dba61 100644 --- a/engine/examples/couchdb_data_volumes.md +++ b/engine/examples/couchdb_data_volumes.md @@ -14,7 +14,7 @@ different versions of CouchDB on the same data, etc. ## Create first database -Note that we're marking `/var/lib/couchdb` as a data volume. +We're marking `/var/lib/couchdb` as a data volume. $ COUCH1=$(docker run -d -p 5984 -v /var/lib/couchdb shykes/couchdb:2013-05-03) diff --git a/engine/examples/dotnetcore.md b/engine/examples/dotnetcore.md index 05754153dc6..6ec33f0cdb6 100644 --- a/engine/examples/dotnetcore.md +++ b/engine/examples/dotnetcore.md @@ -34,7 +34,7 @@ tutorial](https://www.asp.net/get-started) to initialize a project or clone our 1. Create a `Dockerfile` in your project folder. 2. Add the text below to your `Dockerfile` for either Linux or [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). - The tags below are multi-arch meaning they will pull either Windows or + The tags below are multi-arch meaning they pull either Windows or Linux containers depending on what mode is set in [Docker for Windows](https://docs.docker.com/docker-for-windows/). Read more on [switching containers](https://docs.docker.com/docker-for-windows/#switch-between-windows-and-linux-containers). 3. The `Dockerfile` assumes that your application is called `aspnetapp`. Change diff --git a/engine/examples/postgresql_service.md b/engine/examples/postgresql_service.md index 1123d370670..45bd2b58f20 100644 --- a/engine/examples/postgresql_service.md +++ b/engine/examples/postgresql_service.md @@ -90,7 +90,7 @@ the container exits successfully. Containers can be linked to another container's ports directly using `-link remote_name:local_alias` in the client's -`docker run`. 
This will set a number of environment +`docker run`. This sets a number of environment variables that can then be used to connect: ```bash diff --git a/engine/examples/running_riak_service.md b/engine/examples/running_riak_service.md index 2e68662403b..b0c21d16a02 100644 --- a/engine/examples/running_riak_service.md +++ b/engine/examples/running_riak_service.md @@ -14,7 +14,7 @@ Create an empty file called `Dockerfile`: $ touch Dockerfile Next, define the parent image you want to use to build your image on top -of. We'll use [Ubuntu](https://hub.docker.com/_/ubuntu/) (tag: +of. We use [Ubuntu](https://hub.docker.com/_/ubuntu/) (tag: `trusty`), which is available on [Docker Hub](https://hub.docker.com): # Riak @@ -88,7 +88,7 @@ Populate it with the following program definitions: ## Build the Docker image for Riak -Now you should be able to build a Docker image for Riak: +Now you can build a Docker image for Riak: $ docker build -t "/riak" . diff --git a/engine/examples/running_ssh_service.md b/engine/examples/running_ssh_service.md index 4b6541a651d..e5669339d45 100644 --- a/engine/examples/running_ssh_service.md +++ b/engine/examples/running_ssh_service.md @@ -62,11 +62,11 @@ Using the `sshd` daemon to spawn shells makes it complicated to pass environment variables to the user's shell via the normal Docker mechanisms, as `sshd` scrubs the environment before it starts the shell. -If you're setting values in the `Dockerfile` using `ENV`, you'll need to push them +If you're setting values in the `Dockerfile` using `ENV`, you need to push them to a shell initialization file like the `/etc/profile` example in the `Dockerfile` above. -If you need to pass`docker run -e ENV=value` values, you will need to write a +If you need to pass `docker run -e ENV=value` values, you need to write a short script to do the same before you start `sshd -D` and then replace the `CMD` with that script. 
diff --git a/engine/faq.md b/engine/faq.md index a01c571762b..ddbb78dc88f 100644 --- a/engine/faq.md +++ b/engine/faq.md @@ -23,7 +23,7 @@ offers a high-level tool with several powerful functionalities: - *Portable deployment across machines.* Docker defines a format for bundling an application and all its dependencies into a single object called a container. This container can be transferred to any Docker-enabled machine. The container can be executed there with the - guarantee that the execution environment exposed to the application will be the + guarantee that the execution environment exposed to the application is the same in development, testing, and production. LXC implements process sandboxing, which is an important pre-requisite for portable deployment, but is not sufficient for portable deployment. If you sent me a copy of your application installed in a custom LXC @@ -138,7 +138,7 @@ You can learn about the project's security policy ### Why do I need to sign my commits to Docker with the DCO? -Please read [our blog post]( +Read [our blog post]( http://blog.docker.com/2014/01/docker-code-contributions-require-developer-certificate-of-origin/){: target="_blank" class="_"} on the introduction of the DCO. ### When building an image, should I prefer system libraries or bundled ones? @@ -147,12 +147,11 @@ http://blog.docker.com/2014/01/docker-code-contributions-require-developer-certi https://groups.google.com/forum/#!topic/docker-dev/L2RBSPDu1L0){: target="_blank" class="_"}.* Virtually all programs depend on third-party libraries. Most frequently, they -will use dynamic linking and some kind of package dependency, so that when +use dynamic linking and some kind of package dependency, so that when multiple programs need the same library, it is installed only once. -Some programs, however, will bundle their third-party libraries, because they -rely on very specific versions of those libraries. 
For instance, Node.js bundles -OpenSSL; MongoDB bundles V8 and Boost (among others). +Some programs, however, bundle their third-party libraries, because they +rely on very specific versions of those libraries. When creating a Docker image, is it better to use the bundled libraries, or should you build those programs so that they use the default system libraries @@ -186,7 +185,7 @@ When building Docker images on Debian and Ubuntu you may have seen errors like: unable to initialize frontend: Dialog These errors don't stop the image from being built but inform you that the -installation process tried to open a dialog box, but was unable to. Generally, +installation process tried to open a dialog box, but couldn't. Generally, these errors are safe to ignore. Some people circumvent these errors by changing the `DEBIAN_FRONTEND` @@ -198,10 +197,10 @@ This prevents the installer from opening dialog boxes during installation which stops the errors. While this may sound like a good idea, it *may* have side effects. The -`DEBIAN_FRONTEND` environment variable will be inherited by all images and +`DEBIAN_FRONTEND` environment variable is inherited by all images and containers built from your image, effectively changing their behavior. People -using those images will run into problems when installing software -interactively, because installers will not show any dialog boxes. +using those images run into problems when installing software +interactively, because installers do not show any dialog boxes. Because of this, and because setting `DEBIAN_FRONTEND` to `noninteractive` is mainly a 'cosmetic' change, we *discourage* changing it. @@ -234,7 +233,7 @@ command and start it with `docker-machine start` if needed. $ docker-machine start default -You have to tell Docker to talk to that machine. You can do this with the +You need to tell Docker to talk to that machine. You can do this with the `docker-machine env` command. 
For example, $ eval "$(docker-machine env default)" diff --git a/engine/index.md b/engine/index.md index 92abe5ef54b..cd9a0f2bf38 100644 --- a/engine/index.md +++ b/engine/index.md @@ -69,7 +69,7 @@ Docker consists of: ## About this guide -The [Understanding Docker section](understanding-docker.md) will help you: +The [Understanding Docker section](understanding-docker.md) helps you: - See how Docker works at a high level - Understand the architecture of Docker @@ -79,7 +79,7 @@ The [Understanding Docker section](understanding-docker.md) will help you: ### Installation guides -The [installation section](installation/index.md) will show you how to install Docker +The [installation section](installation/index.md) shows you how to install Docker on a variety of platforms. @@ -96,10 +96,10 @@ on the separate [Release Notes page](/release-notes) ## Feature Deprecation Policy As changes are made to Docker there may be times when existing features -will need to be removed or replaced with newer features. Before an existing -feature is removed it will be labeled as "deprecated" within the documentation -and will remain in Docker for at least 3 stable releases (roughly 9 months). -After that time it may be removed. +need to be removed or replaced with newer features. Before an existing +feature is removed it is labeled as "deprecated" within the documentation +and remains in Docker for at least 3 stable releases. After that time it may be +removed. Users are expected to take note of the list of deprecated features each release and plan their migration away from those features, and (if applicable) diff --git a/engine/installation/index.md b/engine/installation/index.md index 80a3fbe1e7b..c514c3e5946 100644 --- a/engine/installation/index.md +++ b/engine/installation/index.md @@ -67,11 +67,11 @@ Major new versions of Docker Enterprise Edition are released twice per year. 
### Updates, and patches -- A given Docker EE release will receive patches and updates for at least **one +- A given Docker EE release receives patches and updates for at least **one year** after it is released. -- A given Docker CE Stable release will receive patches and updates for **one +- A given Docker CE Stable release receives patches and updates for **one month after the next Docker CE Stable release**. -- A given Docker CE Edge release will not receive any patches or updates after +- A given Docker CE Edge release does not receive any patches or updates after a subsequent Docker CE Edge or Stable release. ### Prior releases diff --git a/engine/installation/linux/docker-ce/centos.md b/engine/installation/linux/docker-ce/centos.md index 268c1495822..fdc24859ee3 100644 --- a/engine/installation/linux/docker-ce/centos.md +++ b/engine/installation/linux/docker-ce/centos.md @@ -133,13 +133,13 @@ from the repository. > **Warning**: If you have multiple Docker repositories enabled, installing > or updating without specifying a version in the `yum install` or - > `yum update` command will always install the highest possible version, + > `yum update` command always installs the highest possible version, > which may not be appropriate for your stability needs. {:.warning} If this is the first time you are installing a package from a recently added - repository, you will be prompted to accept the GPG key, and - the key's fingerprint will be shown. Verify that the fingerprint is + repository, you are prompted to accept the GPG key, and + the key's fingerprint is shown. Verify that the fingerprint is correct, and if so, accept the key. The fingerprint should match `060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35`. @@ -158,7 +158,7 @@ from the repository. 
``` The contents of the list depend upon which repositories are enabled, and - will be specific to your version of CentOS (indicated by the `.el7` suffix + are specific to your version of CentOS (indicated by the `.el7` suffix on the version, in this example). Choose a specific version to install. The second column is the version string. You can use the entire version string, but **you need to include at least to the first hyphen**. The third column @@ -204,7 +204,7 @@ to install. ### Install from a package If you cannot use Docker's repository to install Docker, you can download the -`.rpm` file for your release and install it manually. You will need to download +`.rpm` file for your release and install it manually. You need to download a new file each time you want to upgrade Docker. 1. Go to diff --git a/engine/installation/linux/docker-ce/debian.md b/engine/installation/linux/docker-ce/debian.md index a164b654f9b..15f236e9278 100644 --- a/engine/installation/linux/docker-ce/debian.md +++ b/engine/installation/linux/docker-ce/debian.md @@ -192,7 +192,7 @@ from the repository. 5. **Wheezy only**: The version of `add-apt-repository` on Wheezy adds a `deb-src` repository that does not exist. You need to comment out this repository or - running `apt-get update` will fail. Edit `/etc/apt/sources.list`. Find the + running `apt-get update` fails. Edit `/etc/apt/sources.list`. Find the line like the following, and comment it out or remove it: ```none @@ -228,7 +228,7 @@ from the repository. > > If you have multiple Docker repositories enabled, installing > or updating without specifying a version in the `apt-get install` or - > `apt-get update` command will always install the highest possible version, + > `apt-get update` command always installs the highest possible version, > which may not be appropriate for your stability needs. {:.warning} @@ -289,7 +289,7 @@ to install. 
### Install from a package If you cannot use Docker's repository to install Docker CE, you can download the -`.deb` file for your release and install it manually. You will need to download +`.deb` file for your release and install it manually. You need to download a new file each time you want to upgrade Docker. 1. Go to `{{ download-url-base }}/dists/`, diff --git a/engine/installation/linux/docker-ce/fedora.md b/engine/installation/linux/docker-ce/fedora.md index a21383a5462..4d32c2237ea 100644 --- a/engine/installation/linux/docker-ce/fedora.md +++ b/engine/installation/linux/docker-ce/fedora.md @@ -125,8 +125,8 @@ from the repository. ``` If this is the first time you are installing a package from a recently added - repository, you will be prompted to accept the GPG key, and - the key's fingerprint will be shown. Verify that the fingerprint matches + repository, you are prompted to accept the GPG key, and + the key's fingerprint is shown. Verify that the fingerprint matches `060A 61C5 1B55 8A7F 742B 77AA C52F EB6B 621E 9F35` and if so, accept the key. @@ -134,7 +134,7 @@ from the repository. > > If you have multiple Docker repositories enabled, installing > or updating without specifying a version in the `dnf install` or - > `dnf update` command will always install the highest possible version, + > `dnf update` command always installs the highest possible version, > which may not be appropriate for your stability needs. {:.warning-vanilla} @@ -150,7 +150,7 @@ from the repository. ``` The contents of the list depend upon which repositories are enabled, and - will be specific to your version of Fedora (indicated by the `.fc26` suffix + are specific to your version of Fedora (indicated by the `.fc26` suffix on the version, in this example). Choose a specific version to install. The second column is the version string. The third column is the repository name, which indicates which repository the package is from and by extension @@ -192,7 +192,7 @@ to install. 
### Install from a package If you cannot use Docker's repository to install Docker, you can download the -`.rpm` file for your release and install it manually. You will need to download +`.rpm` file for your release and install it manually. You need to download a new file each time you want to upgrade Docker CE. 1. Go to [{{ download-url-base }}/]({{ download-url-base }}/) and choose your diff --git a/engine/installation/linux/docker-ce/ubuntu.md b/engine/installation/linux/docker-ce/ubuntu.md index 2d64c8fbaed..e8052432633 100644 --- a/engine/installation/linux/docker-ce/ubuntu.md +++ b/engine/installation/linux/docker-ce/ubuntu.md @@ -61,7 +61,7 @@ Docker EE on Ubuntu supports `overlay2` and `aufs` storage drivers. - For version 3 of the Linux kernel, `aufs` is supported because `overlay` or `overlay2` drivers are not supported by that kernel version. -If you need to use `aufs`, you will need to do additional preparation as +If you need to use `aufs`, you need to do additional preparation as outlined below. #### Extra steps for aufs @@ -74,7 +74,7 @@ outlined below.
For Ubuntu 16.04 and higher, the Linux kernel includes support for OverlayFS, -and Docker CE will use the `overlay2` storage driver by default. If you need +and Docker CE uses the `overlay2` storage driver by default. If you need to use `aufs` instead, you need to configure it manually. See [aufs](/engine/userguide/storagedriver/aufs-driver.md) @@ -166,7 +166,7 @@ the repository. > **Note**: The `lsb_release -cs` sub-command below returns the name of your > Ubuntu distribution, such as `xenial`. Sometimes, in a distribution - > like Linux Mint, you might have to change `$(lsb_release -cs)` + > like Linux Mint, you might need to change `$(lsb_release -cs)` > to your parent Ubuntu distribution. For example, if you are using > `Linux Mint Rafaela`, you could use `trusty`. @@ -245,7 +245,7 @@ the repository. > > If you have multiple Docker repositories enabled, installing > or updating without specifying a version in the `apt-get install` or - > `apt-get update` command will always install the highest possible version, + > `apt-get update` command always installs the highest possible version, > which may not be appropriate for your stability needs. {:.warning-vanilla} @@ -297,7 +297,7 @@ to install. ### Install from a package If you cannot use Docker's repository to install Docker CE, you can download the -`.deb` file for your release and install it manually. You will need to download +`.deb` file for your release and install it manually. You need to download a new file each time you want to upgrade Docker CE. 1. Go to [{{ download-url-base }}/dists/]({{ download-url-base }}/dists/), diff --git a/engine/installation/linux/docker-ee/oracle.md b/engine/installation/linux/docker-ee/oracle.md index 8a1f53c4bbc..e2f31ee230e 100644 --- a/engine/installation/linux/docker-ee/oracle.md +++ b/engine/installation/linux/docker-ee/oracle.md @@ -37,10 +37,10 @@ Docker EE. On production systems, you must use `direct-lvm` mode, which requires one or more dedicated block devices. 
Fast storage such as solid-state media (SSD) is recommended. -> **Docker EE will not install on {{ linux-dist }} with `selinux` enabled!** +> **Docker EE cannot install on {{ linux-dist }} with `selinux` enabled!** > > If you have `selinux` enabled and you attempt to install Docker EE 17.06.1, -> you will get an error that the `container-selinux` package cannot be found. +> you get an error that the `container-selinux` package cannot be found. {:.warning } ### Uninstall old versions diff --git a/engine/installation/linux/docker-ee/suse.md b/engine/installation/linux/docker-ee/suse.md index 9296d005602..cb2759f9592 100644 --- a/engine/installation/linux/docker-ee/suse.md +++ b/engine/installation/linux/docker-ee/suse.md @@ -40,14 +40,14 @@ To install Docker EE, you need the 64-bit version of SLES 12.x, running on `x86_64`, `s390x` (IBM Z), or `ppc64le` (IBM Power) architectures. Docker EE is not supported on OpenSUSE. -The only supported storage driver for Docker EE on SLES is Btrfs, which will be +The only supported storage driver for Docker EE on SLES is Btrfs, which is used by default if the underlying filesystem hosting `/var/lib/docker/` is a BTRFS filesystem. #### Firewall configuration Docker creates a `DOCKER` iptables chain when it starts. The SUSE firewall may -block access to this chain, which can prevent you from being able to run +block access to this chain, which can prevent you from running containers with published ports. You may see errors such as the following: ```none @@ -97,7 +97,7 @@ BTRFS filesystem and mount it on `/var/lib/docker/`. 1. Check whether `/` (or `/var/` or `/var/lib/` or `/var/lib/docker/` if they are separate mount points) are formatted using Btrfs. If you do not have - separate mount points for any of these, a duplicate result for `/` will be + separate mount points for any of these, a duplicate result for `/` is returned. ```bash @@ -165,8 +165,8 @@ from the repository. #### Set up the repository -1. 
Temporarily add a `$DOCKER_EE_URL` variable into your environment. This will - only persist until you log out of the session. Replace `` +1. Temporarily add a `$DOCKER_EE_URL` variable into your environment. This + only persists until you log out of the session. Replace `` with the URL you noted down in the [prerequisites](#prerequisites). ```bash @@ -174,7 +174,7 @@ from the repository. ``` 2. Use the following command to set up the **stable** repository. Use the - command as-is. It will work because of the variable you set in the previous + command as-is. It works because of the variable you set in the previous step.
-3. Import the GPG key from the repository. Use the command as-is. It will work +3. Import the GPG key from the repository. Use the command as-is. It works because of the variable you set earlier. ```bash @@ -230,8 +230,8 @@ from the repository. ``` If this is the first time you have refreshed the package index since adding - the Docker repositories, you will be prompted to accept the GPG key, and - the key's fingerprint will be shown. Verify that the fingerprint matches + the Docker repositories, you are prompted to accept the GPG key, and + the key's fingerprint is shown. Verify that the fingerprint matches `77FE DA13 1A83 1D29 A418 D3E8 99E5 FF2E 7668 2BC9` and if so, accept the key. @@ -334,7 +334,7 @@ To upgrade Docker EE: ### Install from a package If you cannot use the official Docker repository to install Docker EE, you can -download the `.rpm` file for your release and install it manually. You will +download the `.rpm` file for your release and install it manually. You need to download a new file each time you want to upgrade Docker EE. 1. Go to the Docker EE repository URL associated with your diff --git a/engine/installation/linux/docker-ee/ubuntu.md b/engine/installation/linux/docker-ee/ubuntu.md index bc11d69ef69..29ba4f75403 100644 --- a/engine/installation/linux/docker-ee/ubuntu.md +++ b/engine/installation/linux/docker-ee/ubuntu.md @@ -75,7 +75,7 @@ Docker EE on Ubuntu supports `overlay2` and `aufs` storage drivers. - For version 3 of the Linux kernel, `aufs` is supported because `overlay` or `overlay2` drivers are not supported by that kernel version. -If you need to use `aufs`, you will need to do additional preparation as +If you need to use `aufs`, you need to do additional preparation as outlined below. #### Extra steps for aufs @@ -88,7 +88,7 @@ outlined below.
For Ubuntu 16.04 and higher, the Linux kernel includes support for OverlayFS, -and Docker CE will use the `overlay2` storage driver by default. If you need +and Docker CE uses the `overlay2` storage driver by default. If you need to use `aufs` instead, you need to configure it manually. See [aufs](/engine/userguide/storagedriver/aufs-driver.md) @@ -147,8 +147,8 @@ from the repository. software-properties-common ``` -3. Temporarily add a `$DOCKER_EE_URL` variable into your environment. This will - only persist until you log out of the session. Replace `` +3. Temporarily add a `$DOCKER_EE_URL` variable into your environment. This + only persists until you log out of the session. Replace `` with the URL you noted down in the [prerequisites](#prerequisites). ```bash @@ -163,8 +163,8 @@ from the repository. Verify that you now have the key with the fingerprint `DD91 1E99 5A64 A202 E859 07D6 BC14 F10B 6D08 5F96`, by searching for the - last eight characters of the fingerprint. Use the command as-is. It will - work because of the variable you set earlier. + last eight characters of the fingerprint. Use the command as-is. It works + because of the variable you set earlier. ```bash $ sudo apt-key fingerprint 6D085F96 @@ -176,7 +176,7 @@ from the repository. ``` 5. Use the following command to set up the **stable** repository. Use the - command as-is. It will work because of the variable you set earlier. + command as-is. It works because of the variable you set earlier. > **Note**: The `lsb_release -cs` sub-command below returns the name of your > Ubuntu distribution, such as `xenial`. @@ -239,7 +239,7 @@ from the repository. 
> **Warning**: If you have multiple Docker repositories enabled, installing > or updating without specifying a version in the `apt-get install` or - > `apt-get update` command will always install the highest possible version, + > `apt-get update` command always installs the highest possible version, > which may not be appropriate for your stability needs. {:.warning} @@ -254,7 +254,7 @@ from the repository. ``` The contents of the list depend upon which repositories are enabled, - and will be specific to your version of Ubuntu (indicated by the `xenial` + and are specific to your version of Ubuntu (indicated by the `xenial` suffix on the version, in this example). Choose a specific version to install. The second column is the version string. The third column is the repository name, which indicates which repository the package is from and @@ -300,7 +300,7 @@ To upgrade Docker EE: ### Install from a package If you cannot use Docker's repository to install Docker EE, you can download the -`.deb` file for your release and install it manually. You will need to download +`.deb` file for your release and install it manually. You need to download a new file each time you want to upgrade Docker EE. 1. 
Go to the Docker EE repository URL associated with your diff --git a/engine/installation/linux/linux-postinstall.md b/engine/installation/linux/linux-postinstall.md index 645ac496fbb..55ed934799d 100644 --- a/engine/installation/linux/linux-postinstall.md +++ b/engine/installation/linux/linux-postinstall.md @@ -66,8 +66,8 @@ To create the `docker` group and add your user: ``` To fix this problem, either remove the `~/.docker/` directory - (it will be recreated automatically, but any custom settings - will be lost), or change its ownership and pemissions using the + (it is recreated automatically, but any custom settings + are lost), or change its ownership and permissions using the following commands: ```bash @@ -123,7 +123,7 @@ your host's Linux distribution and available kernel drivers. ### Kernel compatibility -Docker will not run correctly if your kernel is older than version 3.10 or if it +Docker cannot run correctly if your kernel is older than version 3.10 or if it is missing some modules. To check kernel compatibility, you can download and run the [`check-compatibility.sh`](https://raw.githubusercontent.com/docker/docker/master/contrib/check-config.sh) script. @@ -134,7 +134,7 @@ $ curl https://raw.githubusercontent.com/docker/docker/master/contrib/check-conf $ bash ./check-config.sh ``` -The script will only work on Linux, not macOS. +The script only works on Linux, not macOS. ### `Cannot connect to the Docker daemon` @@ -173,7 +173,7 @@ from connecting. ### IP forwarding problems If you manually configure your network using `systemd-network` with `systemd` -version 219 or higher, Docker containers may be unable to access your network. +version 219 or higher, Docker containers may not be able to access your network. Beginning with `systemd` version 220, the forwarding setting for a given network (`net.ipv4.conf..forwarding`) defaults to *off*. This setting prevents IP forwarding. 
It also conflicts with Docker's behavior of enabling @@ -202,7 +202,7 @@ Linux systems which use a GUI often have a network manager running, which uses a `dnsmasq` instance running on a loopback address such as `127.0.0.1` or `127.0.1.1` to cache DNS requests, and adds this entry to `/etc/resolv.conf`. The `dnsmasq` service speeds up -DNS look-ups and also provides DHCP services. This configuration will not work +DNS look-ups and also provides DHCP services. This configuration does not work within a Docker container which has its own network namespace, because the Docker container resolves loopback addresses such as `127.0.0.1` to **itself**, and it is very unlikely to be running a DNS server on its own @@ -224,11 +224,11 @@ $ ps aux |grep dnsmasq ``` If your container needs to resolve hosts which are internal to your network, the -public nameservers will not be adequate. You have two choices: +public nameservers are not adequate. You have two choices: - You can specify a DNS server for Docker to use, **or** - You can disable `dnsmasq` in NetworkManager. If you do this, NetworkManager - will add your true DNS nameserver to `/etc/resolv.conf`, but you will lose the + adds your true DNS nameserver to `/etc/resolv.conf`, but you lose the possible benefits of `dnsmasq`. **You only need to use one of these methods.** @@ -393,10 +393,10 @@ and a 10% overall performance degradation, even if Docker is not running. $ sudo update-grub ``` - If your GRUB configuration file has incorrect syntax, an error will occur. + If your GRUB configuration file has incorrect syntax, an error occurs. In this case, repeat steps 3 and 4. - The changes will take effect when the system is rebooted. + The changes take effect when the system is rebooted. 
## Next steps diff --git a/engine/installation/windows/docker-ee.md b/engine/installation/windows/docker-ee.md index 99b2ab0ca11..3c7574ef6e4 100644 --- a/engine/installation/windows/docker-ee.md +++ b/engine/installation/windows/docker-ee.md @@ -33,7 +33,7 @@ on Windows Server 2016 and you have a >Windows Server 1709 > >Docker Universal Control Plane is not currently supported on Windows Server 1709 due to image incompatibility issues. ->To use UCP, for now please use the current LTSB Windows release and not 1709. +>To use UCP, for now, use the current LTSB Windows release and not 1709. Docker EE for Windows requires Windows Server 2016 or later. See @@ -100,7 +100,7 @@ installs, or install on air-gapped systems. # Clean up the zip file. Remove-Item -Force {{ filename }} - # Install Docker. This will require rebooting. + # Install Docker. This requires rebooting. $null = Install-WindowsFeature containers # Add Docker to the path for the current session. @@ -142,7 +142,7 @@ Docker 17.03.0-ee Docker Contains Docker ## Update Docker EE > **Check that you have the Docker module** -> You may have previously installed Docker using a Microsoft provided module. To ensure you get the latest Docker patches, please remove this module and use Docker's module: +> You may have previously installed Docker using a Microsoft provided module. To ensure you get the latest Docker patches, remove this module and use Docker's module: > > ```none > Uninstall-Module DockerMsftProvider -Force @@ -156,7 +156,7 @@ Install-Package -Name docker -ProviderName DockerProvider -Update -Force ``` If Docker Universal Control Plane (UCP) is installed, run the -[UCP installation script for Windows](/datacenter/ucp/2.2/guides/admin/configure/join-windows-worker-nodes/#run-the-windows-node-setup-script). +[UCP installation script for Windows](/datacenter/ucp/2.2/guides/admin/configure/join-windows-worker-nodes/#run-the-windows-node-setup-script). 
Start the Docker service: diff --git a/engine/migration.md b/engine/migration.md index 214fb938193..ef37a2472b0 100644 --- a/engine/migration.md +++ b/engine/migration.md @@ -16,7 +16,7 @@ layers even if they didn’t come from the same build. Addressing images by their content also lets us more easily detect if something has already been downloaded. Because we have separated images and layers, you -don’t have to pull the configurations for every image that was part of the +don’t need to pull the configurations for every image that was part of the original build chain. We also don’t need to create layers for the build instructions that didn’t modify the filesystem. @@ -30,19 +30,19 @@ We are also introducing a new manifest format that is built on top of the content addressable base. It directly references the content addressable image configuration and layer checksums. The new manifest format also makes it possible for a manifest list to be used for targeting multiple -architectures/platforms. Moving to the new manifest format will be completely +architectures/platforms. Moving to the new manifest format is completely transparent. ## Prepare for upgrade -To make your current images accessible to the new model we have to migrate them +To make your current images accessible to the new model we need to migrate them to content addressable storage. This means calculating the secure checksums for your current data. All your current images, tags, and containers are automatically migrated to the new foundation the first time you start Docker Engine 1.10. Before loading your -container, the daemon will calculate all needed checksums for your current data, -and after it has completed, all your images and tags will have brand new secure +container, the daemon calculates all needed checksums for your current data, +and after it has completed, all your images and tags have brand new secure IDs. 
**While this is simple operation, calculating SHA256 checksums for your files @@ -53,15 +53,15 @@ Docker daemon won’t be ready to respond to requests. ## Minimize migration time If you can accept this one time hit, then upgrading Docker Engine and restarting -the daemon will transparently migrate your images. However, if you want to +the daemon transparently migrates your images. However, if you want to minimize the daemon’s downtime, a migration utility can be run while your old daemon is still running. -This tool will find all your current images and calculate the checksums for +This tool finds all your current images and calculates the checksums for them. After you upgrade and restart the daemon, the checksum data of the -migrated images will already exist, freeing the daemon from that computation -work. If new images appeared between the migration and the upgrade, those will -be processed at time of upgrade to 1.10. +migrated images already exists, freeing the daemon from that computation +work. If new images appeared between the migration and the upgrade, those are +processed at time of upgrade to 1.10. [You can download the migration tool here.](https://github.com/docker/v1.10-migrator/releases) diff --git a/engine/reference/commandline/README.md b/engine/reference/commandline/README.md index aff6f8f155a..e97d18a4861 100644 --- a/engine/reference/commandline/README.md +++ b/engine/reference/commandline/README.md @@ -16,7 +16,7 @@ The output files are composed from two sources: - The **Extended Description** and **Examples** sections are pulled into the YAML from the files in [https://github.com/moby/moby/tree/master/docs/reference/commandline](https://github.com/moby/moby/tree/master/docs/reference/commandline) Specifically, the Markdown inside the `## Description` and `## Examples` - headings are parsed. Please submit corrections to the text in that repository. + headings is parsed. Submit corrections to the text in that repository.
# Updating the YAML files diff --git a/engine/security/antivirus.md b/engine/security/antivirus.md index 903d9ed1a7d..9b9e6316dd5 100644 --- a/engine/security/antivirus.md +++ b/engine/security/antivirus.md @@ -10,7 +10,7 @@ in a way that causes Docker commands to hang. One way to reduce these problems is to add the Docker data directory (`/var/lib/docker` on Linux or `$Env:ProgramData` on Windows Server) to the antivirus's exclusion list. However, this comes with the trade-off that viruses -or malware in Docker images, writable layers of containers, or volumes will not -be detected. If you do choose to exclude Docker's data directory from background +or malware in Docker images, writable layers of containers, or volumes are not +detected. If you do choose to exclude Docker's data directory from background virus scanning, you may want to schedule a recurring task that stops Docker, scans the data directory, and restarts Docker. \ No newline at end of file diff --git a/engine/security/certificates.md b/engine/security/certificates.md index ed54ba1e0c5..bea1564678a 100644 --- a/engine/security/certificates.md +++ b/engine/security/certificates.md @@ -10,31 +10,34 @@ In [Running Docker with HTTPS](https.md), you learned that, by default, Docker runs via a non-networked Unix socket and TLS must be enabled in order to have the Docker client and the daemon communicate securely over HTTPS. TLS ensures authenticity of the registry endpoint and that traffic to/from registry is encrypted. -This article demonstrates how to ensure the traffic between the Docker registry (i.e., *a server*) and the Docker daemon (i.e., *a client*) traffic is encrypted and a properly authenticated using *certificate-based client-server authentication*. +This article demonstrates how to ensure the traffic between the Docker registry +server and the Docker daemon (a client of the registry server) is encrypted and +properly authenticated using *certificate-based client-server authentication*. 
-We will show you how to install a Certificate Authority (CA) root certificate +We show you how to install a Certificate Authority (CA) root certificate for the registry and how to set the client TLS certificate for verification. ## Understanding the configuration A custom certificate is configured by creating a directory under -`/etc/docker/certs.d` using the same name as the registry's hostname (e.g., -`localhost`). All `*.crt` files are added to this directory as CA roots. +`/etc/docker/certs.d` using the same name as the registry's hostname, such as +`localhost`. All `*.crt` files are added to this directory as CA roots. > **Note**: -> As of docker 1.13, on Linux any root certificates authorities will be merged -> in with the system defaults (i.e., host's root CA set). Prior to 1.13 and on -> Windows, the system default certificates will only be used when there are no -> custom root certificates provided. +> As of Docker 1.13, on Linux any root certificate authorities are merged +> with the system defaults, including the host's root CA set. On prior +> versions of Docker, and on Docker Enterprise Edition for Windows Server, +> the system default certificates are only used when no custom root certificates +> are configured. The presence of one or more `.key/cert` pairs indicates to Docker that there are custom certificates required for access to the desired repository. > **Note**: -> If there are multiple certificates, each will be tried in alphabetical -> order. If there is an authentication error (e.g., 403, 404, 5xx, etc.), Docker -> will continue to try with the next certificate. +> If multiple certificates exist, each is tried in alphabetical +> order. If there is a 4xx-level or 5xx-level authentication error, Docker +> continues to try with the next certificate. The following illustrates a configuration with custom certificates: @@ -54,14 +57,14 @@ creating an os-provided bundled certificate chain.
## Creating the client certificates -You will use OpenSSL's `genrsa` and `req` commands to first generate an RSA +Use OpenSSL's `genrsa` and `req` commands to first generate an RSA key and then use the key to create the certificate. $ openssl genrsa -out client.key 4096 $ openssl req -new -x509 -text -key client.key -out client.cert > **Note**: -> These TLS commands will only generate a working set of certificates on Linux. +> These TLS commands only generate a working set of certificates on Linux. > The version of OpenSSL in macOS is incompatible with the type of > certificate Docker requires. @@ -73,7 +76,7 @@ as client certificates. If a CA certificate is accidentally given the extension following error message: ``` -Missing key KEY_NAME for client certificate CERT_NAME. Note that CA certificates should use the extension .crt. +Missing key KEY_NAME for client certificate CERT_NAME. CA certificates should use the extension .crt. ``` If the Docker registry is accessed without a port number, do not add the port to the directory name. The following shows the configuration for a registry on default port 443 which is accessed with `docker login my-https.registry.example.com`: diff --git a/engine/security/https.md b/engine/security/https.md index b5d25038b6f..95dbe361e2c 100644 --- a/engine/security/https.md +++ b/engine/security/https.md @@ -14,9 +14,9 @@ If you need Docker to be reachable via the network in a safe manner, you can enable TLS by specifying the `tlsverify` flag and pointing Docker's `tlscacert` flag to a trusted CA certificate. -In the daemon mode, it will only allow connections from clients +In the daemon mode, it only allows connections from clients authenticated by a certificate signed by that CA. In the client mode, -it will only connect to servers with a certificate signed by that CA. +it only connects to servers with a certificate signed by that CA. 
> Advanced topic > @@ -24,13 +24,6 @@ it will only connect to servers with a certificate signed by that CA. > with OpenSSL, x509 and TLS before using it in production. {:.important} -> Only works on Linux -> -> These TLS commands will only generate a working set of certificates on Linux. -> macOS comes with a version of OpenSSL that is incompatible with the -> certificates that Docker requires. -{:.important} - ## Create a CA, server and client keys with OpenSSL > **Note**: replace all instances of `$HOST` in the following example with the @@ -63,9 +56,9 @@ First, on the **Docker daemon's host machine**, generate CA private and public k Common Name (e.g. server FQDN or YOUR name) []:$HOST Email Address []:Sven@home.org.au -Now that we have a CA, you can create a server key and certificate -signing request (CSR). Make sure that "Common Name" (i.e., server FQDN or YOUR -name) matches the hostname you will use to connect to Docker: +Now that you have a CA, you can create a server key and certificate +signing request (CSR). Make sure that "Common Name" matches the hostname you use +to connect to Docker: > **Note**: replace all instances of `$HOST` in the following example with the > DNS name of your Docker daemon's host. @@ -139,10 +132,10 @@ two certificate signing requests: $ rm -v client.csr server.csr -With a default `umask` of 022, your secret keys will be *world-readable* and +With a default `umask` of 022, your secret keys are *world-readable* and writable for you and your group. -In order to protect your keys from accidental damage, you will want to remove their +To protect your keys from accidental damage, remove their write permissions. 
To make them only readable by you, change file modes as follows: $ chmod -v 0400 ca-key.pem key.pem server-key.pem @@ -153,13 +146,13 @@ prevent accidental damage: $ chmod -v 0444 ca.pem server-cert.pem cert.pem Now you can make the Docker daemon only accept connections from clients -providing a certificate trusted by our CA: +providing a certificate trusted by your CA: $ dockerd --tlsverify --tlscacert=ca.pem --tlscert=server-cert.pem --tlskey=server-key.pem \ -H=0.0.0.0:2376 -To be able to connect to Docker and validate its certificate, you now -need to provide your client keys, certificates and trusted CA: +To connect to Docker and validate its certificate, provide your client keys, +certificates and trusted CA: > Run it on the client machine > @@ -177,7 +170,7 @@ need to provide your client keys, certificates and trusted CA: > Docker over TLS should run on TCP port 2376. > **Warning**: -> As shown in the example above, you don't have to run the `docker` client +> As shown in the example above, you don't need to run the `docker` client > with `sudo` or the `docker` group when you use certificate authentication. > That means anyone with the keys can give any instructions to your Docker > daemon, giving them root access to the machine hosting the daemon. Guard @@ -196,7 +189,7 @@ the files to the `.docker` directory in your home directory -- and set the $ export DOCKER_HOST=tcp://$HOST:2376 DOCKER_TLS_VERIFY=1 -Docker will now connect securely by default: +Docker now connects securely by default: $ docker ps @@ -219,7 +212,7 @@ Docker in various other modes by mixing the flags. - `tlsverify`, `tlscacert`, `tlscert`, `tlskey`: Authenticate with client certificate and authenticate server based on given CA -If found, the client will send its client certificate, so you just need +If found, the client sends its client certificate, so you just need to drop your keys into `~/.docker/{ca,cert,key}.pem`. 
Alternatively, if you want to store your keys in another location, you can specify that location using the environment variable `DOCKER_CERT_PATH`. diff --git a/engine/security/https/README.md b/engine/security/https/README.md index 74dae4a3a21..41e9fe22eaa 100644 --- a/engine/security/https/README.md +++ b/engine/security/https/README.md @@ -5,7 +5,7 @@ published: false This is an initial attempt to make it easier to test the examples in the https.md doc. -At this point, it has to be a manual thing, and I've been running it in boot2docker. +At this point, it is a manual thing, and I've been running it in boot2docker. My process is as following: @@ -26,4 +26,4 @@ Start another terminal: root@boot2docker:/# cd docker/docs/articles/https root@boot2docker:/# make client -The last will connect first with `--tls` and then with `--tlsverify`, both should succeed. +The last connects first with `--tls` and then with `--tlsverify`, both should succeed. diff --git a/engine/security/security.md b/engine/security/security.md index 4b24294f7b1..a18e1dbc45c 100644 --- a/engine/security/security.md +++ b/engine/security/security.md @@ -86,8 +86,8 @@ Docker daemon**. This is a direct consequence of some powerful Docker features. Specifically, Docker allows you to share a directory between the Docker host and a guest container; and it allows you to do so without limiting the access rights of the container. This means that you -can start a container where the `/host` directory will be the `/` directory -on your host; and the container will be able to alter your host filesystem +can start a container where the `/host` directory is the `/` directory +on your host; and the container can alter your host filesystem without any restriction. This is similar to how virtualization systems allow filesystem resource sharing. Nothing prevents you from sharing your root filesystem (or even your root block device) with a virtual machine. 
@@ -107,10 +107,10 @@ use traditional UNIX permission checks to limit access to the control socket. You can also expose the REST API over HTTP if you explicitly decide to do so. -However, if you do that, being aware of the above mentioned security -implication, you should ensure that it will be reachable only from a -trusted network or VPN; or protected with e.g., `stunnel` and client SSL -certificates. You can also secure them with [HTTPS and +However, if you do that, be aware of the above mentioned security +implications. Ensure that it is reachable only from a +trusted network or VPN or protected with a mechanism such as `stunnel` and +client SSL certificates. You can also secure API endpoints with [HTTPS and certificates](https.md). The daemon is also potentially vulnerable to other inputs, such as image @@ -121,12 +121,6 @@ toward privilege separation. As of Docker 1.10.0, all images are stored and accessed by the cryptographic checksums of their contents, limiting the possibility of an attacker causing a collision with an existing image. -Eventually, it is expected that the Docker daemon will run restricted -privileges, delegating operations to well-audited sub-processes, -each with its own (very limited) scope of Linux capabilities, -virtual network setup, filesystem management, etc. That is, most likely, -pieces of the Docker engine itself will run inside of containers. - Finally, if you run Docker on a server, it is recommended to run exclusively Docker on the server, and move all other services within containers controlled by Docker. Of course, it is fine to keep your @@ -140,26 +134,24 @@ capabilities. What does that mean? Capabilities turn the binary "root/non-root" dichotomy into a fine-grained access control system. 
Processes (like web servers) that -just need to bind on a port below 1024 do not have to run as root: they +just need to bind on a port below 1024 do not need to run as root: they can just be granted the `net_bind_service` capability instead. And there are many other capabilities, for almost all the specific areas where root privileges are usually needed. This means a lot for container security; let's see why! -Your average server (bare metal or virtual machine) needs to run a bunch -of processes as root. Those typically include SSH, cron, syslogd; -hardware management tools (e.g., load modules), network configuration -tools (e.g., to handle DHCP, WPA, or VPNs), and much more. A container is -very different, because almost all of those tasks are handled by the -infrastructure around the container: +Typical servers run several processes as `root`, including the SSH daemon, +`cron` daemon, logging daemons, kernel modules, network configuration tools, +and more. A container is different, because almost all of those tasks are +handled by the infrastructure around the container: - - SSH access will typically be managed by a single server running on + - SSH access is typically managed by a single server running on the Docker host; - `cron`, when necessary, should run as a user process, dedicated and tailored for the app that needs its scheduling service, rather than as a platform-wide facility; - - log management will also typically be handed to Docker, or by + - log management is also typically handed to Docker, or to third-party services like Loggly or Splunk; - hardware management is irrelevant, meaning that you never need to run `udevd` or equivalent daemons within @@ -171,7 +163,7 @@ infrastructure around the container: is specifically engineered to behave like a router or firewall, of course). -This means that in most cases, containers will not need "real" root +This means that in most cases, containers do not need "real" root privileges *at all*.
And therefore, containers can run with a reduced capability set; meaning that "root" within a container has much less privileges than the real "root". For instance, it is possible to: @@ -185,11 +177,11 @@ privileges than the real "root". For instance, it is possible to: - and many others. This means that even if an intruder manages to escalate to root within a -container, it will be much harder to do serious damage, or to escalate +container, it is much harder to do serious damage, or to escalate to the host. -This won't affect regular web apps; but malicious users will find that -the arsenal at their disposal has shrunk considerably! By default Docker +This doesn't affect regular web apps, but reduces the vectors of attack by +malicious users considerably. By default Docker drops all capabilities except [those needed](https://github.com/moby/moby/blob/master/oci/defaults.go#L14-L30), a whitelist instead of a blacklist approach. You can see a full list of @@ -218,8 +210,8 @@ While Docker currently only enables capabilities, it doesn't interfere with the other systems. This means that there are many different ways to harden a Docker host. Here are a few examples. - - You can run a kernel with GRSEC and PAX. This will add many safety - checks, both at compile-time and run-time; it will also defeat many + - You can run a kernel with GRSEC and PAX. This adds many safety + checks, both at compile-time and run-time; it also defeats many exploits, thanks to techniques like address randomization. It doesn't require Docker-specific configuration, since those security features apply system-wide, independent of containers. @@ -231,10 +223,9 @@ harden a Docker host. Here are a few examples. - You can define your own policies using your favorite access control mechanism. 
-Just like there are many third-party tools to augment Docker containers -with e.g., special network topologies or shared filesystems, you can -expect to see tools to harden existing Docker containers without -affecting Docker's core. +Just as you can use third-party tools to augment Docker containers, including +special network topologies or shared filesystems, tools exist to harden Docker +containers without the need to modify Docker itself. As of Docker 1.10 User Namespaces are supported directly by the docker daemon. This feature allows for the root user in a container to be mapped @@ -245,21 +236,19 @@ by default. Refer to the [daemon command](../reference/commandline/dockerd.md#daemon-user-namespace-options) in the command line reference for more information on this feature. Additional information on the implementation of User Namespaces in Docker -can be found in this blog post. +can be found in +[this blog post](https://integratedcode.us/2015/10/13/user-namespaces-have-arrived-in-docker/). ## Conclusions Docker containers are, by default, quite secure; especially if you take -care of running your processes inside the containers as non-privileged -users (i.e., non-`root`). +care to run your processes as non-privileged users inside the container. You can add an extra layer of safety by enabling AppArmor, SELinux, -GRSEC, or your favorite hardening solution. +GRSEC, or another appropriate hardening system. -Last but not least, if you see interesting security features in other -containerization systems, these are simply kernels features that may -be implemented in Docker as well. We welcome users to submit issues, -pull requests, and communicate via the mailing list. +If you think of ways to make Docker more secure, we welcome feature requests, +pull requests, or comments on the Docker community forums.
## Related information diff --git a/engine/security/trust/content_trust.md b/engine/security/trust/content_trust.md index 8f08cafb3f9..a2e5f972a30 100644 --- a/engine/security/trust/content_trust.md +++ b/engine/security/trust/content_trust.md @@ -1,6 +1,6 @@ --- description: Enabling content trust in Docker -keywords: content, trust, security, docker, documentation +keywords: content, trust, security, docker, documentation title: Content trust in Docker --- @@ -130,7 +130,7 @@ read how to [manage keys for content trust](trust_key_mng.md). ## Survey of typical content trust operations This section surveys the typical trusted operations users perform with Docker -images. Specifically, we will be going through the following steps to help us exercise +images. Specifically, we go through the following steps to help us exercise these various trusted operations: * Build and push an unsigned image @@ -167,7 +167,7 @@ FROM docker/trusttest:latest RUN echo ``` -In order to build a container successfully using this Dockerfile, one can do: +To build a container successfully using this Dockerfile, one can do: ``` $ docker build --disable-content-trust -t /nottrusttest:latest . 
@@ -219,10 +219,11 @@ Enter passphrase for new repository key with id docker.io//trusttest ( Repeat passphrase for new repository key with id docker.io//trusttest (3a932f1): Finished initializing "docker.io//trusttest" ``` -When you push your first tagged image with content trust enabled, the `docker` + +When you push your first tagged image with content trust enabled, the `docker` client recognizes this is your first push and: - - alerts you that it will create a new root key + - alerts you that it is creating a new root key - requests a passphrase for the root key - generates a root key in the `~/.docker/trust` directory - requests a passphrase for the repository key diff --git a/engine/security/trust/deploying_notary.md b/engine/security/trust/deploying_notary.md index 1d0a230958b..df3aad7157f 100644 --- a/engine/security/trust/deploying_notary.md +++ b/engine/security/trust/deploying_notary.md @@ -23,6 +23,6 @@ for [Notary](https://github.com/docker/notary#using-notary) depending on which o ## If you want to use Notary in production -Please check back here for instructions after Notary Server has an official +Check back here for instructions after Notary Server has an official stable release. To get a head start on deploying Notary in production, see [the Notary repository](https://github.com/docker/notary). 
diff --git a/engine/security/trust/trust_automation.md b/engine/security/trust/trust_automation.md index 0b42a18ce68..8f571961932 100644 --- a/engine/security/trust/trust_automation.md +++ b/engine/security/trust/trust_automation.md @@ -1,6 +1,6 @@ --- description: Automating content push pulls with trust -keywords: trust, security, docker, documentation, automation +keywords: trust, security, docker, documentation, automation title: Automation with content trust --- @@ -35,7 +35,7 @@ latest: digest: sha256:d149ab53f871 size: 3355 Signing and pushing trust metadata ``` -When working directly with the Notary client, it will use its [own set of environment variables](/notary/reference/client-config.md#environment-variables-optional). +When working directly with the Notary client, it uses its [own set of environment variables](/notary/reference/client-config.md#environment-variables-optional). ## Building with content trust diff --git a/engine/security/trust/trust_delegation.md b/engine/security/trust/trust_delegation.md index 011c1c1f94c..65803ed669b 100644 --- a/engine/security/trust/trust_delegation.md +++ b/engine/security/trust/trust_delegation.md @@ -33,10 +33,10 @@ available on your path Notary server used for images in Docker Hub. For more detailed information about how to use Notary outside of the default -Docker Content Trust use cases, please refer to the +Docker Content Trust use cases, refer to the [Notary CLI documentation](/notary/getting_started.md). -Note that when publishing and listing delegation changes using the Notary client, +When publishing and listing delegation changes using the Notary client, your Docker Hub credentials are required. ## Generating delegation keys @@ -58,11 +58,10 @@ e is 65537 (0x10001) ``` -They should keep `delegation.key` private - this is what they will use to sign -tags. +They should keep `delegation.key` private because it is used to sign tags. 
Then they need to generate an x509 certificate containing the public key, which is -what they will give to you. Here is the command to generate a CSR (certificate +what you need from them. Here is the command to generate a CSR (certificate signing request): ``` @@ -84,15 +83,15 @@ by a CA. If your repository was created using a version of Docker Engine prior to 1.11, then before adding any delegations, you should rotate the snapshot key to the server -so that collaborators will not require your snapshot key to sign and publish tags: +so that collaborators don't need your snapshot key to sign and publish tags: ``` $ notary key rotate docker.io// snapshot -r ``` -This tells Notary to rotate a key for your particular image repository - note that -you must include the `docker.io/` prefix. `snapshot -r` specifies that you want -to rotate the snapshot key specifically, and you want the server to manage it (`-r` +This tells Notary to rotate a key for your particular image repository. The +`docker.io/` prefix is required. `snapshot -r` specifies that you want +to rotate the snapshot key and that you want the server to manage it (`-r` stands for "remote"). When adding a delegation, you must acquire @@ -108,14 +107,14 @@ $ notary publish docker.io// ``` The preceding example illustrates a request to add the delegation -`targets/releases` to the image repository, if it doesn't exist. Be sure to use +`targets/releases` to the image repository, if it doesn't exist. Be sure to use `targets/releases` - Notary supports multiple delegation roles, so if you mistype -the delegation name, the Notary CLI will not error. However, Docker Engine +the delegation name, the Notary CLI does not error. However, Docker Engine supports reading only from `targets/releases`. It also adds the collaborator's public key to the delegation, enabling them to sign the `targets/releases` delegation so long as they have the private key corresponding -to this public key. 
The `--all-paths` flag tells Notary not to restrict the tag +to this public key. The `--all-paths` flag tells Notary not to restrict the tag names that can be signed into `targets/releases`, which we highly recommend for `targets/releases`. @@ -151,15 +150,15 @@ $ notary delegation remove docker.io// targets/releases 729 Removal of delegation role targets/releases with keys [729c7094a8210fd1e780e7b17b7bb55c9a28a48b871b07f65d97baf93898523a], to repository "docker.io//" staged for next publish. ``` -The revocation will take effect as soon as you publish: +The revocation takes effect as soon as you publish: ``` $ notary publish docker.io// ``` -Note that by removing all the keys from the `targets/releases` delegation, the -delegation (and any tags that are signed into it) is removed. That means that -these tags will all be deleted, and you may end up with older, legacy tags that +By removing all the keys from the `targets/releases` delegation, the +delegation (and any tags that are signed into it) is removed. That means that +these tags are all deleted, and you may end up with older, legacy tags that were signed directly by the targets key. ## Removing the `targets/releases` delegation entirely from a repository @@ -197,19 +196,19 @@ $ notary key import delegation.key --role user where `delegation.key` is the file containing your PEM-encoded private key. After you have done so, running `docker push` on any repository that -includes your key in the `targets/releases` delegation will automatically sign +includes your key in the `targets/releases` delegation automatically signs tags using this imported key. ## `docker push` behavior When running `docker push` with Docker Content Trust, Docker Engine -will attempt to sign and push with the `targets/releases` delegation if it exists. -If it does not, the targets key will be used to sign the tag, if the key is available. +attempts to sign and push with the `targets/releases` delegation if it exists. 
+If it does not, the targets key is used to sign the tag, if the key is available. ## `docker pull` and `docker build` behavior When running `docker pull` or `docker build` with Docker Content Trust, Docker -Engine will pull tags only signed by the `targets/releases` delegation role or +Engine pulls only tags signed by the `targets/releases` delegation role or the legacy tags that were signed directly with the `targets` key. ## Related information diff --git a/engine/security/trust/trust_key_mng.md b/engine/security/trust/trust_key_mng.md index 18ad71aa925..6e797223b8c 100644 --- a/engine/security/trust/trust_key_mng.md +++ b/engine/security/trust/trust_key_mng.md @@ -64,7 +64,8 @@ Docker Content Trust can store and sign with root keys from a Yubikey 4. The Yubikey is prioritized over keys stored in the filesystem. When you initialize a new repository with content trust, Docker Engine looks for a root key locally. If a key is not found and the Yubikey 4 exists, Docker Engine creates a root key in the -Yubikey 4. Please consult the [Notary documentation](/notary/advanced_usage.md#use-a-yubikey) for more details. +Yubikey 4. Consult the [Notary documentation](/notary/advanced_usage.md#use-a-yubikey) +for more details. Prior to Docker Engine 1.11, this feature was only in the experimental branch. diff --git a/engine/security/trust/trust_sandbox.md b/engine/security/trust/trust_sandbox.md index 97307cf376e..775cf37d636 100644 --- a/engine/security/trust/trust_sandbox.md +++ b/engine/security/trust/trust_sandbox.md @@ -16,7 +16,7 @@ overview](content_trust.md). ### Prerequisites These instructions assume you are running in Linux or macOS. You can run -this sandbox on a local machine or on a virtual machine. You will need to +this sandbox on a local machine or on a virtual machine. You need to have privileges to run docker commands on your local machine or in the VM. 
This sandbox requires you to install two Docker tools: Docker Engine >= 1.10.0 @@ -25,8 +25,6 @@ and Docker Compose >= 1.6.0. To install the Docker Engine, choose from the Docker Compose, see the [detailed instructions here](/compose/install/). -Finally, you'll need to have a text editor installed on your local system or VM. - ## What is in the sandbox? If you are just using trust out-of-the-box you only need your Docker Engine @@ -39,7 +37,7 @@ production trust environment, and sets up these additional components. | Registry server | A local registry service. | | Notary server | The service that does all the heavy-lifting of managing trust | -This means you will be running your own content trust (Notary) server and registry. +This means you run your own content trust (Notary) server and registry. If you work exclusively with the Docker Hub, you would not need these components. They are built into the Docker Hub for you. For the sandbox, however, you build your own entire, mock production environment. @@ -48,19 +46,19 @@ Within the `trustsandbox` container, you interact with your local registry rathe than the Docker Hub. This means your everyday image repositories are not used. They are protected while you play. -When you play in the sandbox, you'll also create root and repository keys. The +When you play in the sandbox, you also create root and repository keys. The sandbox is configured to store all the keys and files inside the `trustsandbox` container. Since the keys you create in the sandbox are for play only, destroying the container destroys them as well. -By using a docker-in-docker image for the `trustsandbox` container, you will also -not pollute your real docker daemon cache with any images you push and pull. The -images will instead be stored in an anonymous volume attached to this container, +By using a docker-in-docker image for the `trustsandbox` container, you also +don't pollute your real Docker daemon cache with any images you push and pull. 
+The images are stored in an anonymous volume attached to this container, and can be destroyed after you destroy the container. ## Build the sandbox -In this section, you'll use Docker Compose to specify how to set up and link together +In this section, you use Docker Compose to specify how to set up and link together the `trustsandbox` container, the Notary server, and the Registry server. @@ -120,13 +118,13 @@ the `trustsandbox` container, the Notary server, and the Registry server. $ docker-compose up -d The first time you run this, the docker-in-docker, Notary server, and registry - images will be first downloaded from Docker Hub. + images are downloaded from Docker Hub. ## Playing in the sandbox Now that everything is setup, you can go into your `trustsandbox` container and -start testing Docker content trust. From your host machine, obtain a shell +start testing Docker content trust. From your host machine, obtain a shell in the `trustsandbox` container. $ docker exec -it trustsandbox sh @@ -134,7 +132,7 @@ in the `trustsandbox` container. ### Test some trust operations -Now, you'll pull some images from within the `trustsandbox` container. +Now, pull some images from within the `trustsandbox` container. 1. Download a `docker` image to test with. @@ -192,7 +190,10 @@ Now, you'll pull some images from within the `trustsandbox` container. Finished initializing "sandboxregistry:5000/test/trusttest" Successfully signed "sandboxregistry:5000/test/trusttest":latest - Because you are pushing this repository for the first time, docker creates new root and repository keys and asks you for passphrases with which to encrypt them. If you push again after this, it will only ask you for repository passphrase so it can decrypt the key and sign again. + Because you are pushing this repository for the first time, Docker creates + new root and repository keys and asks you for passphrases with which to + encrypt them. 
If you push again after this, it only asks you for repository + passphrase so it can decrypt the key and sign again. 7. Try pulling the image you just pushed: @@ -211,33 +212,35 @@ What happens when data is corrupted and you try to pull it when trust is enabled? In this section, you go into the `sandboxregistry` and tamper with some data. Then, you try and pull it. -1. Leave the `trustsandbox` shell and container running. +1. Leave the `trustsandbox` shell and container running. -2. Open a new interactive terminal from your host, and obtain a shell into the -`sandboxregistry` container. +2. Open a new interactive terminal from your host, and obtain a shell into the + `sandboxregistry` container. $ docker exec -it sandboxregistry bash root@65084fc6f047:/# -3. List the layers for the `test/trusttest` image you pushed: +3. List the layers for the `test/trusttest` image you pushed: - root@65084fc6f047:/# ls -l /var/lib/registry/docker/registry/v2/repositories/test/trusttest/_layers/sha256 - total 12 - drwxr-xr-x 2 root root 4096 Jun 10 17:26 a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4 - drwxr-xr-x 2 root root 4096 Jun 10 17:26 aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042 - drwxr-xr-x 2 root root 4096 Jun 10 17:26 cc7629d1331a7362b5e5126beb5bf15ca0bf67eb41eab994c719a45de53255cd + ```bash + root@65084fc6f047:/# ls -l /var/lib/registry/docker/registry/v2/repositories/test/trusttest/_layers/sha256 + total 12 + drwxr-xr-x 2 root root 4096 Jun 10 17:26 a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4 + drwxr-xr-x 2 root root 4096 Jun 10 17:26 aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042 + drwxr-xr-x 2 root root 4096 Jun 10 17:26 cc7629d1331a7362b5e5126beb5bf15ca0bf67eb41eab994c719a45de53255cd + ``` -4. Change into the registry storage for one of those layers (note that this is in a different directory): +4. 
Change into the registry storage for one of those layers (this is in a different directory): root@65084fc6f047:/# cd /var/lib/registry/docker/registry/v2/blobs/sha256/aa/aac0c133338db2b18ff054943cee3267fe50c75cdee969aed88b1992539ed042 -5. Add malicious data to one of the `trusttest` layers: +5. Add malicious data to one of the `trusttest` layers: root@65084fc6f047:/# echo "Malicious data" > data -6. Go back to your `trustsandbox` terminal. +6. Go back to your `trustsandbox` terminal. -7. List the `trusttest` image. +7. List the `trusttest` image. / # docker images | grep trusttest REPOSITORY TAG IMAGE ID CREATED SIZE @@ -245,7 +248,7 @@ data. Then, you try and pull it. sandboxregistry:5000/test/trusttest latest cc7629d1331a 11 months ago 5.025 MB sandboxregistry:5000/test/trusttest cc7629d1331a 11 months ago 5.025 MB -8. Remove the `trusttest:latest` image from our local cache. +8. Remove the `trusttest:latest` image from our local cache. / # docker rmi -f cc7629d1331a Untagged: docker/trusttest:latest @@ -259,7 +262,7 @@ data. Then, you try and pull it. Docker to attempt to download the tampered image from the registry and reject it because it is invalid. -8. Pull the image again. This will download the image from the registry, because we don't have it cached. +8. Pull the image again. This downloads the image from the registry, because we don't have it cached. / # docker pull sandboxregistry:5000/test/trusttest Using default tag: latest @@ -270,8 +273,8 @@ data. Then, you try and pull it. a3ed95caeb02: Download complete error pulling image configuration: unexpected EOF - You'll see the pull did not complete because the trust system was - unable to verify the image. + The pull did not complete because the trust system couldn't verify the + image. 
## More play in the sandbox diff --git a/engine/security/userns-remap.md b/engine/security/userns-remap.md index 9bf0e5d5805..cbcf38ef11f 100644 --- a/engine/security/userns-remap.md +++ b/engine/security/userns-remap.md @@ -66,7 +66,7 @@ avoid these situations. ## Prerequisites 1. The subordinate UID and GID ranges must be associated with an existing user, - even though the association is an implementation detail. The user will own + even though the association is an implementation detail. The user owns the namespaced storage directories under `/var/lib/docker/`. If you don't want to use an existing user, Docker can create one for you and use that. If you want to use an existing username or user ID, it must already exist. @@ -97,8 +97,8 @@ avoid these situations. testuser:231072:65536 ``` - This means that user-namespaced processes started by `testuser` will be - owned by host UID `231072` (which will look like UID `0` inside the + This means that user-namespaced processes started by `testuser` are + owned by host UID `231072` (which looks like UID `0` inside the namespace) through 296608 (231072 + 65536). These ranges should not overlap, to ensure that namespaced processes cannot access each other's namespaces. @@ -107,26 +107,26 @@ avoid these situations. avoid overlap. If you want to use the `dockremap` user automatically created by Docker, - you'll need to check for the `dockremap` entry in these files **after** + check for the `dockremap` entry in these files **after** configuring and restarting Docker. 3. If there are any locations on the Docker host where the unprivileged user needs to write, adjust the permissions of those locations accordingly. This is also true if you want to use the `dockremap` user - automatically created by Docker, but you won't be able to modify the + automatically created by Docker, but you can't modify the permissions until after configuring and restarting Docker. -4. 
Enabling `userns-remap` will effectively mask existing image and container +4. Enabling `userns-remap` effectively masks existing image and container layers, as well as other Docker objects within `/var/lib/docker/`. This is because Docker needs to adjust the ownership of these resources and actually stores them in a subdirectory within `/var/lib/docker/`. It is best to enable this feature on a new Docker installation rather than an existing one. - Along the same lines, if you disable `userns-remap` you will not see any + Along the same lines, if you disable `userns-remap` you can't access any of the resources created while it was enabled. 5. Check the [limitations](#user-namespace-known-restrictions) on user - namespaces to be sure your use case will be possible. + namespaces to be sure your use case is possible. ## Enable userns-remap on the daemon @@ -140,11 +140,11 @@ $ dockerd --userns-remap="testuser:testuser" ``` 1. Edit `/etc/docker/daemon.json`. Assuming the file was previously empty, the - following entry will enable `userns-remap` using user and group called + following entry enables `userns-remap` using user and group called `testuser`. You can address the user and group by ID or name. You only need to specify the group name or ID if it is different from the user name or ID. If you provide both the user and group name or ID, separate them by a colon - (`:`) character. The following formats will all work for the value, assuming + (`:`) character. The following formats all work for the value, assuming the UID and GID of `testuser` are `1001`: - `testuser` @@ -230,7 +230,7 @@ $ dockerd --userns-remap="testuser:testuser" The directories which are owned by the remapped user are used instead of the same directories directly beneath `/var/lib/docker/` and the unused versions (such as `/var/lib/docker/tmp/` in the example here) - can be removed. Docker will not use them while `userns-remap` is + can be removed. 
Docker does not use them while `userns-remap` is enabled. ## Disable namespace remapping for a container @@ -264,5 +264,5 @@ While the root user inside a user-namespaced container process has many of the expected privileges of the superuser within the container, the Linux kernel imposes restrictions based on internal knowledge that this is a user-namespaced process. One notable restriction is the inability to use the `mknod` command. -Permission will be denied for device creation within the container when run by +Permission is denied for device creation within the container when run by the `root` user. diff --git a/engine/static_files/README.md b/engine/static_files/README.md index 3caa5d663df..a2cac2c028b 100644 --- a/engine/static_files/README.md +++ b/engine/static_files/README.md @@ -5,7 +5,7 @@ published: false Static files dir ================ -Files you put in /static_files/ will be copied to the web visible /_static/ +Files you put in /static_files/ are copied to the web visible /_static/ Be careful not to override pre-existing static files from the template. diff --git a/engine/swarm/admin_guide.md b/engine/swarm/admin_guide.md index a40eb27f525..18dcf2ca708 100644 --- a/engine/swarm/admin_guide.md +++ b/engine/swarm/admin_guide.md @@ -8,7 +8,7 @@ title: Administer and maintain a swarm of Docker Engines When you run a swarm of Docker Engines, **manager nodes** are the key components for managing the swarm and storing the swarm state. It is important to -understand some key features of manager nodes in order to properly deploy and +understand some key features of manager nodes to properly deploy and maintain the swarm. Refer to [How nodes work](/engine/swarm/how-swarm-mode-works/nodes.md) @@ -35,8 +35,8 @@ operations are subject to the same constraints as state replication. ### Maintain the quorum of managers If the swarm loses the quorum of managers, the swarm cannot perform management -tasks. If your swarm has multiple managers, always have more than two. 
In order -to maintain quorum, a majority of managers must be available. An odd number of +tasks. If your swarm has multiple managers, always have more than two. +To maintain quorum, a majority of managers must be available. An odd number of managers is recommended, because the next even number does not make the quorum easier to keep. For instance, whether you have 3 or 4 managers, you can still only lose 1 manager and maintain the quorum. If you have 5 or 6 managers, you @@ -52,7 +52,7 @@ troubleshooting steps if you do lose the quorum of managers. ## Configure the manager to advertise on a static IP address -When initiating a swarm, you have to specify the `--advertise-addr` flag to +When initiating a swarm, you must specify the `--advertise-addr` flag to advertise your address to other manager nodes in the swarm. For more information, see [Run Docker Engine in swarm mode](/engine/swarm/swarm-mode.md#configure-the-advertise-address). Because manager nodes are meant to be a stable component of the infrastructure, you should use a *fixed @@ -95,7 +95,7 @@ impossible to demote the last manager node. This ensures you maintain access to the swarm and that the swarm can still process requests. Scaling down to a single manager is an unsafe operation and is not recommended. If the last node leaves the swarm unexpectedly during the demote operation, the -swarm will become unavailable until you reboot the node or restart with +swarm becomes unavailable until you reboot the node or restart with `--force-new-cluster`. You manage swarm membership with the `docker swarm` and `docker node` @@ -144,12 +144,11 @@ assigning tasks to the node. ## Add worker nodes for load balancing [Add nodes to the swarm](/engine/swarm/join-nodes.md) to balance your swarm's -load. Replicated service tasks will be distributed across the swarm as evenly as +load. 
Replicated service tasks are distributed across the swarm as evenly as possible over time, as long as the worker nodes are matched to the requirements of the services. When limiting a service to run on only specific types of nodes, such as nodes with a specific number of CPUs or amount of memory, remember that -worker nodes that do not meet these requirements will not be able to run these -tasks. +worker nodes that do not meet these requirements cannot run these tasks. ## Monitor swarm health @@ -241,12 +240,12 @@ you demote or remove a manager. Docker manager nodes store the swarm state and manager logs in the `/var/lib/docker/swarm/` directory. In 1.13 and higher, this data includes the -keys used to encrypt the Raft logs. Without these keys, you will not be able -to restore the swarm. +keys used to encrypt the Raft logs. Without these keys, you cannot restore the +swarm. You can back up the swarm using any manager. Use the following procedure. -1. If the swarm has auto-lock enabled, you will need the unlock key in order +1. If the swarm has auto-lock enabled, you need the unlock key to restore the swarm from backup. Retrieve the unlock key if necessary and store it in a safe location. If you are unsure, read [Lock your swarm to protect its encryption key](/engine/swarm/swarm_manager_locking.md). @@ -254,9 +253,8 @@ You can back up the swarm using any manager. Use the following procedure. 2. Stop Docker on the manager before backing up the data, so that no data is being changed during the backup. It is possible to take a backup while the manager is running (a "hot" backup), but this is not recommended and your - results will be less predictable when restoring. While the manager is down, - other nodes will continue generating swarm data that will not be part of - this backup. + results are less predictable when restoring. While the manager is down, + other nodes continue generating swarm data that is not part of this backup. 
> **Note**: Be sure to maintain the quorum of swarm managers. During the > time that a manager is shut down, your swarm is more vulnerable to @@ -279,7 +277,7 @@ After backing up the swarm as described in [Back up the swarm](#back-up-the-swarm), use the following procedure to restore the data to a new swarm. -1. Shut down Docker on the target host machine where the swarm will be restored. +1. Shut down Docker on the target host machine for the restored swarm. 3. Remove the contents of the `/var/lib/docker/swarm` directory on the new swarm. @@ -287,13 +285,13 @@ restore the data to a new swarm. 4. Restore the `/var/lib/docker/swarm` directory with the contents of the backup. - > **Note**: The new node will use the same encryption key for on-disk + > **Note**: The new node uses the same encryption key for on-disk > storage as the old one. It is not possible to change the on-disk storage > encryption keys at this time. > > In the case of a swarm with auto-lock enabled, the unlock key is also the - > same as on the old swarm, and the unlock key will be needed to - > restore. + > same as on the old swarm, and the unlock key is needed to restore the + > swarm. 5. Start Docker on the new node. Unlock the swarm if necessary. Re-initialize the swarm using the following command, so that this node does not attempt @@ -321,7 +319,7 @@ restore the data to a new swarm. Swarm is resilient to failures and the swarm can recover from any number of temporary node failures (machine reboots or crash with restart) or other transient errors. However, a swarm cannot automatically recover if it loses a -quorum. Tasks on existing worker nodes will continue to run, but administrative +quorum. Tasks on existing worker nodes continue to run, but administrative tasks are not possible, including scaling or updating services and joining or removing nodes from the swarm. The best way to recover is to bring the missing manager nodes back online. 
If that is not possible, continue reading for some @@ -358,7 +356,7 @@ When you run the `docker swarm init` command with the `--force-new-cluster` flag, the Docker Engine where you run the command becomes the manager node of a single-node swarm which is capable of managing and running services. The manager has all the previous information about services and tasks, worker nodes are -still part of the swarm, and services are still running. You will need to add or +still part of the swarm, and services are still running. You need to add or re-add manager nodes to achieve your previous task distribution and ensure that you have enough managers to maintain high availability and prevent losing the quorum. @@ -377,9 +375,9 @@ is eventual balance, with minimal disruption to the end user. In Docker 1.13 and higher, you can use the `--force` or `-f` flag with the `docker service update` command to force the service to redistribute its tasks -across the available worker nodes. This will cause the service tasks to restart. +across the available worker nodes. This causes the service tasks to restart. Client applications may be disrupted. If you have configured it, your service -will use a [rolling update](/engine/swarm/swarm-tutorial.md#rolling-update). +uses a [rolling update](/engine/swarm/swarm-tutorial.md#rolling-update). If you use an earlier version and you want to achieve an even balance of load across workers and don't mind disrupting running tasks, you can force your swarm diff --git a/engine/swarm/configs.md b/engine/swarm/configs.md index 28ee31b19a6..6f871138230 100644 --- a/engine/swarm/configs.md +++ b/engine/swarm/configs.md @@ -57,14 +57,14 @@ configs. You cannot remove a config that a running service is using. See [Rotate a config](configs.md#example-rotate-a-config) for a way to remove a config without disrupting running services. 
-In order to update or roll back configs more easily, consider adding a version +To update or roll back configs more easily, consider adding a version number or date to the config name. This is made easier by the ability to control the mount point of the config within a given container. To update a stack, make changes to your Compose file, then re-run `docker stack deploy -c `. If you use a new config in -that file, your services will start using them. Keep in mind that configurations -are immutable, so you won't be able to change the file for an existing service. +that file, your services start using them. Keep in mind that configurations +are immutable, so you can't change the file for an existing service. Instead, you create a new config to use a different file You can run `docker stack rm` to stop the app and take down the stack. This @@ -175,7 +175,7 @@ real-world example, continue to ``` 7. Repeat steps 3 and 4 again, verifying that the service no longer has access - to the config. The container ID will be different, because the + to the config. The container ID is different, because the `service update` command redeploys the service. ```none @@ -280,8 +280,8 @@ generate the site key and certificate, name the files `site.key` and ``` 3. Configure the root CA. Edit a new file called `root-ca.cnf` and paste - the following contents into it. This constrains the root CA to only be - able to sign leaf certificates and not intermediate CAs. + the following contents into it. This constrains the root CA to only sign + leaf certificates and not intermediate CAs. ```none [root_ca] @@ -336,13 +336,13 @@ generate the site key and certificate, name the files `site.key` and ``` 9. The `site.csr` and `site.cnf` files are not needed by the Nginx service, but - you will need them if you want to generate a new site certificate. Protect + you need them if you want to generate a new site certificate. Protect the `root-ca.key` file. #### Configure the Nginx container 1. 
Produce a very basic Nginx configuration that serves static files over HTTPS. - The TLS certificate and key will be stored as Docker secrets so that they + The TLS certificate and key are stored as Docker secrets so that they can be rotated easily. In the current directory, create a new file called `site.conf` with the @@ -364,7 +364,7 @@ generate the site key and certificate, name the files `site.key` and 2. Create two secrets, representing the key and the certificate. You can store any file as a secret as long as it is smaller than 500 KB. This allows you - to decouple the key and certificate from the services that will use them. + to decouple the key and certificate from the services that use them. In these examples, the secret name and the file name are the same. ```bash @@ -447,7 +447,7 @@ generate the site key and certificate, name the files `site.key` and

If you see this page, the nginx web server is successfully installed and working. Further configuration is required.

-

For online documentation and support please refer to +

For online documentation and support, refer to nginx.org.
Commercial support is available at nginx.com.

diff --git a/engine/swarm/how-swarm-mode-works/nodes.md b/engine/swarm/how-swarm-mode-works/nodes.md index 0a6985bc9f3..268ec6ad027 100644 --- a/engine/swarm/how-swarm-mode-works/nodes.md +++ b/engine/swarm/how-swarm-mode-works/nodes.md @@ -31,8 +31,8 @@ Manager nodes handle cluster management tasks: Using a [Raft](https://raft.github.io/raft.pdf) implementation, the managers maintain a consistent internal state of the entire swarm and all the services running on it. For testing purposes it is OK to run a swarm with a single -manager. If the manager in a single-manager swarm fails, your services will -continue to run, but you will need to create a new cluster to recover. +manager. If the manager in a single-manager swarm fails, your services +continue to run, but you need to create a new cluster to recover. To take advantage of swarm mode's fault-tolerance features, Docker recommends you implement an odd number of nodes according to your organization's @@ -42,7 +42,7 @@ from the failure of a manager node without downtime. * A three-manager swarm tolerates a maximum loss of one manager. * A five-manager swarm tolerates a maximum simultaneous loss of two manager nodes. -* An `N` manager cluster will tolerate the loss of at most +* An `N` manager cluster tolerates the loss of at most `(N-1)/2` managers. * Docker recommends a maximum of seven manager nodes for a swarm. @@ -58,7 +58,7 @@ state, make scheduling decisions, or serve the swarm mode HTTP API. You can create a swarm of one manager node, but you cannot have a worker node without at least one manager node. By default, all managers are also workers. In a single manager node cluster, you can run commands like `docker service -create` and the scheduler will place all tasks on the local Engine. +create` and the scheduler places all tasks on the local Engine. To prevent the scheduler from placing tasks on a manager node in a multi-node swarm, set the availability for the manager node to `Drain`. 
The scheduler diff --git a/engine/swarm/how-swarm-mode-works/pki.md b/engine/swarm/how-swarm-mode-works/pki.md index bed2c6ca46e..36ef4a1724b 100644 --- a/engine/swarm/how-swarm-mode-works/pki.md +++ b/engine/swarm/how-swarm-mode-works/pki.md @@ -61,7 +61,7 @@ reference for details. ## Rotating the CA certificate In the event that a cluster CA key or a manager node is compromised, you can -rotate the swarm root CA so that none of the nodes will trust certificates +rotate the swarm root CA so that none of the nodes trust certificates signed by the old root CA anymore. Run `docker swarm ca --rotate` to generate a new CA certificate and key. If you @@ -77,7 +77,7 @@ happen in sequence: the new root CA certificate is signed with the old root CA certificate. This cross-signed certificate is used as an intermediate certificate for all new node certificates. This ensures that nodes that still trust the old root - CA will be able to validate a certificate signed by the new CA. + CA can still validate a certificate signed by the new CA. 2. In Docker 17.06 and higher, Docker also tells all nodes to immediately renew their TLS certificates. This process may take several minutes, @@ -86,21 +86,21 @@ happen in sequence: > **Note**: If your swarm has nodes with different Docker versions, the > following two things are true: > - Only a manager that is running as the leader **and** running Docker 17.06 - > or higher will tell nodes to renew their TLS certificates. - > - Only nodes running Docker 17.06 or higher will obey this directive. + > or higher tells nodes to renew their TLS certificates. + > - Only nodes running Docker 17.06 or higher obey this directive. > > For the most predictable behavior, ensure that all swarm nodes are running > Docker 17.06 or higher. 3. 
After every node in the swarm has a new TLS certificate signed by the new CA, - Docker will forget about the old CA certificate and key material, and tell + Docker forgets about the old CA certificate and key material, and tells all the nodes to trust the new CA certificate only. - This will also cause a change in the swarm's join tokens. The previous - join tokens will no longer be valid. + This also causes a change in the swarm's join tokens. The previous + join tokens are no longer valid. -From this point on, all new node certificates issued will be signed with the new -root CA, and will not contain any intermediates. +From this point on, all new node certificates issued are signed with the new +root CA, and do not contain any intermediates. ## Learn More diff --git a/engine/swarm/how-swarm-mode-works/services.md b/engine/swarm/how-swarm-mode-works/services.md index 2844baf0dcb..38d5ecac4bc 100644 --- a/engine/swarm/how-swarm-mode-works/services.md +++ b/engine/swarm/how-swarm-mode-works/services.md @@ -5,7 +5,7 @@ title: How services work --- To deploy an application image when Docker Engine is in swarm mode, you create a -service. Frequently a service will be the image for a microservice within the +service. Frequently a service is the image for a microservice within the context of some larger application. Examples of services might include an HTTP server, a database, or any other type of executable program that you wish to run in a distributed environment. @@ -14,7 +14,7 @@ When you create a service, you specify which container image to use and which commands to execute inside running containers. 
You also define options for the service including: -* the port where the swarm will make the service available outside the swarm +* the port where the swarm makes the service available outside the swarm * an overlay network for the service to connect to other services in the swarm * CPU and memory limits and reservations * a rolling update policy @@ -76,17 +76,17 @@ Here are a few examples of when a service might remain in state `pending`. > **Note**: If your only intention is to prevent a service from being deployed, scale the service to 0 instead of trying to configure it in -such a way that it will remain in `pending`. +such a way that it remains in `pending`. -- If all nodes are paused or drained, and you create a service, it will be +- If all nodes are paused or drained, and you create a service, it is pending until a node becomes available. In reality, the first node to become - available will get all of the tasks, so this is not a good thing to do in a + available gets all of the tasks, so this is not a good thing to do in a production environment. - You can reserve a specific amount of memory for a service. If no node in the - swarm has the required amount of memory, the service will remain in a pending + swarm has the required amount of memory, the service remains in a pending state until a node is available which can run its tasks. If you specify a very - large value, such as 500 GB, the task will be pending forever, unless you + large value, such as 500 GB, the task stays pending forever, unless you really have a node which can satisfy it. - You can impose placement constraints on the service, and the constraints may diff --git a/engine/swarm/index.md b/engine/swarm/index.md index 91e86745d37..11f6d10a466 100644 --- a/engine/swarm/index.md +++ b/engine/swarm/index.md @@ -41,7 +41,7 @@ adding or removing tasks to maintain the desired state. the cluster state and reconciles any differences between the actual state and your expressed desired state. 
For example, if you set up a service to run 10 replicas of a container, and a worker machine hosting two of those replicas -crashes, the manager will create two new replicas to replace the replicas that +crashes, the manager creates two new replicas to replace the replicas that crashed. The swarm manager assigns the new replicas to workers that are running and available. diff --git a/engine/swarm/ingress.md b/engine/swarm/ingress.md index 47034afb540..05f944c0bb2 100644 --- a/engine/swarm/ingress.md +++ b/engine/swarm/ingress.md @@ -11,7 +11,7 @@ accept connections on published ports for any service running in the swarm, even if there's no task running on the node. The routing mesh routes all incoming requests to published ports on available nodes to an active container. -In order to use the ingress network in the swarm, you need to have the following +To use the ingress network in the swarm, you need to have the following ports open between the swarm nodes before you enable swarm mode: * Port `7946` TCP/UDP for container network discovery. @@ -28,7 +28,7 @@ service. Use the `--publish` flag to publish a port when you create a service. `target` is used to specify the port inside the container, and `published` is used to specify the port to bind on the routing mesh. If you leave off the `published` -port, a random high-numbered port is bound for each service task. You will +port, a random high-numbered port is bound for each service task. You need to inspect the task to determine the port. ```bash @@ -164,9 +164,9 @@ given node, you are always accessing the instance of the service running on that node. This is referred to as `host` mode. There are a few things to keep in mind. -- If you access a node which is not running a service task, the service will not - be listening on that port. It is possible that nothing will be listening, or - that a completely different application will be listening. 
+- If you access a node which is not running a service task, the service does not + listen on that port. It is possible that nothing is listening, or + that a completely different application is listening. - If you expect to run multiple service tasks on each node (such as when you have 5 nodes but run 10 replicas), you cannot specify a static target port. diff --git a/engine/swarm/join-nodes.md b/engine/swarm/join-nodes.md index 4cf2b5efbde..315382899a9 100644 --- a/engine/swarm/join-nodes.md +++ b/engine/swarm/join-nodes.md @@ -66,7 +66,7 @@ from the scheduler. When you run `docker swarm join` and pass the manager token, the Docker Engine switches into swarm mode the same as for workers. Manager nodes also participate in the raft consensus. The new nodes should be `Reachable`, but the existing -manager will remain the swarm `Leader`. +manager remains the swarm `Leader`. Docker recommends three or five manager nodes per cluster to implement high availability. Because swarm mode manager nodes share data using Raft, there diff --git a/engine/swarm/manage-nodes.md b/engine/swarm/manage-nodes.md index 64b578e1ddf..5bce8d92815 100644 --- a/engine/swarm/manage-nodes.md +++ b/engine/swarm/manage-nodes.md @@ -45,7 +45,7 @@ The `MANAGER STATUS` column shows node participation in the Raft consensus: * `Reachable` means the node is a manager node participating in the Raft consensus quorum. If the leader node becomes unavailable, the node is eligible for election as the new leader. -* `Unavailable` means the node is a manager that is not able to communicate with +* `Unavailable` means the node is a manager that can't communicate with other managers. If a manager node becomes unavailable, you should either join a new manager node to the swarm or promote a worker node to be a manager. @@ -98,7 +98,7 @@ Changing node availability lets you: * drain a manager node so that only performs swarm management tasks and is unavailable for task assignment. 
* drain a node so you can take it down for maintenance. -* pause a node so it is unavailable to receive new tasks. +* pause a node so it can't receive new tasks. * restore unavailable or paused nodes available status. For example, to change a manager node to `Drain` availability: @@ -219,7 +219,7 @@ Node left the swarm. When a node leaves the swarm, the Docker Engine stops running in swarm mode. The orchestrator no longer schedules tasks to the node. -If the node is a manager node, you will receive a warning about maintaining the +If the node is a manager node, you receive a warning about maintaining the quorum. To override the warning, pass the `--force` flag. If the last manager node leaves the swarm, the swarm becomes unavailable requiring you to take disaster recovery measures. diff --git a/engine/swarm/networking.md b/engine/swarm/networking.md index cdf560f72d2..95123be0474 100644 --- a/engine/swarm/networking.md +++ b/engine/swarm/networking.md @@ -283,14 +283,14 @@ services which publish ports, those services need to be removed before you can remove the `ingress` network. During the time that no `ingress` network exists, existing services which do not -publish ports will continue to function but are not load-balanced. This affects +publish ports continue to function but are not load-balanced. This affects services which publish ports, such as a WordPress service which publishes port 80. 1. Inspect the `ingress` network using `docker network inspect ingress`, and remove any services whose containers are connected to it. These are services that publish ports, such as a WordPress service which publishes port 80. If - all such services are not stopped, the next step will fail. + all such services are not stopped, the next step fails. 2. Remove the existing `ingress` network: @@ -299,7 +299,7 @@ services which publish ports, such as a WordPress service which publishes port WARNING! 
Before removing the routing-mesh network, make sure all the nodes in your swarm run the same docker engine version. Otherwise, removal may not - be effective and functionality of newly create ingress networks will be + be effective and functionality of newly created ingress networks will be impaired. Are you sure you want to continue? [y/N] ``` @@ -320,7 +320,7 @@ services which publish ports, such as a WordPress service which publishes port > **Note**: You can name your `ingress` network something other than > `ingress`, but you can only have one. An attempt to create a second one - > will fail. + > fails. 4. Restart the services that you stopped in the first step. @@ -370,8 +370,8 @@ In Docker 17.06 and higher, it is possible to separate this traffic by passing the `--data-path-addr` flag when initializing or joining the swarm. If there are multiple interfaces, `--advertise-addr` must be specified explicitly, and `--data-path-addr` defaults to `--advertise-addr` if not specified. Traffic about -joining, leaving, and managing the swarm will be sent over the -`--advertise-addr` interface, and traffic among a service's containers is sent over the `--data-path-addr` interface. These flags can take an IP address or a network device name, such as `eth0`. diff --git a/engine/swarm/raft.md b/engine/swarm/raft.md index 4abf7fccbba..1bbcdf5a5bd 100644 --- a/engine/swarm/raft.md +++ b/engine/swarm/raft.md @@ -24,8 +24,8 @@ in the presence of failures by requiring a majority of nodes to agree on values. Raft tolerates up to `(N-1)/2` failures and requires a majority or quorum of `(N/2)+1` members to agree on values proposed to the cluster. This means that in a cluster of 5 Managers running Raft, if 3 nodes are unavailable, the system -will not process any more requests to schedule additional tasks. 
The existing -tasks will keep running but the scheduler will not be able to rebalance tasks to +cannot process any more requests to schedule additional tasks. The existing +tasks keep running but the scheduler cannot rebalance tasks to cope with failures if the manager set is not healthy. The implementation of the consensus algorithm in swarm mode means it features diff --git a/engine/swarm/secrets.md b/engine/swarm/secrets.md index d98ffd143af..099aef3a47d 100644 --- a/engine/swarm/secrets.md +++ b/engine/swarm/secrets.md @@ -27,14 +27,15 @@ runtime but you don't want to store in the image or in source control, such as: > **Note**: Docker secrets are only available to swarm services, not to > standalone containers. To use this feature, consider adapting your container -> to run as a service with a scale of 1. +> to run as a service. Stateful containers can typically run with a scale of 1 +> without changing the container code. Another use case for using secrets is to provide a layer of abstraction between the container and a set of credentials. Consider a scenario where you have separate development, test, and production environments for your application. Each of these environments can have different credentials, stored in the development, test, and production swarms with the same secret name. Your -containers only need to know the name of the secret in order to function in all +containers only need to know the name of the secret to function in all three environments. You can also use secrets to manage non-sensitive data, such as configuration @@ -83,7 +84,7 @@ management data. > **Warning**: Raft data is encrypted in Docker 1.13 and higher. If any of your > Swarm managers run an earlier version, and one of those managers becomes the -> manager of the swarm, the secrets will be stored unencrypted in that node's +> manager of the swarm, the secrets are stored unencrypted in that node's > Raft logs. 
Before adding any secrets, update all of your manager nodes to > Docker 1.13 or higher to prevent secrets from being written to plain-text Raft > logs. @@ -114,7 +115,7 @@ secrets. You cannot remove a secret that a running service is using. See [Rotate a secret](secrets.md#example-rotate-a-secret) for a way to remove a secret without disrupting running services. -In order to update or roll back secrets more easily, consider adding a version +To update or roll back secrets more easily, consider adding a version number or date to the secret name. This is made easier by the ability to control the mount point of the secret within a given container. @@ -252,7 +253,7 @@ real-world example, continue to ``` 8. Repeat steps 3 and 4 again, verifying that the service no longer has access - to the secret. The container ID will be different, because the + to the secret. The container ID is different, because the `service update` command redeploys the service. ```none @@ -361,8 +362,8 @@ generate the site key and certificate, name the files `site.key` and ``` 3. Configure the root CA. Edit a new file called `root-ca.cnf` and paste - the following contents into it. This constrains the root CA to only be - able to sign leaf certificates and not intermediate CAs. + the following contents into it. This constrains the root CA to signing leaf + certificates and not intermediate CAs. ```none [root_ca] @@ -417,13 +418,13 @@ generate the site key and certificate, name the files `site.key` and ``` 9. The `site.csr` and `site.cnf` files are not needed by the Nginx service, but - you will need them if you want to generate a new site certificate. Protect + you need them if you want to generate a new site certificate. Protect the `root-ca.key` file. #### Configure the Nginx container 1. Produce a very basic Nginx configuration that serves static files over HTTPS. 
- The TLS certificate and key will be stored as Docker secrets so that they + The TLS certificate and key are stored as Docker secrets so that they can be rotated easily. In the current directory, create a new file called `site.conf` with the @@ -446,7 +447,7 @@ generate the site key and certificate, name the files `site.key` and 2. Create three secrets, representing the key, the certificate, and the `site.conf`. You can store any file as a secret as long as it is smaller than 500 KB. This allows you to decouple the key, certificate, and - configuration from the services that will use them. In each of these + configuration from the services that use them. In each of these commands, the last argument represents the path to the file to read the secret from on the host machine's filesystem. In these examples, the secret name and the file name are the same. @@ -560,7 +561,7 @@ generate the site key and certificate, name the files `site.key` and

If you see this page, the nginx web server is successfully installed and working. Further configuration is required.

-

For online documentation and support please refer to +

For online documentation and support, refer to nginx.org.
Commercial support is available at nginx.com.

@@ -669,7 +670,7 @@ line. The value returned is not the password, but the ID of the secret. In the remainder of this tutorial, the ID output is omitted. - Generate a second secret for the MySQL `root` user. This secret won't be + Generate a second secret for the MySQL `root` user. This secret isn't shared with the WordPress service created later. It's only needed to bootstrap the `mysql` service. @@ -689,7 +690,7 @@ line. The secrets are stored in the encrypted Raft logs for the swarm. -2. Create a user-defined overlay network which will be used for communication +2. Create a user-defined overlay network which is used for communication between the MySQL and WordPress services. There is no need to expose the MySQL service to any external host or container. @@ -697,7 +698,7 @@ line. $ docker network create -d overlay mysql_private ``` -3. Create the MySQL service. The MySQL service will have the following +3. Create the MySQL service. The MySQL service has the following characteristics: - Because the scale is set to `1`, only a single MySQL task runs. @@ -720,8 +721,8 @@ line. passwords are stored in the MySQL system database itself. - Sets environment variables `MYSQL_USER` and `MYSQL_DATABASE`. A new database called `wordpress` is created when the container starts, and the - `wordpress` user will have full permissions for this database only. This - user will not be able to create or drop databases or change the MySQL + `wordpress` user has full permissions for this database only. This + user cannot create or drop databases or change the MySQL configuration. ```bash @@ -751,7 +752,7 @@ line. At this point, you could actually revoke the `mysql` service's access to the `mysql_password` and `mysql_root_password` secrets because the passwords have been saved in the MySQL system database. Don't do that for now, because - we will use them later to facilitate rotating the MySQL password. + we use them later to facilitate rotating the MySQL password. 5. 
Now that MySQL is set up, create a WordPress service that connects to the MySQL service. The WordPress service has the following characteristics: @@ -767,12 +768,12 @@ line. `mysql` container, and also publishes port 80 to port 30000 on all swarm nodes. - Has access to the `mysql_password` secret, but specifies a different - target file name within the container. The WordPress container will use + target file name within the container. The WordPress container uses the mount point `/run/secrets/wp_db_password`. Also specifies that the secret is not group-or-world-readable, by setting the mode to `0400`. - Sets the environment variable `WORDPRESS_DB_PASSWORD_FILE` to the file - path where the secret is mounted. The WordPress service will read the + path where the secret is mounted. The WordPress service reads the MySQL password string from that file and adds it to the `wp-config.php` configuration file. - Connects to the MySQL container using the username `wordpress` and the @@ -816,7 +817,7 @@ line. At this point, you could actually revoke the WordPress service's access to the `mysql_password` secret, because WordPress has copied the secret to its - configuration file `wp-config.php`. Don't do that for now, because we will + configuration file `wp-config.php`. Don't do that for now, because we use it later to facilitate rotating the MySQL password. 7. Access `http://localhost:30000/` from any swarm node and set up WordPress @@ -824,7 +825,7 @@ line. `wordpress` database. WordPress automatically generates a password for your WordPress user, which is completely different from the password WordPress uses to access MySQL. Store this password securely, such as in a password - manager. You will need it to log into WordPress after + manager. You need it to log into WordPress after [rotating the secret](#example-rotate-a-secret). Go ahead and write a blog post or two and install a WordPress plugin or @@ -882,7 +883,7 @@ use it, then remove the old secret. 
in `/run/secrets` but does not expose them on the command line or save them in the shell history. - Do this quickly and move on to the next step, because WordPress will lose + Do this quickly and move on to the next step, because WordPress loses the ability to connect to MySQL. First, find the ID of the `mysql` container task. @@ -910,8 +911,8 @@ use it, then remove the old secret. 4. Update the `wordpress` service to use the new password, keeping the target path at `/run/secrets/wp_db_secret` and keeping the file permissions at - `0400`. This will trigger a rolling restart of the WordPress service and - the new secret will be used. + `0400`. This triggers a rolling restart of the WordPress service and + the new secret is used. ```bash $ docker service update \ @@ -921,7 +922,7 @@ use it, then remove the old secret. ``` 5. Verify that WordPress works by browsing to http://localhost:30000/ on any - swarm node again. You'll need to use the WordPress username and password + swarm node again. Use the WordPress username and password from when you ran through the WordPress wizard in the previous task. Verify that the blog post you wrote still exists, and if you changed any @@ -1027,12 +1028,12 @@ a compose file. The keyword `secrets:` defines two secrets `db_password:` and `db_root_password:`. -When deploying, Docker will create these two secrets and populate them with the +When deploying, Docker creates these two secrets and populates them with the content from the file specified in the compose file. The db service uses both secrets, and the wordpress is using one. -When you deploy, Docker will mount a file under `/run/secrets/` in the +When you deploy, Docker mounts a file under `/run/secrets/` in the services. These files are never persisted in disk, but are managed in memory. 
Each service uses environment variables to specify where the service should look diff --git a/engine/swarm/services.md b/engine/swarm/services.md index 9cae09b46c7..9377cf3935c 100644 --- a/engine/swarm/services.md +++ b/engine/swarm/services.md @@ -27,7 +27,7 @@ For an overview of how services work, see To create a single-replica service with no extra configuration, you only need to supply the image name. This command starts an Nginx service with a randomly-generated name and no published ports. This is a naive example, since -you won't be able to interact with the Nginx service. +you can't interact with the Nginx service. ```bash $ docker service create nginx @@ -80,7 +80,7 @@ You can change almost everything about an existing service using the `docker service update` command. When you update a service, Docker stops its containers and restarts them with the new configuration. -Since Nginx is a web service, it will work much better if you publish port 80 +Since Nginx is a web service, it works much better if you publish port 80 to clients outside the swarm. You can specify this when you create the service, using the `-p` or `--publish` flag. When updating an existing service, the flag is `--publish-add`. There is also a `--publish-rm` flag to remove a port that @@ -140,8 +140,8 @@ container: * the working directory inside the container using the `--workdir` flag * the username or UID using the `--user` flag -The following service's containers will have an environment variable `$MYVAR` -set to `myvalue`, will run from the `/tmp/` directory, and will run as the +The following service's containers have an environment variable `$MYVAR` +set to `myvalue`, run from the `/tmp/` directory, and run as the `my_user` user. ```bash @@ -182,7 +182,7 @@ An image version can be expressed in several different ways: ``` Some tags represent discrete releases, such as `ubuntu:16.04`. Tags like this - will almost always resolve to a stable digest over time. 
It is recommended + almost always resolve to a stable digest over time. It is recommended that you use this kind of tag when possible. Other types of tags, such as `latest` or `nightly`, may resolve to a new @@ -213,23 +213,23 @@ An image version can be expressed in several different ways: When you create a service, the image's tag is resolved to the specific digest the tag points to **at the time of service creation**. Worker nodes for that -service will use that specific digest forever unless the service is explicitly +service use that specific digest forever unless the service is explicitly updated. This feature is particularly important if you do use often-changing tags such as `latest`, because it ensures that all service tasks use the same version of the image. > **Note**: If [content trust](/engine/security/trust/content_trust.md) is > enabled, the client actually resolves the image's tag to a digest before -> contacting the swarm manager, in order to verify that the image is signed. +> contacting the swarm manager, to verify that the image is signed. > Thus, if you use content trust, the swarm manager receives the request > pre-resolved. In this case, if the client cannot resolve the image to a > digest, the request fails. {: id="image_resolution_with_trust" } -If the manager is not able to resolve the tag to a digest, each worker +If the manager can't resolve the tag to a digest, each worker node is responsible for resolving the tag to a digest, and different nodes may use different versions of the image. If this happens, a warning like the -following will be logged, substituting the placeholders for real information. +following is logged, substituting the placeholders for real information. ```none unable to pin image to digest: @@ -275,8 +275,8 @@ points to and updates the service tasks to use that digest. > client resolves image and the swarm manager receives the image and digest, > rather than a tag. 
-Usually, the manager is able to resolve the tag to a new digest and the service -updates, redeploying each task to use the new image. If the manager is unable to +Usually, the manager can resolve the tag to a new digest and the service +updates, redeploying each task to use the new image. If the manager can't resolve the tag or some other problem occurs, the next two sections outline what to expect. @@ -358,9 +358,9 @@ $ docker service create --name my_web \ nginx ``` -Three tasks will run on up to three nodes. You don't need to know which nodes -are running the tasks; connecting to port 8080 on **any** of the 10 nodes will -connect you to one of the three `nginx` tasks. You can test this using `curl`. +Three tasks run on up to three nodes. You don't need to know which nodes +are running the tasks; connecting to port 8080 on **any** of the 10 nodes +connects you to one of the three `nginx` tasks. You can test this using `curl`. The following example assumes that `localhost` is one of the swarm nodes. If this is not the case, or `localhost` does not resolve to an IP address on your host, substitute the host's IP address or resolvable host name. @@ -395,14 +395,14 @@ option to the `--publish` flag. > definition, which causes Docker to assign a random port for each task. > > In addition, if you use `mode=host` and you do not use the -> `--mode=global` flag on `docker service create`, it will be difficult to know -> which nodes are running the service in order to route work to them. +> `--mode=global` flag on `docker service create`, it is difficult to know +> which nodes are running the service to route work to them. ##### Example: Run a `nginx` web server service on every swarm node [nginx](https://hub.docker.com/_/nginx/) is an open source reverse proxy, load balancer, HTTP cache, and a web server. 
If you run nginx as a service using the -routing mesh, connecting to the nginx port on any swarm node will show you the +routing mesh, connecting to the nginx port on any swarm node shows you the web page for (effectively) **a random swarm node** running the service. The following example runs nginx as a service on each node in your swarm and @@ -417,7 +417,7 @@ $ docker service create \ ``` You can reach the nginx server on port 8080 of every swarm node. If you add a -node to the swarm, a nginx task will be started on it. You cannot start another +node to the swarm, a nginx task is started on it. You cannot start another service or container on any swarm node which binds to port 8080. > **Note**: This is a naive example. Creating an application-layer @@ -508,7 +508,7 @@ placement of services on different nodes. - You can configure the service's [CPU or memory requirements](#reserve-memory-or-cpus-for-a-service), and the - service will only run on nodes which can meet those requirements. + service only runs on nodes which can meet those requirements. - [Placement constraints](#placement-constraints) let you configure the service to run only on nodes with specific (arbitrary) metadata set, and cause the @@ -526,14 +526,14 @@ placement of services on different nodes. placement constraints, placement preferences, and other node-specific limitations into account. - Unlike constraints, placement preferences are best-effort, and a service will + Unlike constraints, placement preferences are best-effort, and a service does not fail to deploy if no nodes can satisfy the preference. If you specify a placement preference for a service, nodes that match that preference are ranked higher when the swarm managers decide which nodes should run the service tasks. Other factors, such as high availability of the service, - will also factor into which nodes are scheduled to run service tasks. For + also factor into which nodes are scheduled to run service tasks. 
For example, if you have N nodes with the rack label (and then some others), and - your service is configured to run N+1 replicas, the +1 will be scheduled on a + your service is configured to run N+1 replicas, the +1 is scheduled on a node that doesn't already have the service on it if there is one, regardless of whether that node has the `rack` label or not. @@ -582,8 +582,8 @@ information on constraints, refer to the `docker service create` To reserve a given amount of memory or number of CPUs for a service, use the `--reserve-memory` or `--reserve-cpu` flags. If no available nodes can satisfy the requirement (for instance, if you request 4 CPUs and no node in the swarm -has 4 CPUs), the service remains in a pending state until a node is available to -run its tasks. +has 4 CPUs), the service remains in a pending state until an appropriate node is +available to run its tasks. ##### Out Of Memory Exceptions (OOME) @@ -603,10 +603,10 @@ Use placement constraints to control the nodes a service can be assigned to. In the following example, the service only runs on nodes with the [label](engine/swarm/manage-nodes.md#add-or-remove-label-metadata) `region` set to `east`. If no appropriately-labelled nodes are available, -deployment will fail. The `--constraint` flag uses an equality operator -(`==` or `!=`). For replicated services, it is possible that all services will -run on the same node, or each node will only run one replica, or that some nodes -won't run any replicas. For global services, the service will run on every node +deployment fails. The `--constraint` flag uses an equality operator +(`==` or `!=`). For replicated services, it is possible that all services +run on the same node, or each node only runs one replica, or that some nodes +don't run any replicas. For global services, the service runs on every node that meets the placement constraint and any [resource requirements](#reserve-cpu-or-memory-for-a-service). 
@@ -621,7 +621,7 @@ $ docker service create \ You can also use the `constraint` service-level key in a `docker-compose.yml` file. -If you specify multiple placement constraints, the service will only deploy onto +If you specify multiple placement constraints, the service only deploys onto nodes where they are all met. The following example limits the service to run on all nodes where `region` is set to `east` and `type` is not set to `devel`: @@ -635,7 +635,7 @@ $ docker service create \ ``` You can also use placement constraints in conjunction with placement preferences -and CPU/memory constraints. Be careful not to use settings that will not be +and CPU/memory constraints. Be careful not to use settings that are not possible to fulfill. For more information on constraints, refer to the `docker service create` @@ -648,17 +648,17 @@ can run on, _placement preferences_ try to place services on appropriate nodes in an algorithmic way (currently, only spread evenly). For instance, if you assign each node a `rack` label, you can set a placement preference to spread the service evenly across nodes with the `rack` label, by value. This way, if -you lose a rack, the service will still be running on nodes on other racks. +you lose a rack, the service is still running on nodes on other racks. Placement preferences are not strictly enforced. If no node has the label -you specify in your preference, the service will be deployed as though the +you specify in your preference, the service is deployed as though the preference were not set. > Placement preferences are ignored for global services. The following example sets a preference to spread the deployment across nodes based on the value of the `datacenter` label. If some nodes have -`datacenter=us-east` and others have `datacenter=us-west`, the service will be +`datacenter=us-east` and others have `datacenter=us-west`, the service is deployed as evenly as possible across the two sets of nodes. 
```bash @@ -671,9 +671,9 @@ $ docker service create \ > Missing or null labels > -> Nodes which are missing the label used to spread will still receive -> task assignments. As a group, these nodes will receive tasks in equal -> proportion to any of the other groups identified by a specific label +> Nodes which are missing the label used to spread still receive +> task assignments. As a group, these nodes receive tasks in equal +> proportion to any of the other groups identified by a specific label > value. In a sense, a missing label is the same as having the label with > a null value attached to it. If the service should **only** run on > nodes with the label being used for the the spread preference, the @@ -694,7 +694,7 @@ $ docker service create \ ``` You can also use placement preferences in conjunction with placement constraints -or CPU/memory constraints. Be careful not to use settings that will not be +or CPU/memory constraints. Be careful not to use settings that are not possible to fulfill. This diagram illustrates how placement preferences work: @@ -746,7 +746,7 @@ $ docker service create \ The `--update-max-failure-ratio` flag controls what fraction of tasks can fail during an update before the update as a whole is considered to have failed. For example, with `--update-max-failure-ratio 0.1 --update-failure-action pause`, -after 10% of the tasks being updated fail, the update will be paused. +after 10% of the tasks being updated fail, the update is paused. An individual task update is considered to have failed if the task doesn't start up, or if it stops running within the monitoring period specified with @@ -759,7 +759,7 @@ after that is not counted. In case the updated version of a service doesn't function as expected, it's possible to manually roll back to the previous version of the service using -`docker service update`'s `--rollback` flag. This will revert the service +`docker service update`'s `--rollback` flag. 
This reverts the service to the configuration that was in place before the most recent `docker service update` command. @@ -781,7 +781,7 @@ Related to the new automatic rollback feature, in Docker 17.04 and higher, manual rollback is handled at the server side, rather than the client, if the daemon is running Docker 17.04 or higher. This allows manually-initiated rollbacks to respect the new rollback parameters. The client is version-aware, -so it will still use the old method against an older daemon. +so it still uses the old method against an older daemon. Finally, in Docker 17.04 and higher, `--rollback` cannot be used in conjunction with other flags to `docker service update`. @@ -910,7 +910,7 @@ The following examples show bind mount syntax: > containers at any time if they become unhealthy or unreachable. > > - Host bind mounts are completely non-portable. When you use bind mounts, -> there is no guarantee that your application will run the same way in +> there is no guarantee that your application runs the same way in > development as it does in production. ### Create services using templates diff --git a/engine/swarm/stack-deploy.md b/engine/swarm/stack-deploy.md index 437a98a5620..2c9a6c7bc19 100644 --- a/engine/swarm/stack-deploy.md +++ b/engine/swarm/stack-deploy.md @@ -12,7 +12,7 @@ The `docker stack deploy` command supports any Compose file of version "3.0" or above. If you have an older version, see the [upgrade guide](/compose/compose-file.md#upgrading). -To run through this tutorial, you will need: +To run through this tutorial, you need: 1. A Docker Engine of version 1.13.0 or later, running in [swarm mode](/engine/swarm/swarm-mode.md). If you're not familiar with swarm mode, @@ -127,9 +127,9 @@ counter whenever you visit it. image: redis:alpine ``` - Note that the image for the web app is built using the Dockerfile defined + The image for the web app is built using the Dockerfile defined above. 
It's also tagged with `127.0.0.1:5000` - the address of the registry - created earlier. This will be important when distributing the app to the + created earlier. This is important when distributing the app to the swarm. @@ -139,7 +139,7 @@ counter whenever you visit it. pull the Redis image if you don't already have it, and create two containers. - You will see a warning about the Engine being in swarm mode. This is because + You see a warning about the Engine being in swarm mode. This is because Compose doesn't take advantage of swarm mode, and deploys everything to a single node. You can safely ignore this. @@ -149,7 +149,7 @@ counter whenever you visit it. WARNING: The Docker Engine you're using is running in swarm mode. Compose does not use swarm mode to deploy services to multiple nodes in - a swarm. All containers will be scheduled on the current node. + a swarm. All containers are scheduled on the current node. To deploy your application across the swarm, use `docker stack deploy`. diff --git a/engine/swarm/swarm-tutorial/create-swarm.md b/engine/swarm/swarm-tutorial/create-swarm.md index a1e5368acf4..1c5904e35b7 100644 --- a/engine/swarm/swarm-tutorial/create-swarm.md +++ b/engine/swarm/swarm-tutorial/create-swarm.md @@ -89,5 +89,5 @@ Windows](/engine/swarm/swarm-tutorial/index.md#use-docker-for-mac-or-docker-for- ## What's next? -In the next section of the tutorial, we'll [add two more nodes](add-nodes.md) to +In the next section of the tutorial, we [add two more nodes](add-nodes.md) to the cluster. diff --git a/engine/swarm/swarm-tutorial/index.md b/engine/swarm/swarm-tutorial/index.md index 4eefcd326a9..50700d4f894 100644 --- a/engine/swarm/swarm-tutorial/index.md +++ b/engine/swarm/swarm-tutorial/index.md @@ -17,8 +17,7 @@ The tutorial guides you through the following activities: * managing the swarm once you have everything running This tutorial uses Docker Engine CLI commands entered on the command line of a -terminal window. 
You should be able to install Docker on networked machines and
-be comfortable with running commands in the shell of your choice.
+terminal window.

If you are brand new to Docker, see [About Docker Engine](../../index.md).

@@ -40,12 +39,12 @@ from a Linux, Mac, or Windows host. Check out [Getting started -
Swarms](/get-started/part4.md#prerequisites) for one possible set-up for the
hosts.

-One of these machines will be a manager (called `manager1`) and two of them will
-be workers (`worker1` and `worker2`).
+One of these machines is a manager (called `manager1`) and two of them are
+workers (`worker1` and `worker2`).

>**Note**: You can follow many of the tutorial steps to test single-node swarm
-as well, in which case you need only one host. Multi-node commands will not
+as well, in which case you need only one host. Multi-node commands do not
work, but you can initialize a swarm, create services, and scale them.

### Docker Engine 1.12 or newer
@@ -72,29 +71,29 @@ single-node and multi-node swarm scenarios on Linux machines.

Alternatively, install the latest [Docker for Mac](/docker-for-mac/index.md) or
[Docker for Windows](/docker-for-windows/index.md) application on one computer. You can test both single-node and multi-node swarm from this computer,
-but you will need to use Docker Machine to test the multi-node scenarios.
+but you need to use Docker Machine to test the multi-node scenarios.

* You can use Docker for Mac or Windows to test _single-node_ features of swarm
mode, including initializing a swarm with a single node, creating services,
and scaling services. Docker "Moby" on Hyperkit (Mac) or Hyper-V (Windows)
-will serve as the single swarm node.
+serves as the single swarm node.

-* Currently, you cannot use Docker for Mac or Windows alone to test a +* Currently, you cannot use Docker for Mac or Docker for Windows alone to test a _multi-node_ swarm. However, you can use the included version of [Docker -Machine](/machine/overview.md) to create the swarm nodes (see [Get started with Docker Machine and a local VM](/machine/get-started.md)), then follow the -tutorial for all multi-node features. For this scenario, you run commands from -a Docker for Mac or Docker for Windows host, but that Docker host itself is -_not_ participating in the swarm (i.e., it will not be `manager1`, `worker1`, -or `worker2` in our example). After you create the nodes, you can run all +Machine](/machine/overview.md) to create the swarm nodes (see +[Get started with Docker Machine and a local VM](/machine/get-started.md)), then +follow the tutorial for all multi-node features. For this scenario, you run +commands from a Docker for Mac or Docker for Windows host, but that Docker host itself is +_not_ participating in the swarm. After you create the nodes, you can run all swarm commands as shown from the Mac terminal or Windows PowerShell with Docker for Mac or Docker for Windows running. ### The IP address of the manager machine The IP address must be assigned to a network interface available to the host -operating system. All nodes in the swarm must be able to access the manager at +operating system. All nodes in the swarm need to connect to the manager at the IP address. Because other nodes contact the manager node on its IP address, you should use a @@ -117,8 +116,8 @@ The following ports must be available. On some systems, these ports are open by * **TCP** and **UDP port 7946** for communication among nodes * **UDP port 4789** for overlay network traffic -If you are planning on creating an overlay network with encryption (`--opt encrypted`), -you will also need to ensure **ip protocol 50** (**ESP**) traffic is allowed. 
+If you plan on creating an overlay network with encryption (`--opt encrypted`), +you also need to ensure **ip protocol 50** (**ESP**) traffic is allowed. ## What's next? diff --git a/engine/swarm/swarm_manager_locking.md b/engine/swarm/swarm_manager_locking.md index f840e62dc7e..ef0e9e4165d 100644 --- a/engine/swarm/swarm_manager_locking.md +++ b/engine/swarm/swarm_manager_locking.md @@ -52,15 +52,15 @@ command and provide the following key: Store the key in a safe place, such as in a password manager. When Docker restarts, you need to -[unlock the swarm](swarm_manager_locking.md#unlock-a-swarm). You will see an -error like the following and services will not start. +[unlock the swarm](swarm_manager_locking.md#unlock-a-swarm). A locked swarm +causes an error like the following when you try to start or restart a service: ```bash $ sudo service docker restart $ docker service ls -Error response from daemon: Swarm is encrypted and needs to be unlocked before it can be used. Please use "docker swarm unlock" to unlock it. +Error response from daemon: Swarm is encrypted and needs to be unlocked before it can be used. Use "docker swarm unlock" to unlock it. ``` ## Enable or disable autolock on an existing swarm @@ -81,9 +81,9 @@ will not be able to restart the manager. ``` To disable autolock, set `--autolock` to `false`. The mutual TLS key and the -encryption key used to read and write Raft logs will be stored unencrypted on +encryption key used to read and write Raft logs are stored unencrypted on disk. There is a trade-off between the risk of storing the encryption key -unencrypted at rest and the convenience of being able to restart a swarm without +unencrypted at rest and the convenience of restarting a swarm without needing to unlock each manager. ```bash @@ -111,7 +111,7 @@ you locked the swarm or rotated the key, and the swarm unlocks. Consider a situation where your swarm is running as expected, then a manager node becomes unavailable. 
You troubleshoot the problem and bring the physical node back online, but you need to unlock the manager by providing the unlock -key in order to read the encrypted credentials and Raft logs. +key to read the encrypted credentials and Raft logs. If the key has not been rotated since the node left the swarm, and you have a quorum of functional manager nodes in the swarm, you can view the current unlock diff --git a/engine/tutorials/networkingcontainers.md b/engine/tutorials/networkingcontainers.md index c83c56dc433..364aa29ca86 100644 --- a/engine/tutorials/networkingcontainers.md +++ b/engine/tutorials/networkingcontainers.md @@ -84,11 +84,14 @@ You can remove a container from a network by disconnecting the container. To do $ docker network disconnect bridge networktest -While you can disconnect a container from a network, you cannot remove the builtin `bridge` network named `bridge`. Networks are natural ways to isolate containers from other containers or other networks. So, as you get more experienced with Docker, you'll want to create your own networks. +While you can disconnect a container from a network, you cannot remove the +builtin `bridge` network named `bridge`. Networks are natural ways to isolate +containers from other containers or other networks. So, as you get more +experienced with Docker, create your own networks. ## Create your own bridge network -Docker Engine natively supports both bridge networks and overlay networks. A bridge network is limited to a single host running Docker Engine. An overlay network can include multiple hosts and is a more advanced topic. For this example, you'll create a bridge network: +Docker Engine natively supports both bridge networks and overlay networks. A bridge network is limited to a single host running Docker Engine. An overlay network can include multiple hosts and is a more advanced topic. 
For this example, create a bridge network: $ docker network create -d bridge my_bridge @@ -102,7 +105,7 @@ The `-d` flag tells Docker to use the `bridge` driver for the new network. You c 18a2866682b8 none null c288470c46f6 host host -If you inspect the network, you'll find that it has nothing in it. +If you inspect the network, it has nothing in it. $ docker network inspect my_bridge @@ -137,7 +140,7 @@ Launch a container running a PostgreSQL database and pass it the `--net=my_bridg $ docker run -d --net=my_bridge --name db training/postgres -If you inspect your `my_bridge` you'll see it has a container attached. +If you inspect your `my_bridge` you can see it has a container attached. You can also inspect your container to see where it is connected: {% raw %} @@ -153,7 +156,7 @@ Now, go ahead and start your by now familiar web application. This time don't sp ![bridge2](bridge2.png) -Which network is your `web` application running under? Inspect the application and you'll find it is running in the default `bridge` network. +Which network is your `web` application running under? Inspect the application to verify that it is running in the default `bridge` network. {% raw %} $ docker inspect --format='{{json .NetworkSettings.Networks}}' web @@ -181,7 +184,7 @@ Now, open a shell to your running `db` container: --- 172.17.0.2 ping statistics --- 44 packets transmitted, 0 received, 100% packet loss, time 43185ms -After a bit, use `CTRL-C` to end the `ping` and you'll find the ping failed. That is because the two containers are running on different networks. You can fix that. Then, use the `exit` command to close the container. +After a bit, use `CTRL-C` to end the `ping` and notice that the ping failed. That is because the two containers are running on different networks. You can fix that. Then, use the `exit` command to close the container. Docker networking allows you to attach a container to as many networks as you like. 
You can also attach an already running container. Go ahead and attach your running `web` app to the `my_bridge`.

diff --git a/engine/userguide/eng-image/baseimages.md b/engine/userguide/eng-image/baseimages.md
index 9e7a3b9a3fe..f3cd799e99e 100644
--- a/engine/userguide/eng-image/baseimages.md
+++ b/engine/userguide/eng-image/baseimages.md
@@ -26,7 +26,7 @@ ones.

## Create a full image using tar

-In general, you'll want to start with a working machine that is running
+In general, start with a working machine that is running
the distribution you'd like to package as a parent image, though that is not
required for some tools like Debian's
[Debootstrap](https://wiki.debian.org/Debootstrap), which you can also
diff --git a/engine/userguide/eng-image/dockerfile_best-practices.md b/engine/userguide/eng-image/dockerfile_best-practices.md
index c307fe91f38..450362858d5 100644
--- a/engine/userguide/eng-image/dockerfile_best-practices.md
+++ b/engine/userguide/eng-image/dockerfile_best-practices.md
@@ -87,19 +87,19 @@ A Dockerfile for a go application could look like:

FROM golang:1.9.2-alpine3.6 AS build

# Install tools required to build the project
-# We will need to run `docker build --no-cache .` to update those dependencies
+# We need to run `docker build --no-cache .` to update those dependencies
RUN apk add --no-cache git
RUN go get github.com/golang/dep/cmd/dep

# Gopkg.toml and Gopkg.lock lists project dependencies
-# These layers will only be re-built when Gopkg files are updated
+# These layers are only re-built when Gopkg files are updated
COPY Gopkg.lock Gopkg.toml /go/src/project/
WORKDIR /go/src/project/
# Install library dependencies
RUN dep ensure -vendor-only

# Copy all project and build it
-# This layer will be rebuilt when ever a file has changed in the project directory
+# This layer is rebuilt whenever a file has changed in the project directory
COPY . 
/go/src/project/ RUN go build -o /bin/project @@ -112,7 +112,7 @@ CMD ["--help"] ### Avoid installing unnecessary packages -In order to reduce complexity, dependencies, file sizes, and build times, you +To reduce complexity, dependencies, file sizes, and build times, you should avoid installing extra or unnecessary packages just because they might be “nice to have.” For example, you don’t need to include a text editor in a database image. @@ -157,7 +157,7 @@ mitigated this need: ### Sort multi-line arguments Whenever possible, ease later changes by sorting multi-line arguments -alphanumerically. This will help you avoid duplication of packages and make the +alphanumerically. This helps you avoid duplication of packages and make the list much easier to update. This also makes PRs a lot easier to read and review. Adding a space before a backslash (`\`) helps as well. @@ -172,16 +172,16 @@ Here’s an example from the [`buildpack-deps` image](https://github.com/docker- ### Build cache -During the process of building an image Docker will step through the +During the process of building an image Docker steps through the instructions in your `Dockerfile` executing each in the order specified. -As each instruction is examined Docker will look for an existing image in its +As each instruction is examined Docker looks for an existing image in its cache that it can reuse, rather than creating a new (duplicate) image. If you do not want to use the cache at all you can use the `--no-cache=true` option on the `docker build` command. However, if you do let Docker use its cache then it is very important to -understand when it will, and will not, find a matching image. The basic rules -that Docker will follow are outlined below: +understand when it can, and cannot, find a matching image. 
The basic rules +that Docker follows are outlined below: * Starting with a parent image that is already in the cache, the next instruction is compared against all child images derived from that base @@ -199,19 +199,19 @@ these checksums. During the cache lookup, the checksum is compared against the checksum in the existing images. If anything has changed in the file(s), such as the contents and metadata, then the cache is invalidated. -* Aside from the `ADD` and `COPY` commands, cache checking will not look at the +* Aside from the `ADD` and `COPY` commands, cache checking does not look at the files in the container to determine a cache match. For example, when processing a `RUN apt-get -y update` command the files updated in the container -will not be examined to determine if a cache hit exists. In that case just -the command string itself will be used to find a match. +are not examined to determine if a cache hit exists. In that case just +the command string itself is used to find a match. -Once the cache is invalidated, all subsequent `Dockerfile` commands will -generate new images and the cache will not be used. +Once the cache is invalidated, all subsequent `Dockerfile` commands +generate new images and the cache is not used. ## The Dockerfile instructions -Below you'll find recommendations for the best way to write the -various instructions available for use in a `Dockerfile`. +These recommendations help you to write an efficient and maintainable +`Dockerfile`. ### FROM @@ -285,7 +285,7 @@ Probably the most common use-case for `RUN` is an application of `apt-get`. The out for. You should avoid `RUN apt-get upgrade` or `dist-upgrade`, as many of the -“essential” packages from the parent images won't upgrade inside an +“essential” packages from the parent images can't upgrade inside an [unprivileged container](https://docs.docker.com/engine/reference/run/#security-configuration). 
If a package contained in the parent image is out-of-date, you should contact its maintainers. If you know there’s a particular package, `foo`, that needs to be updated, use @@ -361,7 +361,7 @@ each line can also prevent mistakes in package duplication. In addition, when you clean up the apt cache by removing `/var/lib/apt/lists` reduces the image size, since the apt cache is not stored in a layer. Since the -`RUN` statement starts with `apt-get update`, the package cache will always be +`RUN` statement starts with `apt-get update`, the package cache is always refreshed prior to `apt-get install`. > **Note**: The official Debian and Ubuntu images [automatically run `apt-get clean`](https://github.com/moby/moby/blob/03e2923e42446dbb830c654d0eec323a0b4ef02a/contrib/mkimage/debootstrap#L82-L105), @@ -424,7 +424,7 @@ works. [Dockerfile reference for the EXPOSE instruction](../../reference/builder.md#expose) -The `EXPOSE` instruction indicates the ports on which a container will listen +The `EXPOSE` instruction indicates the ports on which a container listens for connections. Consequently, you should use the common, traditional port for your application. For example, an image containing the Apache web server would use `EXPOSE 80`, while an image containing MongoDB would use `EXPOSE 27017` and @@ -439,9 +439,9 @@ the recipient container back to the source (ie, `MYSQL_PORT_3306_TCP`). [Dockerfile reference for the ENV instruction](../../reference/builder.md#env) -In order to make new software easier to run, you can use `ENV` to update the +To make new software easier to run, you can use `ENV` to update the `PATH` environment variable for the software your container installs. For -example, `ENV PATH /usr/local/nginx/bin:$PATH` will ensure that `CMD [“nginx”]` +example, `ENV PATH /usr/local/nginx/bin:$PATH` ensures that `CMD [“nginx”]` just works. The `ENV` instruction is also useful for providing required environment @@ -473,7 +473,7 @@ not immediately obvious. 
Consequently, the best use for `ADD` is local tar file auto-extraction into the image, as in `ADD rootfs.tar.xz /`.

If you have multiple `Dockerfile` steps that use different files from your
-context, `COPY` them individually, rather than all at once. This will ensure that
+context, `COPY` them individually, rather than all at once. This ensures that
each step's build cache is only invalidated (forcing the step to be re-run) if
the specifically required files change.

@@ -488,7 +488,7 @@ Results in fewer cache invalidations for the `RUN` step, than if you put the

Because image size matters, using `ADD` to fetch packages from remote URLs is
strongly discouraged; you should use `curl` or `wget` instead. That way you can
-delete the files you no longer need after they've been extracted and you won't
+delete the files you no longer need after they've been extracted and you don't
have to add another layer in your image. For example, you should avoid doing
things like:

@@ -611,10 +611,10 @@ like `RUN groupadd -r postgres && useradd --no-log-init -r -g postgres postgres`
> useradd works around this issue. The Debian/Ubuntu `adduser` wrapper
> does not support the `--no-log-init` flag and should be avoided.

-You should avoid installing or using `sudo` since it has unpredictable TTY and
-signal-forwarding behavior that can cause more problems than it solves. If
-you absolutely need functionality similar to `sudo` (e.g., initializing the
-daemon as root but running it as non-root), you may be able to use
+Avoid installing or using `sudo` since it has unpredictable TTY and
+signal-forwarding behavior that can cause problems. If
+you absolutely need functionality similar to `sudo`, such as initializing the
+daemon as `root` but running it as non-`root`, consider using
[“gosu”](https://github.com/tianon/gosu). 
Lastly, to reduce layers and complexity, avoid switching `USER` back @@ -649,9 +649,9 @@ builds arbitrary user software written in that language within the Images built from `ONBUILD` should get a separate tag, for example: `ruby:1.9-onbuild` or `ruby:2.0-onbuild`. -Be careful when putting `ADD` or `COPY` in `ONBUILD`. The “onbuild” image will -fail catastrophically if the new build's context is missing the resource being -added. Adding a separate tag, as recommended above, will help mitigate this by +Be careful when putting `ADD` or `COPY` in `ONBUILD`. The “onbuild” image +fails catastrophically if the new build's context is missing the resource being +added. Adding a separate tag, as recommended above, helps mitigate this by allowing the `Dockerfile` author to make a choice. ## Examples for Official Repositories diff --git a/engine/userguide/eng-image/multistage-build.md b/engine/userguide/eng-image/multistage-build.md index 739bf1702bf..05494631ce4 100644 --- a/engine/userguide/eng-image/multistage-build.md +++ b/engine/userguide/eng-image/multistage-build.md @@ -78,7 +78,7 @@ rm ./app ``` When you run the `build.sh` script, it needs to build the first image, create -a container from it in order to copy the artifact out, then build the second +a container from it to copy the artifact out, then build the second image. Both images take up room on your system and you still have the `app` artifact on your local disk as well. @@ -132,7 +132,7 @@ number, starting with 0 for the first `FROM` instruction. However, you can name your stages, by adding an `as ` to the `FROM` instruction. This example improves the previous one by naming the stages and using the name in the `COPY` instruction. This means that even if the instructions in your -Dockerfile are re-ordered later, the `COPY` won't break. +Dockerfile are re-ordered later, the `COPY` doesn't break. 
```conf FROM golang:1.7.3 as builder diff --git a/engine/userguide/networking/configure-dns.md b/engine/userguide/networking/configure-dns.md index 873497b0766..62e169b73fd 100644 --- a/engine/userguide/networking/configure-dns.md +++ b/engine/userguide/networking/configure-dns.md @@ -9,9 +9,9 @@ containers in user-defined networks. DNS lookup for containers connected to user-defined networks works differently compared to the containers connected to `default bridge` network. -> **Note**: In order to maintain backward compatibility, the DNS configuration +> **Note**: To maintain backward compatibility, the DNS configuration > in `default bridge` network is retained with no behavioral change. -> Please refer to the [DNS in default bridge network](default_network/configure-dns.md) +> Refer to the [DNS in default bridge network](default_network/configure-dns.md) > for more information on DNS configuration in the `default bridge` network. As of Docker 1.10, the docker daemon implements an embedded DNS server which @@ -47,7 +47,7 @@ Various container options that affect container domain name services.

- In addition to --name as described above, a container is discovered by one or more + In addition to --name as described above, a container is discovered by one or more of its configured --network-alias (or --alias in docker network connect command) within the user-defined network. The embedded DNS server maintains the mapping between all of the container aliases and its IP address on a specific user-defined network. @@ -67,8 +67,8 @@ Various container options that affect container domain name services. Using this option as you run a container gives the embedded DNS an extra entry named ALIAS that points to the IP address of the container identified by CONTAINER_NAME. When using --link - the embedded DNS will guarantee that localized lookup result only on that - container where the --link is used. This lets processes inside the new container + the embedded DNS guarantees that localized lookup result only on that + container where the --link is used. This lets processes inside the new container connect to container without having to know its name or IP.

@@ -79,10 +79,10 @@ Various container options that affect container domain name services.

The IP addresses passed via the --dns option is used by the embedded DNS - server to forward the DNS query if embedded DNS server is unable to resolve a name + server to forward the DNS query if the embedded DNS server can't resolve a name resolution request from the containers. These --dns IP addresses are managed by the embedded DNS server and - will not be updated in the container's /etc/resolv.conf file. + are not updated in the container's /etc/resolv.conf file.

@@ -92,10 +92,10 @@ Various container options that affect container domain name services.

Sets the domain names that are searched when a bare unqualified hostname is used inside of the container. These --dns-search options are managed by the - embedded DNS server and will not be updated in the container's /etc/resolv.conf file. + embedded DNS server and are not updated in the container's /etc/resolv.conf file. When a container process attempts to access host and the search - domain example.com is set, for instance, the DNS logic will not only - look up host but also host.example.com. + domain example.com is set, for instance, the DNS logic looks up + both host and host.example.com.

@@ -105,7 +105,7 @@ Various container options that affect container domain name services.

Sets the options used by DNS resolvers. These options are managed by the embedded - DNS server and will not be updated in the container's /etc/resolv.conf file. + DNS server and are not updated in the container's /etc/resolv.conf file.

See documentation for resolv.conf for a list of valid options. @@ -125,7 +125,7 @@ unreachable from the container's network. After this filtering, if there are no more `nameserver` entries left in the container's `/etc/resolv.conf` file, the daemon adds public Google DNS nameservers (8.8.8.8 and 8.8.4.4) to the container's DNS configuration. If IPv6 is enabled on the daemon, the public -IPv6 Google DNS nameservers will also be added (2001:4860:4860::8888 and +IPv6 Google DNS nameservers are also added (2001:4860:4860::8888 and 2001:4860:4860::8844). > **Note**: If you need access to a host's localhost resolver, you must modify diff --git a/engine/userguide/networking/default_network/binding.md b/engine/userguide/networking/default_network/binding.md index aa4c9a382c2..f188f4dfdd7 100644 --- a/engine/userguide/networking/default_network/binding.md +++ b/engine/userguide/networking/default_network/binding.md @@ -10,8 +10,8 @@ The information in this section explains binding container ports within the Dock create user-defined networks in addition to the default bridge network. By default Docker containers can make connections to the outside world, but the -outside world cannot connect to containers. Each outgoing connection will -appear to originate from one of the host machine's own IP addresses thanks to an +outside world cannot connect to containers. Each outgoing connection +appears to originate from one of the host machine's own IP addresses thanks to an `iptables` masquerading rule on the host machine that the Docker server creates when it starts: @@ -27,7 +27,7 @@ MASQUERADE all -- 172.17.0.0/16 0.0.0.0/0 The Docker server creates a masquerade rule that lets containers connect to IP addresses in the outside world. -If you want containers to accept incoming connections, you will need to provide +If you want containers to accept incoming connections, you need to provide special options when invoking `docker run`. There are two approaches. 
First, you can supply `-P` or `--publish-all=true|false` to `docker run` which @@ -43,7 +43,7 @@ It allows you to particularize which port on docker server - which can be any port at all, not just one within the _ephemeral port range_ -- you want mapped to which port in the container. -Either way, you should be able to peek at what Docker has accomplished in your +Either way, you can peek at what Docker has accomplished in your network stack by examining your NAT tables. ``` @@ -66,7 +66,7 @@ DNAT tcp -- 0.0.0.0/0 0.0.0.0/0 tcp dpt:80 to:172. ``` You can see that Docker has exposed these container ports on `0.0.0.0`, the -wildcard IP address that will match any possible incoming port on the host +wildcard IP address that matches any possible incoming port on the host machine. If you want to be more restrictive and only allow container services to be contacted through a specific external interface on the host machine, you have two choices. When you invoke `docker run` you can use either `-p @@ -83,7 +83,7 @@ exposure is achieved purely through iptables rules, and no attempt to bind the exposed port is ever made. This means that nothing prevents shadowing a previously listening service outside of Docker through exposing the same port for a container. In such conflicting situation, Docker created iptables rules -will take precedence and route to the container. +take precedence and route to the container. The `--userland-proxy` parameter, true by default, provides a userland implementation for inter-container and outside-to-container communication. 
When diff --git a/engine/userguide/networking/default_network/configure-dns.md b/engine/userguide/networking/default_network/configure-dns.md index bc95ca66ca3..3a8319db543 100644 --- a/engine/userguide/networking/default_network/configure-dns.md +++ b/engine/userguide/networking/default_network/configure-dns.md @@ -8,7 +8,7 @@ The information in this section explains configuring container DNS within the Docker default bridge. This is a `bridge` network named `bridge` created automatically when you install Docker. -> **Note**: The [Docker networks feature](../index.md) allows you to create user-defined networks in addition to the default bridge network. Please refer to the [Docker Embedded DNS](../configure-dns.md) section for more information on DNS configurations in user-defined networks. +> **Note**: The [Docker networks feature](../index.md) allows you to create user-defined networks in addition to the default bridge network. Refer to the [Docker Embedded DNS](../configure-dns.md) section for more information on DNS configurations in user-defined networks. How can Docker supply each container with a hostname and DNS configuration, without having to build a custom image with the hostname written inside? Its trick is to overlay three crucial `/etc` files inside the container with virtual files where it can write fresh information. You can see this by running `mount` inside a container: @@ -38,9 +38,9 @@ Four different options affect container domain name services. Sets the hostname by which the container knows itself. This is written into /etc/hostname, into /etc/hosts as the name of the container's host-facing IP address, and is the name that - /bin/bash inside the container will display inside its + /bin/bash inside the container displays inside its prompt. But the hostname is not easy to see from outside the container. - It will not appear in docker ps nor in the + It does not appear in docker ps nor in the /etc/hosts file of any other container.

@@ -73,7 +73,7 @@ Four different options affect container domain name services.

Sets the IP addresses added as nameserver lines to the container's /etc/resolv.conf file. Processes in the container, when - confronted with a hostname not in /etc/hosts, will connect to + confronted with a hostname not in /etc/hosts, connect to these IP addresses on port 53 looking for name resolution services.

@@ -85,7 +85,7 @@ Four different options affect container domain name services. used inside of the container, by writing search lines into the container's /etc/resolv.conf. When a container process attempts to access host and the search domain example.com - is set, for instance, the DNS logic will not only look up host + is set, for instance, the DNS logic not only looks up host but also host.example.com.

@@ -114,14 +114,14 @@ Four different options affect container domain name services. Regarding DNS settings, in the absence of the `--dns=IP_ADDRESS...`, `--dns-search=DOMAIN...`, or `--dns-opt=OPTION...` options, Docker makes each container's `/etc/resolv.conf` look like the `/etc/resolv.conf` of the host machine (where the `docker` daemon runs). When creating the container's `/etc/resolv.conf`, the daemon filters out all localhost IP address `nameserver` entries from the host's original file. -Filtering is necessary because all localhost addresses on the host are unreachable from the container's network. After this filtering, if there are no more `nameserver` entries left in the container's `/etc/resolv.conf` file, the daemon adds public Google DNS nameservers (8.8.8.8 and 8.8.4.4) to the container's DNS configuration. If IPv6 is enabled on the daemon, the public IPv6 Google DNS nameservers will also be added (2001:4860:4860::8888 and 2001:4860:4860::8844). +Filtering is necessary because all localhost addresses on the host are unreachable from the container's network. After this filtering, if there are no more `nameserver` entries left in the container's `/etc/resolv.conf` file, the daemon adds public Google DNS nameservers (8.8.8.8 and 8.8.4.4) to the container's DNS configuration. If IPv6 is enabled on the daemon, the public IPv6 Google DNS nameservers are also added (2001:4860:4860::8888 and 2001:4860:4860::8844). > **Note**: If you need access to a host's localhost resolver, you must modify your DNS service on the host to listen on a non-localhost address that is reachable from within the container. -You might wonder what happens when the host machine's `/etc/resolv.conf` file changes. The `docker` daemon has a file change notifier active which will watch for changes to the host DNS configuration. +You might wonder what happens when the host machine's `/etc/resolv.conf` file changes. 
The `docker` daemon has a file change notifier active which watches for changes to the host DNS configuration. -> **Note**: The file change notifier relies on the Linux kernel's inotify feature. Because this feature is currently incompatible with the overlay filesystem driver, a Docker daemon using "overlay" will not be able to take advantage of the `/etc/resolv.conf` auto-update feature. +> **Note**: The file change notifier relies on the Linux kernel's inotify feature. Because this feature is currently incompatible with the overlay filesystem driver, a Docker daemon using "overlay" cannot take advantage of the `/etc/resolv.conf` auto-update feature. -When the host file changes, all stopped containers which have a matching `resolv.conf` to the host will be updated immediately to this newest host configuration. Containers which are running when the host configuration changes will need to stop and start to pick up the host changes due to lack of a facility to ensure atomic writes of the `resolv.conf` file while the container is running. If the container's `resolv.conf` has been edited since it was started with the default configuration, no replacement will be attempted as it would overwrite the changes performed by the container. If the options (`--dns`, `--dns-search`, or `--dns-opt`) have been used to modify the default host configuration, then the replacement with an updated host's `/etc/resolv.conf` will not happen as well. +When the host file changes, all stopped containers which have a matching `resolv.conf` to the host are updated immediately to this newest host configuration. Containers which are running when the host configuration changes need to stop and start to pick up the host changes due to lack of a facility to ensure atomic writes of the `resolv.conf` file while the container is running. 
If the container's `resolv.conf` has been edited since it was started with the default configuration, no replacement is attempted as it would overwrite the changes performed by the container. If the options (`--dns`, `--dns-search`, or `--dns-opt`) have been used to modify the default host configuration, then the replacement with an updated host's `/etc/resolv.conf` does not happen. -> **Note**: For containers which were created prior to the implementation of the `/etc/resolv.conf` update feature in Docker 1.5.0: those containers will **not** receive updates when the host `resolv.conf` file changes. Only containers created with Docker 1.5.0 and above will utilize this auto-update feature. +> **Note**: For containers which were created prior to the implementation of the `/etc/resolv.conf` update feature in Docker 1.5.0: those containers do **not** receive updates when the host `resolv.conf` file changes. Only containers created with Docker 1.5.0 and above utilize this auto-update feature. diff --git a/engine/userguide/networking/default_network/container-communication.md b/engine/userguide/networking/default_network/container-communication.md index 5880ffef7b4..1a7911d095b 100644 --- a/engine/userguide/networking/default_network/container-communication.md +++ b/engine/userguide/networking/default_network/container-communication.md @@ -17,9 +17,9 @@ factor is whether the host machine is forwarding its IP packets. The second is whether the host's `iptables` allow this particular connection. IP packet forwarding is governed by the `ip_forward` system parameter. Packets -can only pass between containers if this parameter is `1`. Usually you will -simply leave the Docker server at its default setting `--ip-forward=true` and -Docker will go set `ip_forward` to `1` for you when the server starts up. If you +can only pass between containers if this parameter is `1`. 
Usually, the default +setting of `--ip-forward=true` is correct, and causes +Docker to set `ip_forward` to `1` for you when the server starts up. If you set `--ip-forward=false` and your system's kernel has it enabled, the `--ip-forward=false` option has no effect. To check the setting on your kernel or to turn it on manually: @@ -39,15 +39,15 @@ or to turn it on manually: > **Note**: this setting does not affect containers that use the host > network stack (`--network=host`). -Many using Docker will want `ip_forward` to be on, to at least make +Many using Docker need `ip_forward` to be on, to at least make communication _possible_ between containers and the wider world. May also be needed for inter-container communication if you are in a multiple bridge setup. -Docker will never make changes to your system `iptables` rules if you set -`--iptables=false` when the daemon starts. Otherwise the Docker server will -append forwarding rules to the `DOCKER` filter chain. +Docker never makes changes to your system `iptables` rules if you set +`--iptables=false` when the daemon starts. Otherwise the Docker server +appends forwarding rules to the `DOCKER` filter chain. -Docker will flush any pre-existing rules from the `DOCKER` and `DOCKER-ISOLATION` +Docker flushes any pre-existing rules from the `DOCKER` and `DOCKER-ISOLATION` filter chains, if they exist. For this reason, any rules needed to further restrict access to containers need to be added after Docker has started. @@ -67,13 +67,13 @@ where *ext_if* is the name of the interface providing external connectivity to t Whether two containers can communicate is governed, at the operating system level, by two factors. -- Does the network topology even connect the containers' network interfaces? By default Docker will attach all containers to a single `docker0` bridge, providing a path for packets to travel between them. See the later sections of this document for other possible topologies.
+- Does the network topology even connect the containers' network interfaces? By default Docker attaches all containers to a single `docker0` bridge, providing a path for packets to travel between them. See the later sections of this document for other possible topologies. -- Do your `iptables` allow this particular connection? Docker will never make changes to your system `iptables` rules if you set `--iptables=false` when the daemon starts. Otherwise the Docker server will add a default rule to the `FORWARD` chain with a blanket `ACCEPT` policy if you retain the default `--icc=true`, or else will set the policy to `DROP` if `--icc=false`. +- Do your `iptables` allow this particular connection? Docker never makes changes to your system `iptables` rules if you set `--iptables=false` when the daemon starts. Otherwise the Docker server adds a default rule to the `FORWARD` chain with a blanket `ACCEPT` policy if you retain the default `--icc=true`, or else sets the policy to `DROP` if `--icc=false`. It is a strategic question whether to leave `--icc=true` or change it to -`--icc=false` so that `iptables` will protect other containers -- and the main -host -- from having arbitrary ports probed or accessed by a container that gets +`--icc=false` so that `iptables` can protect other containers, and the Docker +host, from having arbitrary ports probed or accessed by a container that gets compromised. If you choose the most secure setting of `--icc=false`, then how can containers @@ -82,14 +82,14 @@ The answer is the `--link=CONTAINER_NAME_or_ID:ALIAS` option, which was mentioned in the previous section because of its effect upon name services. 
If the Docker daemon is running with both `--icc=false` and `--iptables=true` then, when it sees `docker run` invoked with the `--link=` option, the Docker -server will insert a pair of `iptables` `ACCEPT` rules so that the new +server inserts a pair of `iptables` `ACCEPT` rules so that the new container can connect to the ports exposed by the other container -- the ports that it mentioned in the `EXPOSE` lines of its `Dockerfile`. > **Note**: The value `CONTAINER_NAME` in `--link=` must either be an auto-assigned Docker name like `stupefied_pare` or the name you assigned with `--name=` when you ran `docker run`. It cannot be a hostname, which Docker -will not recognize in the context of the `--link=` option. +does not recognize in the context of the `--link=` option. You can run the `iptables` command on your Docker host to see whether the `FORWARD` chain has a default policy of `ACCEPT` or `DROP`: @@ -160,6 +160,6 @@ host2: eth0/192.168.8.1, docker0/172.18.0.0/16 ``` If the container running on `host1` needs the ability to communicate directly with a container on `host2`, you need a route from `host1` to `host2`. After -the route exists, `host2` needs to be able to accept packets destined for its +the route exists, `host2` needs the ability to accept packets destined for its running container, and forward them along. Setting the policy to `ACCEPT` accomplishes this. diff --git a/engine/userguide/networking/default_network/custom-docker0.md b/engine/userguide/networking/default_network/custom-docker0.md index 2bda8939295..d704a93f17c 100644 --- a/engine/userguide/networking/default_network/custom-docker0.md +++ b/engine/userguide/networking/default_network/custom-docker0.md @@ -22,7 +22,7 @@ Docker configures `docker0` with an IP address, netmask, and IP allocation range Containers which are connected to the default bridge are allocated IP addresses within this range. Certain default settings apply to the default bridge unless you specify otherwise. 
For instance, the default maximum transmission unit (MTU), -or the largest packet length that the container will allow, defaults to 1500 +or the largest packet length that the container allows, defaults to 1500 bytes. You can configure the default bridge network's settings using flags to the @@ -56,7 +56,7 @@ each: `172.16.1.0/28`. This range must be an IPv4 range for fixed IPs, and must be a subset of the bridge IP range (`docker0` or set using `--bridge` or the `bip` key in the `daemon.json` file). For example, - with `--fixed-cidr=192.168.1.0/25`, IPs for your containers will be chosen from + with `--fixed-cidr=192.168.1.0/25`, IPs for your containers are chosen from the first half of addresses included in the 192.168.1.0/24 subnet. - `--mtu=BYTES`: override the maximum packet length on `docker0`. @@ -83,8 +83,9 @@ docker0 8000.3a1d7362b4ee no veth65f9 vethdda6 ``` -If the `brctl` command is not installed on your Docker host, then on Ubuntu you -should be able to run `sudo apt-get install bridge-utils` to install it. +If the `brctl` command is not installed on your Docker host, run +`sudo apt-get install bridge-utils` (on Ubuntu hosts) to install it. For other +operating systems, consult the OS documentation. Finally, the `docker0` Ethernet bridge settings are used every time you create a new container. Docker selects a free IP address from the range available on the @@ -115,8 +116,8 @@ default via 172.17.42.1 dev eth0 root@f38c87f2a42d:/# exit ``` -Remember that the Docker host will not be willing to forward container packets -out on to the Internet unless its `ip_forward` system setting is `1` -- see the +The Docker host does not forward container packets +out to the outside world unless its `ip_forward` system setting is `1` -- see the section on [Communicating to the outside world](container-communication.md#communicating-to-the-outside-world) for details. 
diff --git a/engine/userguide/networking/default_network/dockerlinks.md b/engine/userguide/networking/default_network/dockerlinks.md index 854a8a4baa5..5b98b614c7c 100644 --- a/engine/userguide/networking/default_network/dockerlinks.md +++ b/engine/userguide/networking/default_network/dockerlinks.md @@ -67,7 +67,7 @@ This would bind port 5000 in the container to a randomly available port between 8000 and 9000 on the host. There are also a few other ways you can configure the `-p` flag. By -default the `-p` flag will bind the specified port to all interfaces on +default the `-p` flag binds the specified port to all interfaces on the host machine. But you can also specify a binding to a specific interface, for example only to the `localhost`. @@ -88,7 +88,7 @@ You can also bind UDP ports by adding a trailing `/udp`. For example: You also learned about the useful `docker port` shortcut which showed us the current port bindings. This is also useful for showing you specific port configurations. For example, if you've bound the container port to the -`localhost` on the host machine, then the `docker port` output will reflect that. +`localhost` on the host machine, then the `docker port` output reflects that. $ docker port nostalgic_morse 5000 @@ -101,7 +101,7 @@ configurations. For example, if you've bound the container port to the > **Note**: > This section covers the legacy link feature in the default `bridge` network. -> Please refer to [linking containers in user-defined networks](/engine/userguide/networking/work-with-networks.md#linking-containers-in-user-defined-networks) +> Refer to [linking containers in user-defined networks](/engine/userguide/networking/work-with-networks.md#linking-containers-in-user-defined-networks) > for more information on links in user-defined networks. Network port mappings are not the only way Docker containers can connect to one @@ -143,11 +143,11 @@ You can also use `docker inspect` to return the container's name. 
> **Note**: -> Container names have to be unique. That means you can only call +> Container names must be unique. That means you can only call > one container `web`. If you want to re-use a container name you must delete > the old container (with `docker rm`) before you can create a new > container with the same name. As an alternative you can use the `--rm` -> flag with the `docker run` command. This will delete the container +> flag with the `docker run` command. This deletes the container > immediately after it is stopped. ## Communication across links @@ -172,18 +172,18 @@ Now, create a new `web` container and link it with your `db` container. $ docker run -d -P --name web --link db:db training/webapp python app.py -This will link the new `web` container with the `db` container you created +This links the new `web` container with the `db` container you created earlier. The `--link` flag takes the form: --link :alias Where `name` is the name of the container we're linking to and `alias` is an -alias for the link name. You'll see how that alias gets used shortly. +alias for the link name. That alias is used shortly. The `--link` flag also takes the form: --link -In which case the alias will match the name. You could have written the previous +In this case the alias matches the name. You could write the previous example as: $ docker run -d -P --name web --link db training/webapp python app.py @@ -203,7 +203,7 @@ So what does linking the containers actually do? You've learned that a link allo source container to provide information about itself to a recipient container. In our example, the recipient, `web`, can access information about the source `db`. To do this, Docker creates a secure tunnel between the containers that doesn't need to -expose any ports externally on the container; you'll note when we started the +expose any ports externally on the container; when we started the `db` container we did not use either the `-P` or `-p` flags. 
That's a big benefit of linking: we don't need to expose the source container, here the PostgreSQL database, to the network. @@ -218,7 +218,7 @@ recipient container in two ways: Docker creates several environment variables when you link containers. Docker automatically creates environment variables in the target container based on -the `--link` parameters. It will also expose all environment variables +the `--link` parameters. It also exposes all environment variables originating from Docker from the source container. These include variables from: * the `ENV` commands in the source container's Dockerfile @@ -298,8 +298,8 @@ with `DB_`, which is populated from the `alias` you specified above. If the `alias` were `db1`, the variables would be prefixed with `DB1_`. You can use these environment variables to configure your applications to connect to the database -on the `db` container. The connection will be secure and private; only the -linked `web` container will be able to talk to the `db` container. +on the `db` container. The connection is secure and private; only the +linked `web` container can communicate with the `db` container. ### Important notes on Docker environment variables @@ -309,7 +309,7 @@ if the source container is restarted. We recommend using the host entries in `/etc/hosts` to resolve the IP address of linked containers. These environment variables are only set for the first process in the -container. Some daemons, such as `sshd`, will scrub them when spawning shells +container. Some daemons, such as `sshd`, scrub them when spawning shells for connection. ### Updating the `/etc/hosts` file @@ -329,10 +329,10 @@ container: You can see two relevant host entries. The first is an entry for the `web` container that uses the Container ID as a host name. The second entry uses the link alias to reference the IP address of the `db` container. 
In addition to -the alias you provide, the linked container's name--if unique from the alias -provided to the `--link` parameter--and the linked container's hostname will -also be added in `/etc/hosts` for the linked container's IP address. You can ping -that host now via any of these entries: +the alias you provide, the linked container's name, if unique from the alias +provided to the `--link` parameter, and the linked container's hostname are +also added to `/etc/hosts` for the linked container's IP address. You can ping +that host via any of these entries: root@aed84ee21bde:/opt/webapp# apt-get install -yqq inetutils-ping @@ -344,7 +344,7 @@ that host now via any of these entries: 56 bytes from 172.17.0.5: icmp_seq=2 ttl=64 time=0.256 ms > **Note**: -> In the example, you'll note you had to install `ping` because it was not included +> In the example, you had to install `ping` because it was not included > in the container initially. Here, you used the `ping` command to ping the `db` container using its host entry, @@ -356,8 +356,8 @@ to make use of your `db` container. > example, you could have multiple (differently named) web containers attached to your >`db` container. -If you restart the source container, the linked containers `/etc/hosts` files -will be automatically updated with the source container's new IP address, +If you restart the source container, the `/etc/hosts` files on the linked containers +are automatically updated with the source container's new IP address, allowing linked communication to continue. $ docker restart db diff --git a/engine/userguide/networking/default_network/ipv6.md b/engine/userguide/networking/default_network/ipv6.md index c0c95ea7739..b0a20189061 100644 --- a/engine/userguide/networking/default_network/ipv6.md +++ b/engine/userguide/networking/default_network/ipv6.md @@ -19,11 +19,11 @@ reside on layer 3 of the [OSI model](http://en.wikipedia.org/wiki/OSI_model). 
By default, the Docker daemon configures the container network for IPv4 only. You can enable IPv4/IPv6 dualstack support by running the Docker daemon with the -`--ipv6` flag. Docker will set up the bridge `docker0` with the IPv6 [link-local +`--ipv6` flag. Docker sets up the bridge `docker0` with the IPv6 [link-local address](http://en.wikipedia.org/wiki/Link-local_address) `fe80::1`. -By default, containers that are created will only get a link-local IPv6 address. -To assign globally routable IPv6 addresses to your containers you have to +By default, containers that are created only get a link-local IPv6 address. +To assign globally routable IPv6 addresses to your containers you need to specify an IPv6 subnet to pick the addresses from. Set the IPv6 subnet via the `--fixed-cidr-v6` parameter when starting Docker daemon: @@ -59,20 +59,20 @@ $ sysctl net.ipv6.conf.default.forwarding=1 $ sysctl net.ipv6.conf.all.forwarding=1 ``` -All traffic to the subnet `2001:db8:1::/64` will now be routed via the `docker0` +All traffic to the subnet `2001:db8:1::/64` is routed via the `docker0` interface. > **Note**: IPv6 forwarding may interfere with your existing IPv6 > configuration: If you are using Router Advertisements to get IPv6 settings for > your host's interfaces, set `accept_ra` to `2` using the following command. -> Otherwise IPv6 enabled forwarding will result in rejecting Router Advertisements. +> Otherwise IPv6 enabled forwarding results in rejecting Router Advertisements. 
> > $ sysctl net.ipv6.conf.eth0.accept_ra=2 ![IPv6 basic host configuration](images/ipv6_basic_host_config.svg) -Every new container will get an IPv6 address from the defined subnet, and a -default route will be added on `eth0` in the container via the address specified +Each new container gets an IPv6 address from the defined subnet, and a +default route is added on `eth0` in the container via the address specified by the daemon option `--default-gateway-v6` (or `default-gateway-v6` in `daemon.json`) if present. The default gateway defaults to `fe80::1`. @@ -95,12 +95,12 @@ default via fe80::1 dev eth0 metric 1024 In this example, the container is assigned a link-local address with the subnet `/64` (`fe80::42:acff:fe11:3/64`) and a globally routable IPv6 address -(`2001:db8:1:0:0:242:ac11:3/64`). The container will create connections to +(`2001:db8:1:0:0:242:ac11:3/64`). The container creates connections to addresses outside of the `2001:db8:1::/64` network via the link-local gateway at `fe80::1` on `eth0`. -Often servers or virtual machines get a `/64` IPv6 subnet assigned (e.g. -`2001:db8:23:42::/64`). In this case you can split it up further and provide +If your server or virtual machine has a `/64` IPv6 subnet assigned to it, such +as `2001:db8:23:42::/64`, you can split it up further and provide Docker a `/80` subnet while using a separate `/80` subnet for other applications on the host: @@ -110,7 +110,7 @@ In this setup the subnet `2001:db8:23:42::/64` with a range from `2001:db8:23:42:0:0:0:0` to `2001:db8:23:42:ffff:ffff:ffff:ffff` is attached to `eth0`, with the host listening at `2001:db8:23:42::1`. The subnet `2001:db8:23:42:1::/80` with an address range from `2001:db8:23:42:1:0:0:0` to -`2001:db8:23:42:1:ffff:ffff:ffff` is attached to `docker0` and will be used by +`2001:db8:23:42:1:ffff:ffff:ffff` is attached to `docker0` and is used by containers. 
### Using NDP proxying @@ -137,8 +137,8 @@ $ ip -6 addr show To slit up the configurable address range into two subnets `2001:db8::c000/125` and `2001:db8::c008/125`, use the following `daemon.json` -settings. The first subnet will be used by non-Docker processes on the host, and -the second will be used by Docker. +settings. The first subnet is used by non-Docker processes on the host, and +the second is used by Docker. ```json { @@ -174,7 +174,7 @@ $ ip -6 neigh add proxy 2001:db8::c009 dev eth0 From now on, the kernel answers neighbor solicitation addresses for this address on the device `eth0`. All traffic to this IPv6 address is routed through the -Docker host, which will forward it to the container's network according to its +Docker host, which forwards it to the container's network according to its routing table via the `docker0` device: ```bash @@ -184,7 +184,7 @@ $ ip -6 route show 2001:db8::/64 dev eth0 proto kernel metric 256 ``` -You have to execute the `ip -6 neigh add proxy ...` command for every IPv6 +Execute the `ip -6 neigh add proxy ...` command for every IPv6 address in your Docker subnet. Unfortunately there is no functionality for adding a whole subnet by executing one command. An alternative approach would be to use an NDP proxy daemon such as @@ -208,11 +208,11 @@ three routes configured: - Route all traffic to `2001:db8:2::/64` via Host2 with IP `2001:db8::2` Host1 also acts as a router on OSI layer 3. When one of the network clients -tries to contact a target that is specified in Host1's routing table Host1 will -forward the traffic accordingly. It acts as a router for all networks it knows: +tries to contact a target that is specified in Host1's routing table Host1 +forwards the traffic accordingly. It acts as a router for all networks it knows: `2001:db8::/64`, `2001:db8:1::/64`, and `2001:db8:2::/64`. -On Host2 we have nearly the same configuration. Host2's containers will get IPv6 +On Host2 we have nearly the same configuration. 
Host2's containers get IPv6 addresses from `2001:db8:2::/64`. Host2 has three routes configured: - Route all traffic to `2001:db8:0::/64` via `eth0` @@ -223,13 +223,13 @@ The difference to Host1 is that the network `2001:db8:2::/64` is directly attached to Host2 via its `docker0` interface whereas Host2 reaches `2001:db8:1::/64` via Host1's IPv6 address `2001:db8::1`. -This way every container is able to contact every other container. The +This way every container can contact every other container. The containers `Container1-*` share the same subnet and contact each other directly. -The traffic between `Container1-*` and `Container2-*` will be routed via Host1 +The traffic between `Container1-*` and `Container2-*` is routed via Host1 and Host2 because those containers do not share the same subnet. -In a switched environment every host has to know all routes to every subnet. -You always have to update the hosts' routing tables once you add or remove a +In a switched environment every host needs to know all routes to every subnet. +You always need to update the hosts' routing tables once you add or remove a host to the cluster. Every configuration in the diagram that is shown below the dashed line is @@ -240,19 +240,19 @@ adapted to the individual environment. ### Routed network environment In a routed network environment you replace the layer 2 switch with a layer 3 -router. Now the hosts just have to know their default gateway (the router) and +router. Now the hosts just need to know their default gateway (the router) and the route to their own containers (managed by Docker). The router holds all routing information about the Docker subnets. When you add or remove a host to -this environment you just have to update the routing table in the router - not -on every host. +this environment, just update the routing table in the router, rather than on +every host. 
![IPv6 routed network example](images/ipv6_routed_network_example.png) In this scenario containers of the same host can communicate directly with each -other. The traffic between containers on different hosts will be routed via -their hosts and the router. For example packet from `Container1-1` to -`Container2-1` will be routed through `Host1`, `Router`, and `Host2` until it -arrives at `Container2-1`. +other. The traffic between containers on different hosts is routed via +their hosts and the router. For example, packets from `Container1-1` to +`Container2-1` are routed through `Host1`, `Router`, and `Host2` until they +arrive at `Container2-1`. To keep the IPv6 addresses short in this example a `/48` network is assigned to every host. The hosts use a `/64` subnet of this for its own services and one diff --git a/engine/userguide/networking/get-started-macvlan.md b/engine/userguide/networking/get-started-macvlan.md index d5c20b8ec7c..96d6edddb95 100644 --- a/engine/userguide/networking/get-started-macvlan.md +++ b/engine/userguide/networking/get-started-macvlan.md @@ -14,7 +14,7 @@ Macvlan offers a number of unique features and plenty of room for further innova - The examples on this page are all single host and setup using Docker 1.12.0+ -- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. +- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. 
`-o parent` interfaces can also be left out of the `docker network create` all together and the driver creates a `dummy` interface that enables local host connectivity to perform the examples. - Kernel requirements: @@ -43,7 +43,7 @@ In the following example, `eth0` on the docker host has an IP on the `172.16.86. > **Note**: For Macvlan bridge mode the subnet values need to match the NIC's interface of the Docker host. For example, Use the same subnet and gateway of the Docker host ethernet interface that is specified by the `-o parent=` option. -- The parent interface used in this example is `eth0` and it is on the subnet `172.16.86.0/24`. The containers in the `docker network` will also need to be on this same subnet as the parent `-o parent=`. The gateway is an external router on the network, not any ip masquerading or any other local proxy. +- The parent interface used in this example is `eth0` and it is on the subnet `172.16.86.0/24`. The containers in the `docker network` also need to be on this same subnet as the parent `-o parent=`. The gateway is an external router on the network, not any ip masquerading or any other local proxy. - The driver is specified with `-d driver_name` option. In this case `-d macvlan` @@ -91,9 +91,9 @@ ip route # In this case the containers cannot ping the -o parent=172.16.86.250 ``` -You can explicitly specify the `bridge` mode option `-o macvlan_mode=bridge`. It is the default so will be in `bridge` mode either way. +You can explicitly specify the `bridge` mode option `-o macvlan_mode=bridge`. It is the default so is in `bridge` mode either way. -While the `eth0` interface does not need to have an IP address in Macvlan Bridge it is not uncommon to have an IP address on the interface. Addresses can be excluded from getting an address from the default built in IPAM by using the `--aux-address=x.x.x.x` flag. This will blacklist the specified address from being handed out to containers. 
The same network example above blocking the `-o parent=eth0` address from being handed out to a container. +While the `eth0` interface does not need to have an IP address in Macvlan Bridge it is not uncommon to have an IP address on the interface. Addresses can be excluded from getting an address from the default built in IPAM by using the `--aux-address=x.x.x.x` flag. This blacklists the specified address from being handed out to containers. The same network example above blocking the `-o parent=eth0` address from being handed out to a container. ``` docker network create -d macvlan \ @@ -103,7 +103,7 @@ docker network create -d macvlan \ -o parent=eth0 pub_net ``` -Another option for subpool IP address selection in a network provided by the default Docker IPAM driver is to use `--ip-range=`. This specifies the driver to allocate container addresses from this pool rather then the broader range from the `--subnet=` argument from a network create as seen in the following example that will allocate addresses beginning at `192.168.32.128` and increment upwards from there. +Another option for subpool IP address selection in a network provided by the default Docker IPAM driver is to use `--ip-range=`. This specifies the driver to allocate container addresses from this pool rather than the broader range from the `--subnet=` argument from a network create as seen in the following example that allocates addresses beginning at `192.168.32.128` and increments upwards from there. ``` docker network create -d macvlan \ @@ -125,19 +125,19 @@ docker network rm > Communication with the Docker host over macvlan > > - When using macvlan, you cannot ping or communicate with the default namespace IP address. -> For example, if you create a container and try to ping the Docker host's `eth0`, it will +> For example, if you create a container and try to ping the Docker host's `eth0`, it does > **not** work. 
That traffic is explicitly filtered by the kernel modules themselves to > offer additional provider isolation and security. > > - A macvlan subinterface can be added to the Docker host, to allow traffic between the Docker > host and containers. The IP address needs to be set on this subinterface and removed from > the parent address. - + ``` -ip link add mac0 link $PARENTDEV type macvlan mode bridge +ip link add mac0 link $PARENTDEV type macvlan mode bridge ``` -On Debian or Ubuntu, adding the following to `/etc/network/interfaces` will make this persistent. +On Debian or Ubuntu, adding the following to `/etc/network/interfaces` makes this persistent. Consult your operating system documentation for more details. ```none @@ -150,7 +150,7 @@ iface mac0 inet dhcp post-down ip link del mac0 link eno1 type macvlan mode bridge ``` -For more on Docker networking commands, see +For more on Docker networking commands, see Working with Docker network commands](/engine/userguide/networking/work-with-networks/). ## Macvlan 802.1q Trunk Bridge Mode example usage @@ -161,11 +161,11 @@ It is very common to have a compute host requirement of running multiple virtual ![Multi Tenant 802.1q Vlans](images/multi_tenant_8021q_vlans.png) -Trunking 802.1q to a Linux host is notoriously painful for many in operations. It requires configuration file changes in order to be persistent through a reboot. If a bridge is involved, a physical NIC needs to be moved into the bridge and the bridge then gets the IP address. This has lead to many a stranded servers since the risk of cutting off access during that convoluted process is high. +Trunking 802.1q to a Linux host is notoriously painful for many in operations. It requires configuration file changes to be persistent through a reboot. If a bridge is involved, a physical NIC needs to be moved into the bridge and the bridge then gets the IP address. 
This has lead to many a stranded servers since the risk of cutting off access during that convoluted process is high. Like all of the Docker network drivers, the overarching goal is to alleviate the operational pains of managing network resources. To that end, when a network receives a sub-interface as the parent that does not exist, the drivers create the VLAN tagged interfaces while creating the network. -In the case of a host reboot, instead of needing to modify often complex network configuration files the driver will recreate all network links when the Docker daemon restarts. The driver tracks if it created the VLAN tagged sub-interface originally with the network create and will **only** recreate the sub-interface after a restart or delete `docker network rm` the link if it created it in the first place with `docker network create`. +In the case of a host reboot, instead of needing to modify often complex network configuration files the driver recreates all network links when the Docker daemon restarts. The driver tracks if it created the VLAN tagged sub-interface originally with the network create and **only** recreates the sub-interface after a restart or delete `docker network rm` the link if it created it in the first place with `docker network create`. If the user doesn't want Docker to modify the `-o parent` sub-interface, the user simply needs to pass an existing link that already exists as the parent interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces that are not master links. diff --git a/engine/userguide/networking/index.md b/engine/userguide/networking/index.md index 72293546dcc..e1916c707ee 100644 --- a/engine/userguide/networking/index.md +++ b/engine/userguide/networking/index.md @@ -57,8 +57,8 @@ docker0 Link encap:Ethernet HWaddr 02:42:47:bc:3a:eb > Running on Docker for Mac or Docker for Windows? 
> > If you are using Docker for Mac (or running Linux containers on Docker for Windows), the -`docker network ls` command will work as described above, but the -`ip addr show` and `ifconfig` commands may be present, but will give you information about +`docker network ls` command works as described above, but the +`ip addr show` and `ifconfig` commands may be present, but give you information about the IP addresses for your local host, not Docker container networks. This is because Docker uses network interfaces running inside a thin VM, instead of on the host machine itself. @@ -163,7 +163,7 @@ $ docker run -itd --name=container2 busybox Inspect the `bridge` network again after starting two containers. Both of the `busybox` containers are connected to the network. Make note of their IP -addresses, which will be different on your host machine than in the example +addresses, which are different on your host machine than in the example below. ```none @@ -213,7 +213,7 @@ $ docker network inspect bridge Containers connected to the default `bridge` network can communicate with each other by IP address. **Docker does not support automatic service discovery on the -default bridge network. If you want containers to be able to resolve IP addresses +default bridge network. If you want containers to resolve IP addresses by container name, you should use _user-defined networks_ instead**. You can link two containers together using the legacy `docker run --link` option, but this is not recommended in most cases. @@ -292,7 +292,7 @@ You can also manually start the `dockerd` with the flags `--bridge=none --iptables=false`. However, this may not start the daemon with the same environment as the system init scripts, so other behaviors may be changed. -Disabling the default bridge network is an advanced option that most users will +Disabling the default bridge network is an advanced option that most users do not need. 
## User-defined networks @@ -489,7 +489,7 @@ think you may need to use overlay networks in this way, see If your needs are not addressed by any of the above network mechanisms, you can write your own network driver plugin, using Docker's plugin infrastructure. -The plugin will run as a separate process on the host which runs the Docker +The plugin runs as a separate process on the host which runs the Docker daemon. Using network plugins is an advanced topic. Network plugins follow the same restrictions and installation rules as other @@ -504,9 +504,9 @@ $ docker network create --driver weave mynet ``` You can inspect the network, connect and disconnect containers from it, and -remove it. A specific plugin may have specific requirements in order to be -used. Check that plugin's documentation for specific information. For more -information on writing plugins, see +remove it. A specific plugin may have specific requirements. Check that plugin's +documentation for specific information. For more information on writing plugins, +see [Extending Docker](../../extend/legacy_plugins.md) and [Writing a network driver plugin](../../extend/plugins_network.md). @@ -515,9 +515,9 @@ information on writing plugins, see Docker daemon runs an embedded DNS server which provides DNS resolution among containers connected to the same user-defined network, so that these containers can resolve container names to IP addresses. If the embedded DNS server is -unable to resolve the request, it will be forwarded to any external DNS servers +unable to resolve the request, it is forwarded to any external DNS servers configured for the container. To facilitate this when the container is created, -only the embedded DNS server reachable at `127.0.0.11` will be listed in the +only the embedded DNS server reachable at `127.0.0.11` is listed in the container's `resolv.conf` file. 
For more information on embedded DNS server on user-defined networks, see [embedded DNS server in user-defined networks](configure-dns.md) @@ -538,7 +538,7 @@ network and user-defined bridge networks. available high-order port (higher than `30000`) on the host machine, unless you specify the port to map to on the host machine at runtime. You cannot specify the port to map to on the host machine when you build the image (in the - Dockerfile), because there is no way to guarantee that the port will be available + Dockerfile), because there is no way to guarantee that the port is available on the host machine where you run the image. This example publishes port 80 in the container to a random high @@ -556,7 +556,7 @@ network and user-defined bridge networks. ``` The next example specifies that port 80 should be mapped to port 8080 on the - host machine. It will fail if port 8080 is not available. + host machine. It fails if port 8080 is not available. ```bash $ docker run -it -d -p 8080:80 nginx @@ -606,7 +606,7 @@ configure it in different ways: Save the file. -2. When you create or start new containers, the environment variables will be +2. When you create or start new containers, the environment variables are set automatically within the container. ### Set the environment variables manually @@ -662,7 +662,7 @@ way to make `iptables` rules persistent. Docker dynamically manages `iptables` rules for the daemon, as well as your containers, services, and networks. In Docker 17.06 and higher, you can add -rules to a new table called `DOCKER-USER`, and these rules will be loaded before +rules to a new table called `DOCKER-USER`, and these rules are loaded before any rules Docker creates automatically. This can be useful if you need to pre-populate `iptables` rules that need to be in place before Docker runs. 
diff --git a/engine/userguide/networking/overlay-security-model.md b/engine/userguide/networking/overlay-security-model.md index 6fbb4a95c42..48aa80ed50c 100644 --- a/engine/userguide/networking/overlay-security-model.md +++ b/engine/userguide/networking/overlay-security-model.md @@ -30,7 +30,7 @@ automatically rotate the keys every 12 hours. > > Overlay network encryption is not supported on Windows. If a Windows node > attempts to connect to an encrypted overlay network, no error is detected but -> the node will not be able to communicate. +> the node cannot communicate. {: .warning } ## Swarm mode overlay networks and unmanaged containers diff --git a/engine/userguide/networking/overlay-standalone-swarm.md b/engine/userguide/networking/overlay-standalone-swarm.md index 23174ba6efd..f59de80e7ef 100644 --- a/engine/userguide/networking/overlay-standalone-swarm.md +++ b/engine/userguide/networking/overlay-standalone-swarm.md @@ -39,11 +39,11 @@ To use Docker with an external key-value store, you need the following: Docker Machine and Docker Swarm are not mandatory to experience Docker multi-host networking with a key-value store. However, this example uses them to -illustrate how they are integrated. You'll use Machine to create both the +illustrate how they are integrated. You use Machine to create both the key-value store server and the host cluster using a standalone swarm. >**Note**: These examples are not relevant to Docker running in swarm mode and -> will not work in such a configuration. +> do not work in such a configuration. ### Prerequisites @@ -73,7 +73,7 @@ key-value stores. This example uses Consul. When you provision a new machine, the process adds Docker to the host. This means rather than installing Consul manually, you can create an instance using the [consul image from Docker - Hub](https://hub.docker.com/_/consul/). You'll do this in the next step. + Hub](https://hub.docker.com/_/consul/). You do this in the next step. 3. 
Set your local environment to the `mh-keystore` machine. @@ -110,14 +110,14 @@ Keep your terminal open and move on to ### Create a swarm cluster In this step, you use `docker-machine` to provision the hosts for your network. -You won't actually create the network yet. You'll create several -Docker machines in VirtualBox. One of the machines will act as the swarm manager -and you'll create that first. As you create each host, you'll pass the Docker +You don't actually create the network yet. You create several +Docker machines in VirtualBox. One of the machines acts as the swarm manager +and you create that first. As you create each host, you pass the Docker daemon on that machine options that are needed by the `overlay` network driver. > **Note**: This creates a standalone swarm cluster, rather than using Docker > in swarm mode. These examples are not relevant to Docker running in swarm mode -> and will not work in such a configuration. +> and do not work in such a configuration. 1. Create a swarm manager. @@ -325,7 +325,7 @@ it automatically is part of the network.

If you see this page, the nginx web server is successfully installed and working. Further configuration is required.

-

For online documentation and support please refer to +

For online documentation and support, refer to nginx.org.
Commercial support is available at nginx.com.

diff --git a/engine/userguide/networking/work-with-networks.md b/engine/userguide/networking/work-with-networks.md index dc52f251bf2..3d5ae669c82 100644 --- a/engine/userguide/networking/work-with-networks.md +++ b/engine/userguide/networking/work-with-networks.md @@ -125,7 +125,7 @@ The following arguments can be passed to `docker network create` for any network The following example uses `-o` to bind to a specific IP address available on the host when binding ports, then uses `docker network inspect` to inspect the network, and finally -attaches a new container to the new network. Note that you should replace the IP address `172.23.0.1` shown in the +attaches a new container to the new network. Replace the IP address `172.23.0.1` shown in the example with an IP address available on a network interface in your host. ```bash @@ -262,9 +262,9 @@ needed. when connecting it to a network, by using the `--ip` or `--ip6` flag. When you specify an IP address in this way while using a user-defined network, the configuration is preserved as part of the container's configuration and - will be applied when the container is reloaded. Assigned IP addresses are not + is applied when the container is reloaded. Assigned IP addresses are not preserved when using non-user-defined networks, because there is no guarantee - that a container's subnet will not change when the Docker daemon restarts unless + that a container's subnet does not change when the Docker daemon restarts unless you use user-defined networks. 5. Inspect the network resources used by `container3`. The @@ -455,7 +455,7 @@ After you complete the steps in `container2` can resolve `container3`'s name automatically because both containers are connected to the `isolated_nw` network. However, containers connected to the default `bridge` network cannot resolve each other's container name. 
If you need -containers to be able to communicate with each other over the `bridge` network, +containers to communicate with each other over the `bridge` network, you need to use the legacy [link](default_network/dockerlinks.md) feature. This is the only use case where using `--link` is recommended. You should strongly consider using user-defined networks instead. @@ -490,7 +490,7 @@ The following example briefly describes how to use `--link`. ``` This is a little tricky, because `container5` does not exist yet. When - `container5` is created, `container4` will be able to resolve the name `c5` to + `container5` is created, `container4` can resolve the name `c5` to `container5`'s IP address. >**Note**: Any link between containers created with *legacy link* is static in @@ -499,7 +499,7 @@ The following example briefly describes how to use `--link`. networks supports dynamic links between containers, and tolerates restarts and IP address changes in the linked container. - Since you have not yet created container `container5` trying to ping it will result + Since you have not yet created container `container5` trying to ping it results in an error. Attach to `container4` and try to ping either `container5` or `c5`: ```bash @@ -587,12 +587,12 @@ The following example briefly describes how to use `--link`. When you link containers, whether using the legacy `link` method or using user-defined networks, any aliases you specify only have meaning to the -container where they are specified, and won't work on other containers on the +container where they are specified, and don't work on other containers on the default `bridge` network. In addition, if a container belongs to multiple networks, a given linked alias is scoped within a given network. 
Thus, a container can be linked to different -aliases in different networks, and the aliases will not work for containers which +aliases in different networks, and the aliases do not work for containers which are not on the same network. The following example illustrates these points. @@ -809,8 +809,8 @@ The following example illustrates how to set up and use network aliases. ``` When multiple containers share the same alias, one of those containers - will resolve to the alias. If that container is unavailable, another - container with the alias will be resolved. This provides a sort of high + resolves to the alias. If that container is unavailable, another + container with the alias is resolved. This provides a sort of high availability within the cluster. > **Note**: When the IP address is resolved, the container chosen to resolve @@ -840,7 +840,7 @@ The following example illustrates how to set up and use network aliases. ``` In the terminal attached to `container4`, observe the `ping` output. - It will pause when `container6` goes down, because the `ping` command + It pauses when `container6` goes down, because the `ping` command looks up the IP when it is first invoked, and that IP is no longer reachable. However, the `ping` command has a very long timeout by default, so no error occurs. @@ -868,7 +868,7 @@ The following example illustrates how to set up and use network aliases. In the terminal attached to `container4`, run the `ping` command again. It might now resolve to `container6` again. If you start and stop the `ping` - several times, you will see responses from each of the containers. + several times, you can see responses from each of the containers. 
```bash $ docker attach container4 diff --git a/engine/userguide/storagedriver/aufs-driver.md b/engine/userguide/storagedriver/aufs-driver.md index cf30c2b70c0..b0e8e24a32a 100644 --- a/engine/userguide/storagedriver/aufs-driver.md +++ b/engine/userguide/storagedriver/aufs-driver.md @@ -24,7 +24,7 @@ potential performance advantages over the `aufs` storage driver. - If you use Ubuntu, you need to [install extra packages](/engine/installation/linux/ubuntu.md#recommended-extra-packages-for-trusty-1404){: target="_blank" class="_"} to add the AUFS module to the kernel. If you do not install these packages, - you will need to use `devicemapper` on Ubuntu 14.04 (which is not recommended), + you need to use `devicemapper` on Ubuntu 14.04 (which is not recommended), or `overlay2` on Ubuntu 16.04 and higher, which is also supported. - AUFS cannot use the following backing filesystems: `aufs`, `btrfs`, or `ecryptfs`. This means that the filesystem which contains @@ -59,7 +59,7 @@ storage driver is configured, Docker uses it by default. ``` 3. If you are using a different storage driver, either AUFS is not included in - the kernel (in which case a different default driver will be used) or that + the kernel (in which case a different default driver is used) or that Docker has been explicitly configured to use a different driver. Check `/etc/docker/daemon.json` or the output of `ps auxw | grep dockerd` to see if Docker has been started with the `--storage-driver` flag. @@ -118,7 +118,7 @@ subdirectories of `/var/lib/docker/aufs/`. file contains the IDs of all the layers below it in the stack (its parents). - `mnt/`: Mount points, one per image or container layer, which are used to assemble and mount the unified filesystem for a container. For images, which - are read-only, these directories will always be empty. + are read-only, these directories are always empty. 
#### The container layer @@ -206,7 +206,7 @@ To summarize some of the performance related aspects already mentioned: - The AUFS storage driver can introduce significant latencies into container write performance. This is because the first time a container writes to any - file, the file has to be located and copied into the containers top writable + file, the file needs to be located and copied into the containers top writable layer. These latencies increase and are compounded when these files exist below many image layers and the files themselves are large. diff --git a/engine/userguide/storagedriver/btrfs-driver.md b/engine/userguide/storagedriver/btrfs-driver.md index aa0fdfb666e..7ed3eab99ef 100644 --- a/engine/userguide/storagedriver/btrfs-driver.md +++ b/engine/userguide/storagedriver/btrfs-driver.md @@ -30,7 +30,7 @@ Btrfs Filesystem as Btrfs. [Product compatibility matrix](https://success.docker.com/Policies/Compatibility_Matrix) for all supported configurations for commercially-supported Docker. -- Changing the storage driver will make any containers you have already +- Changing the storage driver makes any containers you have already created inaccessible on the local system. Use `docker save` to save containers, and push existing images to Docker Hub or a private repository, so that you not need to re-create them later. @@ -153,7 +153,7 @@ $ sudo btrfs filesystem balance /var/lib/docker ``` > **Note**: While you can do these operations with Docker running, performance -> will suffer. It might be best to plan an outage window to balance the Btrfs +> suffers. It might be best to plan an outage window to balance the Btrfs > filesystem. ## How the `btrfs` storage driver works @@ -255,7 +255,7 @@ storage driver. > **Note**: Many of these factors are mitigated by using Docker volumes for > write-heavy workloads, rather than relying on storing data in the container's -> writable layer. 
However, in the case of Btrfs, Docker volumes will still suffer +> writable layer. However, in the case of Btrfs, Docker volumes still suffer > from these draw-backs unless `/var/lib/docker/volumes/` is **not** backed by > Btrfs. diff --git a/engine/userguide/storagedriver/device-mapper-driver.md b/engine/userguide/storagedriver/device-mapper-driver.md index 4fb53bf9e2b..7d49fd9d228 100644 --- a/engine/userguide/storagedriver/device-mapper-driver.md +++ b/engine/userguide/storagedriver/device-mapper-driver.md @@ -12,8 +12,8 @@ storage driver as `devicemapper`, and the kernel framework as `Device Mapper`. For the systems where it is supported, `devicemapper` support is included in the Linux kernel. However, specific configuration is required to use it with -Docker. For instance, on a stock installation of RHEL or CentOS, Docker will -default to `overlay`, which is not a supported configuration. +Docker. For instance, on a stock installation of RHEL or CentOS, Docker +defaults to `overlay`, which is not a supported configuration. The `devicemapper` driver uses block devices dedicated to Docker and operates at the block level, rather than the file level. These devices can be extended by @@ -33,7 +33,7 @@ a filesystem at the level of the operating system. - `devicemapper` is also supported on Docker CE running on CentOS, Fedora, Ubuntu, or Debian. -- Changing the storage driver will make any containers you have already +- Changing the storage driver makes any containers you have already created inaccessible on the local system. Use `docker save` to save containers, and push existing images to Docker Hub or a private repository, so that you not need to re-create them later. @@ -73,7 +73,7 @@ For production systems, see - [Stable](/engine/reference/commandline/dockerd.md#storage-driver-options) - [Edge](/edge/engine/reference/commandline/dockerd.md#storage-driver-options) - Docker will not start if the `daemon.json` file contains badly-formed JSON. 
+ Docker does not start if the `daemon.json` file contains badly-formed JSON. 3. Start Docker. @@ -137,7 +137,7 @@ After you have satisfied the [prerequisites](#prerequisites), follow the steps below to configure Docker to use the `devicemapper` storage driver in `direct-lvm` mode. -> **Warning**: Changing the storage driver will make any containers you have already +> **Warning**: Changing the storage driver makes any containers you have already created inaccessible on the local system. Use `docker save` to save containers, and push existing images to Docker Hub or a private repository, so that you don't need to recreate them later. @@ -187,23 +187,23 @@ Restart Docker for the changes to take effect. Docker invokes the commands to configure the block device for you. > **Warning**: Changing these values after Docker has prepared the block device -> for you is not supported and will cause an error. +> for you is not supported and causes an error. You still need to [perform periodic maintenance tasks](#manage-devicemapper). #### Configure direct-lvm mode manually -The procedure below will create a logical volume configured as a thin pool to +The procedure below creates a logical volume configured as a thin pool to use as backing for the storage pool. It assumes that you have a spare block device at `/dev/xvdf` with enough free space to complete the task. The device identifier and volume sizes may be different in your environment and you should substitute your own values throughout the procedure. The procedure also assumes that the Docker daemon is in the `stopped` state. -1. Identify the block device you want to use. The device will be located under +1. Identify the block device you want to use. The device is located under `/dev/` (such as `/dev/xvdf`) and needs enough free space to store the - images and container layers for the workloads that host will be running. - Ideally, this will be a solid state drive. 
+ images and container layers for the workloads that host runs. + A solid state drive is ideal. 2. Stop Docker. @@ -286,7 +286,7 @@ assumes that the Docker daemon is in the `stopped` state. `thin_pool_autoextend_percent` is the amount of space to add to the device when automatically extending (0 = disabled). - The example below will add 20% more capacity when the disk usage reaches + The example below adds 20% more capacity when the disk usage reaches 80%. ```none @@ -307,7 +307,7 @@ assumes that the Docker daemon is in the `stopped` state. ``` 11. Enable monitoring for logical volumes on your host. Without this step, - automatic extension will not occur even in the presence of the LVM profile. + automatic extension does not occur even in the presence of the LVM profile. ```bash $ sudo lvs -o+seg_monitor @@ -390,8 +390,8 @@ assumes that the Docker daemon is in the `stopped` state. ``` - If Docker is configured correctly, the `Data file` and `Metadata file` will - be blank, and the pool name will be `docker-thinpool`. + If Docker is configured correctly, the `Data file` and `Metadata file` are + blank, and the pool name is `docker-thinpool`. 16. After you have verified that the configuration is correct, you can remove the `/var/lib/docker.bk` directory which contains the previous configuration. @@ -404,8 +404,8 @@ assumes that the Docker daemon is in the `stopped` state. ### Monitor the thin pool -Do not rely on LVM auto-extension alone. The volume group will -automatically extend, but the volume can still fill up. You can monitor +Do not rely on LVM auto-extension alone. The volume group +automatically extends, but the volume can still fill up. You can monitor free space on the volume using `lvs` or `lvs -a`. Consider using a monitoring tool at the OS level, such a Nagios. @@ -466,7 +466,7 @@ In `loop-lvm` mode, a loopback device is used to store the data, and another to store the metadata.
`loop-lvm` mode is only supported for testing, because it has significant performance and stability drawbacks. -If you are using `loop-lvm` mode, the output of `docker info` will show file +If you are using `loop-lvm` mode, the output of `docker info` shows file paths for `Data loop file` and `Metadata loop file`: ```bash @@ -621,8 +621,8 @@ block device and other parameters to suit your situation. ### Activate the `devicemapper` after reboot -If you reboot the host and find that the docker service failed to start, -you'll see the error, "Non existing device". You need to re-activate the +If you reboot the host and find that the `docker` service failed to start, +look for the error, "Non existing device". You need to re-activate the logical volumes with this command: ```bash @@ -775,7 +775,7 @@ use `loop-lvm`, the blocks may not be freed. This is another reason not to use The `devicemapper` storage driver uses an `allocate-on-demand` operation to allocate new blocks from the thin pool into a container's writable layer. - Each block is 64KB, so this is the minimum amount of space that will be used + Each block is 64KB, so this is the minimum amount of space that is used for a write. - **Copy-on-write performance impact**: The first time a container modifies a diff --git a/engine/userguide/storagedriver/imagesandcontainers.md b/engine/userguide/storagedriver/imagesandcontainers.md index 23a24f3ffd0..206c4740c3f 100644 --- a/engine/userguide/storagedriver/imagesandcontainers.md +++ b/engine/userguide/storagedriver/imagesandcontainers.md @@ -7,14 +7,10 @@ redirect_from: - /engine/installation/userguide/storagedriver/ --- -To use storage drivers effectively, you must understand how Docker builds and -stores images. Then, you need an understanding of how these images are used by -containers. Finally, you'll need a short introduction to the technologies that -enable both images and container operations. 
- -Understanding how Docker manages the data within your images and containers will -help you understand the best way to design your containers and Dockerize your -applications, and avoid performance problems along the way. +To use storage drivers effectively, it's important to know how Docker builds and +stores images, and how these images are used by containers. You can use this +information to make informed choices about the best way to persist data from +your applications and avoid performance problems along the way. ## Images and layers @@ -81,18 +77,18 @@ command. Two different columns relate to size. each container - `virtual size`: the amount of data used for the read-only image data - used by the container plus the container's writable layer `size`. + used by the container plus the container's writable layer `size`. Multiple containers may share some or all read-only image data. Two containers started from the same image share 100% of the read-only data, while two containers with different images which have layers in common share those common layers. Therefore, you can't just total the - virtual sizes. This will over-estimate the total disk usage by a potentially + virtual sizes. This over-estimates the total disk usage by a potentially non-trivial amount. The total disk space used by all of the running containers on disk is some combination of each container's `size` and the `virtual size` values. If -multiple containers started from the same exact image, the total size on disk for -these containers would be SUM (`size` of containers) plus one container's +multiple containers started from the same exact image, the total size on disk for +these containers would be SUM (`size` of containers) plus one container's (`virtual size`- `size`).
This also does not count the following additional ways a container can take up @@ -172,7 +168,7 @@ CMD /app/hello.sh The second image contains all the layers from the first image, plus a new layer with the `CMD` instruction, and a read-write container layer. Docker already has all the layers from the first image, so it does not need to pull them again. -The two images will share any layers they have in common. +The two images share any layers they have in common. If you build images from the two Dockerfiles, you can use `docker images` and `docker history` commands to verify that the cryptographic IDs of the shared @@ -305,7 +301,7 @@ Btrfs, ZFS, and other drivers handle the copy-on-write differently. You can read more about the methods of these drivers later in their detailed descriptions. -Containers that write a lot of data will consume more space than containers +Containers that write a lot of data consume more space than containers that do not. This is because most write operations consume new space in the container's thin writable top layer. @@ -325,7 +321,7 @@ To verify the way that copy-on-write works, the following procedures spins up 5 containers based on the `acme/my-final-image:1.0` image we built earlier and examines how much room they take up. -> **Note**: This procedure won't work on Docker for Mac or Docker for Windows. +> **Note**: This procedure doesn't work on Docker for Mac or Docker for Windows. 1. From a terminal on your Docker host, run the following `docker run` commands. The strings at the end are the IDs of each container. 
diff --git a/engine/userguide/storagedriver/overlayfs-driver.md b/engine/userguide/storagedriver/overlayfs-driver.md index 03551796280..57f94408886 100644 --- a/engine/userguide/storagedriver/overlayfs-driver.md +++ b/engine/userguide/storagedriver/overlayfs-driver.md @@ -24,11 +24,11 @@ OverlayFS is supported if you meet the following prerequisites: - The `overlay2` driver is supported for Docker EE and recommended for Docker CE. - + - The `overlay` driver is allowed but not recommended for Docker CE. - Version 4.0 or higher of the Linux kernel. If you use an older kernel, you - will need to use the `overlay` driver, which is not recommended. + need to use the `overlay` driver, which is not recommended. - The following backing filesystems are supported: - `ext4` (RHEL 7.1 only) @@ -36,7 +36,7 @@ OverlayFS is supported if you meet the following prerequisites: `xfs_info` to verify that the `ftype` option is set to `1`. To format an `xfs` filesystem correctly, use the flag `-n ftype=1`. -- Changing the storage driver will make any containers you have already +- Changing the storage driver makes any containers you have already created inaccessible on the local system. Use `docker save` to save containers, and push existing images to Docker Hub or a private repository, so that you not need to re-create them later. @@ -102,7 +102,7 @@ Before following this procedure, you must first meet all the - [Stable](/engine/reference/commandline/dockerd.md#storage-driver-options) - [Edge](/edge/engine/reference/commandline/dockerd.md#storage-driver-options) - Docker will not start if the `daemon.json` file contains badly-formed JSON. + Docker does not start if the `daemon.json` file contains badly-formed JSON. 5. Start Docker. @@ -422,8 +422,8 @@ Consider some scenarios where files in a container are modified. - OverlayFS only works with two layers. 
This means that performance should be better than AUFS, which can suffer noticeable latencies when searching for files in images with many layers. This advantage applies to both - `overlay` and `overlay2` drivers. `overlayfs2` will be slightly - less performant than `overlayfs` on initial read, because it has to look + `overlay` and `overlay2` drivers. `overlayfs2` is slightly + less performant than `overlayfs` on initial read, because it must look through more layers, but it caches the results so this is only a small penalty. @@ -457,7 +457,7 @@ Both `overlay2` and `overlay` drivers are more performant than `aufs` and makes the `overlay` and `overlay2` drivers efficient with memory and a good option for high-density use cases such as PaaS. -- **copy_up**. As with AUFS, OverlayFS has to perform copy-up operations +- **copy_up**. As with AUFS, OverlayFS performs copy-up operations whenever a container writes to a file for the first time. This can add latency into the write operation, especially for large files. However, once the file has been copied up, all subsequent writes to that file occur in the upper @@ -504,7 +504,7 @@ filesystems: in the image (`lowerdir`) and the `fd2` references the file in the container (`upperdir`). A workaround for this is to `touch` the files which causes the copy-up operation to happen. All subsequent `open(2)` operations regardless of - read-only or read-write access mode will be referencing the file in the + read-only or read-write access mode reference the file in the container (`upperdir`). `yum` is known to be affected unless the `yum-plugin-ovl` package is installed. 
diff --git a/engine/userguide/storagedriver/selectadriver.md b/engine/userguide/storagedriver/selectadriver.md index 29334994d94..e5c4b82684c 100644 --- a/engine/userguide/storagedriver/selectadriver.md +++ b/engine/userguide/storagedriver/selectadriver.md @@ -56,7 +56,7 @@ this decision, there are three high-level factors to consider: [Supported backing filesystems](#supported-backing-filesystems). - After you have narrowed down which storage drivers you can choose from, your - choice will be determined by the characteristics of your workload and the + choice is determined by the characteristics of your workload and the level of stability you need. See [Other considerations](#other-considerations) for help making the final decision. @@ -74,8 +74,7 @@ disable security features of your operating system, such as the need to disable For Docker EE and CS-Engine, the definitive resource for which storage drivers are supported is the [Product compatibility matrix](https://success.docker.com/Policies/Compatibility_Matrix). -In order to get commercial support from Docker, you must use a supported -configuration. +To get commercial support from Docker, you must use a supported configuration. ### Docker CE @@ -97,7 +96,7 @@ use `aufs` on new installations going forward, you need to explicitly configure it, and you may need to install extra packages, such as `linux-image-extra`. See [aufs](aufs-driver.md). -On existing installations using `aufs`, it will continue to be used. +On existing installations using `aufs`, it is still used. When in doubt, the best all-around configuration is to use a modern Linux distribution with a kernel that supports the `overlay2` storage driver, and to @@ -119,7 +118,7 @@ storage driver, be sure to read about > users. If you use a recommended configuration and find a reproducible issue, > it is likely to be fixed very quickly. If the driver that you want to use is > not recommended according to this table, you can run it at your own risk.
You -> can and should still report any issues you run into. However, such issues will +> can and should still report any issues you run into. However, such issues > have a lower priority than issues encountered when using a recommended > configuration. @@ -224,9 +223,9 @@ to physical or logical disks on the Docker host. > **Important**: When you change the storage driver, any existing images and > containers become inaccessible. This is because their layers cannot be used -> by the new storage driver. If you revert your changes, you will be able to +> by the new storage driver. If you revert your changes, you can > access the old images and containers again, but any that you pulled or -> created using the new driver will then be inaccessible. +> created using the new driver are then inaccessible. {:.important} ## Related information diff --git a/engine/userguide/storagedriver/vfs-driver.md b/engine/userguide/storagedriver/vfs-driver.md index ab2664abc5b..9b5d84a637c 100644 --- a/engine/userguide/storagedriver/vfs-driver.md +++ b/engine/userguide/storagedriver/vfs-driver.md @@ -40,7 +40,7 @@ Docker 17.12 and higher include support for quotas when using the VFS driver. } ``` - Docker will not start if the `daemon.json` file contains badly-formed JSON. + Docker does not start if the `daemon.json` file contains badly-formed JSON. 3. Start Docker. diff --git a/engine/userguide/storagedriver/zfs-driver.md b/engine/userguide/storagedriver/zfs-driver.md index 62f95aadf88..82f93f1cf90 100644 --- a/engine/userguide/storagedriver/zfs-driver.md +++ b/engine/userguide/storagedriver/zfs-driver.md @@ -37,7 +37,7 @@ use unless you have substantial experience with ZFS on Linux. - ZFS is not supported on Docker EE or CS-Engine, or any other Linux platforms. - The `/var/lib/docker/` directory must be mounted on a ZFS-formatted filesystem. 
-- Changing the storage driver will make any containers you have already +- Changing the storage driver makes any containers you have already created inaccessible on the local system. Use `docker save` to save containers, and push existing images to Docker Hub or a private repository, so that you not need to re-create them later. diff --git a/enterprise/17.06/index.md b/enterprise/17.06/index.md index f21956f18c8..95c99c7503b 100644 --- a/enterprise/17.06/index.md +++ b/enterprise/17.06/index.md @@ -95,7 +95,7 @@ adopted as quickly for consistency and compatibility reasons. ### Networking * Fix for garbage collection logic in NetworkDB. Entries were not properly garbage collected and deleted within the expected time [docker/libnetwork#1944](https://github.com/docker/libnetwork/pull/1944) [docker/libnetwork#1960](https://github.com/docker/libnetwork/pull/1960) -* Allow configuration of max packet size in network DB to use the full available MTU. Note this will require a configuration in the docker daemon and need a dockerd restart [docker/libnetwork#1839](https://github.com/docker/libnetwork/pull/1839) +* Allow configuration of max packet size in network DB to use the full available MTU. This requires a configuration in the docker daemon and needs a dockerd restart [docker/libnetwork#1839](https://github.com/docker/libnetwork/pull/1839) * Overlay fix for transient IP reuse [docker/libnetwork#1935](https://github.com/docker/libnetwork/pull/1935) [docker/libnetwork#1968](https://github.com/docker/libnetwork/pull/1968) * Serialize IP allocation [docker/libnetwork#1788](https://github.com/docker/libnetwork/pull/1788) @@ -139,7 +139,7 @@ adopted as quickly for consistency and compatibility reasons. - Docker 17.06 by default disables communication with legacy (v1) registries. If you require interaction with registries that have not yet migrated to the v2 protocol, set the `--disable-legacy-registry=false` daemon - option.
Interaction with v1 registries will be removed in Docker 17.12. + option. ### Builder @@ -345,7 +345,7 @@ adopted as quickly for consistency and compatibility reasons. + Add support for "grace periods" on healthchecks through the `HEALTHCHECK --start-period` and `--health-start-period` flag to `docker service create`, `docker service update`, `docker create`, and `docker run` to support containers with an initial startup time [#28938](https://github.com/moby/moby/pull/28938) -* `docker service create` now omits fields that are not specified by the user, when possible. This will allow defaults to be applied inside the manager [#32284](https://github.com/moby/moby/pull/32284) +* `docker service create` now omits fields that are not specified by the user, when possible. This allows defaults to be applied inside the manager [#32284](https://github.com/moby/moby/pull/32284) * `docker service inspect` now shows default values for fields that are not specified by the user [#32284](https://github.com/moby/moby/pull/32284) * Move `docker service logs` out of experimental [#32462](https://github.com/moby/moby/pull/32462) * Add support for Credential Spec and SELinux to services to the API [#32339](https://github.com/moby/moby/pull/32339) @@ -391,8 +391,8 @@ adopted as quickly for consistency and compatibility reasons. ### Known issues If a container is spawned on node A, using the same IP of a container destroyed -on nodeB within 5 min from the time that it exit, the container on node A will -not be reachable until one of these 2 conditions happens: +on nodeB within 5 min from the time that it exits, the container on node A is +not reachable until one of these 2 conditions happens: 1. Container on A sends a packet out, 2. The timer that cleans the arp entry in the overlay namespace is triggered (around 5 minutes).
diff --git a/enterprise/backup.md b/enterprise/backup.md index f0f4e9c8404..5575ccd1a81 100644 --- a/enterprise/backup.md +++ b/enterprise/backup.md @@ -17,7 +17,7 @@ created to make sure it's not corrupt. One way to test your backups is to do a fresh installation in a separate infrastructure and restore the new installation using the backup you've created. -If you create backups for a single component, you won't be able to restore your +If you create backups for a single component, you can't restore your deployment to its previous state. ## Restore Docker Enterprise Edition diff --git a/enterprise/telemetry.md b/enterprise/telemetry.md index 36a1d63e8d0..b6908284449 100644 --- a/enterprise/telemetry.md +++ b/enterprise/telemetry.md @@ -18,7 +18,7 @@ the telemetry plugin does not collect or attempt to send any information to Dock If you do not wish to send any usage data to Docker Inc., you can disable the plugin, either using the Docker CLI or using Universal Control Plane. - > **Note**: If you're using Docker EE Standard or Advanced with Universal Control Plane, use Universal Control Plane (UCP) to enable and disable metrics. Only use the CLI if you do not have UCP. UCP will re-enable the telemetry plugin for hosts where it was disabled with the CLI. + > **Note**: If you're using Docker EE Standard or Advanced with Universal Control Plane, use Universal Control Plane (UCP) to enable and disable metrics. Only use the CLI if you do not have UCP. UCP re-enables the telemetry plugin for hosts where it was disabled with the CLI. ### Use the Docker CLI @@ -53,7 +53,7 @@ Docker by going to **Admin Settings** and choosing **Usage**. ![UCP admin settings Usage defaults](images/usage-defaults.png){: .with-border} To disable the telemetry plugin, disable all three options and click **Save**. -Enabling either or both of the top two options will enable the telemetry plugin. +Enabling either or both of the top two options enables the telemetry plugin.
You can find out more about an individual option by clicking the **?** icon. > **Note**: If API usage statistics are enabled, Docker only gathers aggregate stats about what API endpoints are used. API payload contents are not collected. diff --git a/enterprise/upgrade.md b/enterprise/upgrade.md index 5cffa6c5c7f..2b3ad6bb627 100644 --- a/enterprise/upgrade.md +++ b/enterprise/upgrade.md @@ -12,14 +12,14 @@ following components: 2. Universal Control Plane (UCP). 3. Docker Trusted Registry (DTR). -While upgrading, some of these components will become temporarily unavailable. +While upgrading, some of these components become temporarily unavailable. So you should schedule your upgrades to take place outside business peak hours to make sure there's no impact to your business. ## Create a backup Before upgrading Docker EE, you should make sure you [create a backup](backup.md). -This will make it possible to recover if anything goes wrong during the upgrade. +This makes it possible to recover if anything goes wrong during the upgrade. ## Check the compatibility matrix @@ -27,17 +27,17 @@ You should also check the [compatibility matrix](https://success.docker.com/Poli to make sure all Docker EE components are certified to work with one another. You may also want to check the [Docker EE maintenance lifecycle](https://success.docker.com/Policies/Maintenance_Lifecycle), -to understand until when your version will be supported. +to understand how long your version is supported. ## Upgrade Docker Engine To avoid application downtime, you should be running Docker in Swarm mode and -deploying your workloads as Docker services. That way you'll be able to +deploying your workloads as Docker services. That way you can drain the nodes of any workloads before starting the upgrade. If you have workloads running as containers as opposed to swarm services, make sure they are configured with a [restart policy](/engine/admin/start-containers-automatically/).
-This will make sure your containers are started automatically after the upgrade. +This ensures that your containers are started automatically after the upgrade. To ensure that workloads running as Swarm services have no downtime, you need to: @@ -45,7 +45,7 @@ To ensure that workloads running as Swarm services have no downtime, you need to 2. Upgrade the Docker Engine on that node. 3. Make the node available again. -If you do this sequentially for every node, you'll be able to upgrade with no +If you do this sequentially for every node, you can upgrade with no application downtime. When upgrading manager nodes, make sure the upgrade of a node finishes before you start upgrading the next node. Upgrading multiple manager nodes at the same diff --git a/get-started/index.md b/get-started/index.md index 3161fc75f3b..0d9e4d4bc5c 100644 --- a/get-started/index.md +++ b/get-started/index.md @@ -74,7 +74,7 @@ actually does. ## Prerequisites -While we'll define concepts along the way, it is good for you to understand +While we define concepts along the way, it is good for you to understand [what Docker is](https://www.docker.com/what-docker) before we begin. We also need to assume you are familiar with a few concepts before we continue: @@ -85,7 +85,7 @@ We also need to assume you are familiar with a few concepts before we continue: - Basic familiarity with the ideas of code dependencies and building - Machine resource usage terms, like CPU percentages, RAM use in bytes, etc. -Finally, though we'll remind you again when you need these things, you can +Finally, though we remind you again when you need these things, you can save yourself some distraction at that time by [signing up for a Docker ID](https://cloud.docker.com) and using it on your local machine by running the following command: @@ -145,8 +145,8 @@ installed.
> **Note**: version 1.13 or higher is required -You should be able to run `docker run hello-world` and see a response like this: -> **Note**: You may need to add your user to the `docker` group in order to call this command without sudo. [Read more](https://docs.docker.com/engine/installation/linux/linux-postinstall/) +When you run `docker run hello-world`, look for a response like this: +> **Note**: You may need to add your user to the `docker` group to call this command without sudo. [Read more](https://docs.docker.com/engine/installation/linux/linux-postinstall/) > **Note**: If there are networking issues in your setup, `docker run hello-world` may fail to execute successfully. In case you are behind a proxy server and you suspect that it blocks the connection, check the [next part](https://docs.docker.com/get-started/part2/) of the tutorial. @@ -177,6 +177,7 @@ application, system dependencies are not an issue, and resource density is increased. Orchestration of scaling behavior is a matter of spinning up new executables, not new VM hosts. -We'll be learning about all of these things, but first let's learn to walk. +This tutorial discusses all of these things, but first let's start with the +basics. [On to Part 2 >>](part2.md){: class="button outline-btn" style="margin-bottom: 30px; margin-right: 100%"} diff --git a/get-started/part2.md b/get-started/part2.md index 4eaa1588481..4210189ce28 100644 --- a/get-started/part2.md +++ b/get-started/part2.md @@ -18,7 +18,7 @@ description: Learn how to write, build, and run a simple app -- the Docker way. ## Introduction -It's time to begin building an app the Docker way. We'll start at the bottom of +It's time to begin building an app the Docker way. We start at the bottom of the hierarchy of such an app, which is a container, which we cover on this page. Above this level is a service, which defines how containers behave in production, covered in [Part 3](part3.md). 
Finally, at the top level is the @@ -33,9 +33,9 @@ stack, defining the interactions of all the services, covered in In the past, if you were to start writing a Python app, your first order of business was to install a Python runtime onto your machine. But, -that creates a situation where the environment on your machine has to be just -so in order for your app to run as expected; ditto for the server that runs -your app. +that creates a situation where the environment on your machine needs to be +perfect for your app to run as expected, and also needs to match your production +environment. With Docker, you can just grab a portable Python runtime as an image, no installation necessary. Then, your build can include the base Python image @@ -46,13 +46,13 @@ These portable images are defined by something called a `Dockerfile`. ## Define a container with `Dockerfile` -`Dockerfile` will define what goes on in the environment inside your +`Dockerfile` defines what goes on in the environment inside your container. Access to resources like networking interfaces and disk drives is virtualized inside this environment, which is isolated from the rest of your -system, so you have to map ports to the outside world, and +system, so you need to map ports to the outside world, and be specific about what files you want to "copy in" to that environment. However, after doing that, you can expect that the build of your app defined in this -`Dockerfile` will behave exactly the same wherever it runs. +`Dockerfile` behaves exactly the same wherever it runs. ### `Dockerfile` @@ -107,8 +107,8 @@ This `Dockerfile` refers to a couple of files we haven't created yet, namely Create two more files, `requirements.txt` and `app.py`, and put them in the same folder with the `Dockerfile`. This completes our app, which as you can see is quite simple. 
When the above `Dockerfile` is built into an image, `app.py` and -`requirements.txt` will be present because of that `Dockerfile`'s `ADD` command, -and the output from `app.py` will be accessible over HTTP thanks to the `EXPOSE` +`requirements.txt` are present because of that `Dockerfile`'s `ADD` command, +and the output from `app.py` is accessible over HTTP thanks to the `EXPOSE` command. ### `requirements.txt` @@ -151,14 +151,14 @@ Now we see that `pip install -r requirements.txt` installs the Flask and Redis libraries for Python, and the app prints the environment variable `NAME`, as well as the output of a call to `socket.gethostname()`. Finally, because Redis isn't running (as we've only installed the Python library, and not Redis -itself), we should expect that the attempt to use it here will fail and produce +itself), we should expect that the attempt to use it here fails and produces the error message. > **Note**: Accessing the name of the host when inside a container retrieves the container ID, which is like the process ID for a running executable. That's it! You don't need Python or anything in `requirements.txt` on your -system, nor will building or running this image install them on your system. It +system, nor does building or running this image install them on your system. It doesn't seem like you've really set up an environment with Python and Flask, but you have. @@ -220,7 +220,7 @@ $ curl http://localhost:4000 This port remapping of `4000:80` is to demonstrate the difference between what you `EXPOSE` within the `Dockerfile`, and what you `publish` using -`docker run -p`. In later steps, we'll just map port 80 on the host to port 80 +`docker run -p`. In later steps, we just map port 80 on the host to port 80 in the container and use `http://localhost`. Hit `CTRL+C` in your terminal to quit. @@ -231,7 +231,7 @@ Hit `CTRL+C` in your terminal to quit.
type `CTRL+C` to get the prompt back (or open another shell), then type `docker container ls` to list the running containers, followed by `docker container stop ` to stop the - container. Otherwise, you'll get an error response from the daemon + container. Otherwise, you get an error response from the daemon when you try to re-run the container in the next step. Now let's run the app in the background, in detached mode: @@ -251,7 +251,7 @@ CONTAINER ID IMAGE COMMAND CREATED 1fa4ab2cf395 friendlyhello "python app.py" 28 seconds ago ``` -You'll see that `CONTAINER ID` matches what's on `http://localhost:4000`. +Notice that `CONTAINER ID` matches what's on `http://localhost:4000`. Now use `docker container stop` to end the process, using the `CONTAINER ID`, like so: @@ -262,7 +262,7 @@ docker container stop 1fa4ab2cf395 ## Share your image To demonstrate the portability of what we just created, let's upload our built -image and run it somewhere else. After all, you'll need to learn how to push to +image and run it somewhere else. After all, you need to know how to push to registries when you want to deploy containers to production. A registry is a collection of repositories, and a repository is a collection of @@ -270,7 +270,7 @@ images—sort of like a GitHub repository, except the code is already built. An account on a registry can create many repositories. The `docker` CLI uses Docker's public registry by default. -> **Note**: We'll be using Docker's public registry here just because it's free +> **Note**: We use Docker's public registry here just because it's free and pre-configured, but there are many public ones to choose from, and you can even set up your own private registry using [Docker Trusted Registry](/datacenter/dtr/2.2/guides/). @@ -293,11 +293,11 @@ The notation for associating a local image with a repository on a registry is `username/repository:tag`. 
The tag is optional, but recommended, since it is the mechanism that registries use to give Docker images a version. Give the repository and tag meaningful names for the context, such as -`get-started:part2`. This will put the image in the `get-started` repository and +`get-started:part2`. This puts the image in the `get-started` repository and tag it as `part2`. Now, put it all together to tag the image. Run `docker tag image` with your -username, repository, and tag names so that the image will upload to your +username, repository, and tag names so that the image uploads to your desired destination. The syntax of the command is: ```shell @@ -331,7 +331,7 @@ docker push username/repository:tag ``` Once complete, the results of this upload are publicly available. If you log in -to [Docker Hub](https://hub.docker.com/), you will see the new image there, with +to [Docker Hub](https://hub.docker.com/), you see the new image there, with its pull command. ### Pull and run the image from the remote repository @@ -343,7 +343,7 @@ command: docker run -p 4000:80 username/repository:tag ``` -If the image isn't available locally on the machine, Docker will pull it from +If the image isn't available locally on the machine, Docker pulls it from the repository. ```shell @@ -364,12 +364,12 @@ Status: Downloaded newer image for john/get-started:part2 No matter where `docker run` executes, it pulls your image, along with Python and all the dependencies from `requirements.txt`, and runs your code. It all -travels together in a neat little package, and the host machine doesn't have to -install anything but Docker to run it. +travels together in a neat little package, and you don't need to install +anything on the host machine for Docker to run it. ## Conclusion of part two -That's all for this page. In the next section, we will learn how to scale our +That's all for this page. In the next section, we learn how to scale our application by running this container in a **service**. 
[Continue to Part 3 >>](part3.md){: class="button outline-btn"} diff --git a/get-started/part3.md b/get-started/part3.md index 3ac0f8d8b09..d8afbb0bdb6 100644 --- a/get-started/part3.md +++ b/get-started/part3.md @@ -12,7 +12,7 @@ description: Learn how to define load-balanced and scalable service that runs co - Get [Docker Compose](/compose/overview.md). On [Docker for Mac](/docker-for-mac/index.md) and [Docker for Windows](/docker-for-windows/index.md) it's pre-installed, so you're good-to-go. -On Linux systems you will need to [install it +On Linux systems you need to [install it directly](https://github.com/docker/compose/releases). On pre Windows 10 systems _without Hyper-V_, use [Docker Toolbox](https://docs.docker.com/toolbox/overview.md). @@ -22,8 +22,8 @@ Toolbox](https://docs.docker.com/toolbox/overview.md). - Learn how to create containers in [Part 2](part2.md). - Make sure you have published the `friendlyhello` image you created by -[pushing it to a registry](/get-started/part2.md#share-your-image). We'll -use that shared image here. +[pushing it to a registry](/get-started/part2.md#share-your-image). We use that +shared image here. - Be sure your image works as a deployed container. Run this command, slotting in your info for `username`, `repo`, and `tag`: `docker run -p 80:80 @@ -104,7 +104,7 @@ This `docker-compose.yml` file tells Docker to do the following: - Map port 80 on the host to `web`'s port 80. - Instruct `web`'s containers to share port 80 via a load-balanced network - called `webnet`. (Internally, the containers themselves will publish to + called `webnet`. (Internally, the containers themselves publish to `web`'s port 80 at an ephemeral port.) 
- Define the `webnet` network with the default settings (which is a @@ -113,16 +113,16 @@ This `docker-compose.yml` file tells Docker to do the following: ## Run your new load-balanced app -Before we can use the `docker stack deploy` command we'll first run: +Before we can use the `docker stack deploy` command we first run: ```shell docker swarm init ``` ->**Note**: We'll get into the meaning of that command in [part 4](part4.md). -> If you don't run `docker swarm init` you'll get an error that "this node is not a swarm manager." +>**Note**: We get into the meaning of that command in [part 4](part4.md). +> If you don't run `docker swarm init` you get an error that "this node is not a swarm manager." -Now let's run it. You have to give your app a name. Here, it is set to +Now let's run it. You need to give your app a name. Here, it is set to `getstartedlab`: ```shell @@ -138,13 +138,13 @@ Get the service ID for the one service in our application: docker service ls ``` -You'll see output for the `web` service, prepended with your app name. If you -named it the same as shown in this example, the name will be +Look for output for the `web` service, prepended with your app name. If you +named it the same as shown in this example, the name is `getstartedlab_web`. The service ID is listed as well, along with the number of replicas, image name, and exposed ports. A single container running in a service is called a **task**. Tasks are given unique -IDs that numerically increment, up to the number of `replicas` you defined in +IDs that numerically increment, up to the number of `replicas` you defined in `docker-compose.yml`. List the tasks for your service: ```shell @@ -152,7 +152,7 @@ docker service ps getstartedlab_web ``` Tasks also show up if you just list all the containers on your system, though that -will not be filtered by service: +is not filtered by service: ```shell docker container ls -q @@ -163,9 +163,9 @@ your browser and hit refresh a few times. 
![Hello World in browser](images/app80-in-browser.png) -Either way, you'll see the container ID change, demonstrating the +Either way, the container ID changes, demonstrating the load-balancing; with each request, one of the 5 tasks is chosen, in a -round-robin fashion, to respond. The container IDs will match your output from +round-robin fashion, to respond. The container IDs match your output from the previous command (`docker container ls -q`). > Running Windows 10? @@ -173,16 +173,16 @@ the previous command (`docker container ls -q`). > Windows 10 PowerShell should already have `curl` available, but if not you can > grab a Linux terminal emulator like > [Git BASH](https://git-for-windows.github.io/){: target="_blank" class="_"}, -> or download +> or download > [wget for Windows](http://gnuwin32.sourceforge.net/packages/wget.htm) > which is very similar. > Slow response times? > -> Depending on your environment's networking configuration, it may take up to 30 +> Depending on your environment's networking configuration, it may take up to 30 > seconds for the containers > to respond to HTTP requests. This is not indicative of Docker or -> swarm performance, but rather an unmet Redis dependency that we will +> swarm performance, but rather an unmet Redis dependency that we > address later in the tutorial. For now, the visitor counter isn't working > for the same reason; we haven't yet added a service to persist data. @@ -196,7 +196,7 @@ saving the change, and re-running the `docker stack deploy` command: docker stack deploy -c docker-compose.yml getstartedlab ``` -Docker will do an in-place update, no need to tear the stack down first or kill +Docker performs an in-place update, no need to tear the stack down first or kill any containers. Now, re-run `docker container ls -q` to see the deployed instances reconfigured. @@ -219,7 +219,7 @@ started. It's as easy as that to stand up and scale your app with Docker. 
You've taken a huge step towards learning how to run containers in production. Up next, you -will learn how to run this app as a bonafide swarm on a cluster of Docker +learn how to run this app as a bonafide swarm on a cluster of Docker machines. > **Note**: Compose files like this are used to define applications with Docker, and can be uploaded to cloud providers using [Docker diff --git a/get-started/part4.md b/get-started/part4.md index beab97e0424..7127cec8172 100644 --- a/get-started/part4.md +++ b/get-started/part4.md @@ -22,8 +22,8 @@ Windows](/docker-for-windows/index.md), but on Linux systems you need to - Learn how to create containers in [Part 2](part2.md). - Make sure you have published the `friendlyhello` image you created by -[pushing it to a registry](/get-started/part2.md#share-your-image). We'll -be using that shared image here. +[pushing it to a registry](/get-started/part2.md#share-your-image). We use that +shared image here. - Be sure your image works as a deployed container. Run this command, slotting in your info for `username`, `repo`, and `tag`: `docker run -p 80:80 @@ -63,7 +63,7 @@ machine what it can and cannot do. Up until now, you have been using Docker in a single-host mode on your local machine. But Docker also can be switched into **swarm mode**, and that's what enables the use of swarms. Enabling swarm mode instantly makes the current -machine a swarm manager. From then on, Docker will run the commands you execute +machine a swarm manager. From then on, Docker runs the commands you execute on the swarm you're managing, rather than just on the current machine. ## Set up your swarm @@ -72,7 +72,7 @@ A swarm is made up of multiple nodes, which can be either physical or virtual machines. The basic concept is simple enough: run `docker swarm init` to enable swarm mode and make your current machine a swarm manager, then run `docker swarm join` on other machines to have them join the swarm as workers. 
-Choose a tab below to see how this plays out in various contexts. We'll use VMs +Choose a tab below to see how this plays out in various contexts. We use VMs to quickly create a two-machine cluster and turn it into a swarm. ### Create a cluster @@ -87,7 +87,7 @@ to quickly create a two-machine cluster and turn it into a swarm. #### VMs on your local machine (Mac, Linux, Windows 7 and 8) -First, you'll need a hypervisor that can create virtual machines (VMs), so +You need a hypervisor that can create virtual machines (VMs), so [install Oracle VirtualBox](https://www.virtualbox.org/wiki/Downloads) for your machine's OS. @@ -115,7 +115,7 @@ docker-machine create --driver virtualbox myvm2 #### VMs on your local machine (Windows 10) First, quickly create a virtual switch for your virtual machines (VMs) to share, -so they will be able to connect to each other. +so they can connect to each other. 1. Launch Hyper-V Manager 2. Click **Virtual Switch Manager** in the right-hand menu @@ -158,11 +158,11 @@ myvm2 - virtualbox Running tcp://192.168.99.101:2376 v17. #### Initialize the swarm and add nodes -The first machine will act as the manager, which executes management commands -and authenticates workers to join the swarm, and the second will be a worker. +The first machine acts as the manager, which executes management commands +and authenticates workers to join the swarm, and the second is a worker. You can send commands to your VMs using `docker-machine ssh`. Instruct `myvm1` -to become a swarm manager with `docker swarm init` and you'll see output like +to become a swarm manager with `docker swarm init` and look for output like this: ```shell @@ -189,8 +189,8 @@ To add a manager to this swarm, run 'docker swarm join-token manager' and follow > Having trouble using SSH? 
Try the --native-ssh flag > -> Docker Machine has [the option to let you use your own system's SSH](/machine/reference/ssh/#different-types-of-ssh), if -> for some reason you're having trouble sending commands to your Swarm manager. Just specify the +> Docker Machine has [the option to let you use your own system's SSH](/machine/reference/ssh/#different-types-of-ssh), if +> for some reason you're having trouble sending commands to your Swarm manager. Just specify the > `--native-ssh` flag when invoking the `ssh` command: > > ``` @@ -351,8 +351,8 @@ docker stack deploy -c docker-compose.yml getstartedlab And that's it, the app is deployed on a swarm cluster! Now you can use the same [docker commands you used in part -3](/get-started/part3.md#run-your-new-load-balanced-app). Only this time you'll -see that the services (and associated containers) have been distributed between +3](/get-started/part3.md#run-your-new-load-balanced-app). Only this time notice +that the services (and associated containers) have been distributed between both `myvm1` and `myvm2`. ``` @@ -382,7 +382,7 @@ the VM but doesn't give you immediate access to files on your local host. > > * On Mac and Linux, you can use `docker-machine scp :~` to copy files across machines, but Windows users need a Linux terminal emulator -like [Git Bash](https://git-for-windows.github.io/){: target="_blank" class="_"} in order for this to work. +like [Git Bash](https://git-for-windows.github.io/){: target="_blank" class="_"} for this to work. > > This tutorial demos both `docker-machine ssh` and `docker-machine env`, since these are available on all platforms via the `docker-machine` CLI. @@ -397,7 +397,7 @@ browser, hitting refresh (or just `curl` them). ![Hello World in browser](images/app-in-browser-swarm.png) -You'll see five possible container IDs all cycling by randomly, demonstrating +There are five possible container IDs all cycling by randomly, demonstrating the load-balancing. 
The reason both IP addresses work is that nodes in a swarm participate in an @@ -411,7 +411,7 @@ look: > Having connectivity trouble? > -> Keep in mind that in order to use the ingress network in the swarm, +> Keep in mind that to use the ingress network in the swarm, > you need to have the following ports open between the swarm nodes > before you enable swarm mode: > @@ -432,8 +432,8 @@ image](part2.md#publish-the-image)). In either case, simply run `docker stack deploy` again to deploy these changes. You can join any machine, physical or virtual, to this swarm, using the -same `docker swarm join` command you used on `myvm2`, and capacity will be added -to your cluster. Just run `docker stack deploy` afterwards, and your app will +same `docker swarm join` command you used on `myvm2`, and capacity is added +to your cluster. Just run `docker stack deploy` afterwards, and your app can take advantage of the new resources. ## Cleanup and reboot @@ -451,7 +451,7 @@ docker stack rm getstartedlab > At some point later, you can remove this swarm if you want to with > `docker-machine ssh myvm2 "docker swarm leave"` on the worker > and `docker-machine ssh myvm1 "docker swarm leave --force"` on the -> manager, but _you'll need this swarm for part 5, so please keep it +> manager, but _you need this swarm for part 5, so keep it > around for now_. ### Unsetting docker-machine shell variable settings @@ -470,7 +470,7 @@ see the [Machine topic on unsetting environment variables](/machine/get-started/ ### Restarting Docker machines -If you shut down your local host, Docker machines will stop running. You can check the status of machines by running `docker-machine ls`. +If you shut down your local host, Docker machines stop running. You can check the status of machines by running `docker-machine ls`. 
``` $ docker-machine ls diff --git a/get-started/part5.md b/get-started/part5.md index 633e7bb69fc..5a180bf22d8 100644 --- a/get-started/part5.md +++ b/get-started/part5.md @@ -15,7 +15,7 @@ description: Learn how to create a multi-container application that uses all the - Learn how to create containers in [Part 2](part2.md). - Make sure you have published the `friendlyhello` image you created by -[pushing it to a registry](/get-started/part2.md#share-your-image). We'll +[pushing it to a registry](/get-started/part2.md#share-your-image). We use that shared image here. - Be sure your image works as a deployed container. Run this command, @@ -31,7 +31,7 @@ by `docker-machine start myvm2` to boot the worker. - Have the swarm you created in [part 4](part4.md) running and ready. Run `docker-machine ssh myvm1 "docker node ls"` to verify this. If the swarm is up, -both nodes will report a `ready` status. If not, reinitialze the swarm and join +both nodes report a `ready` status. If not, reinitialize the swarm and join the worker as described in [Set up your swarm](/get-started/part4.md#set-up-your-swarm). @@ -41,7 +41,7 @@ In [part 4](part4.md), you learned how to set up a swarm, which is a cluster of machines running Docker, and deployed an application to it, with containers running in concert on multiple machines. -Here in part 5, you'll reach the top of the hierarchy of distributed +Here in part 5, you reach the top of the hierarchy of distributed applications: the **stack**. A stack is a group of interrelated services that share dependencies, and can be orchestrated and scaled together. A single stack is capable of defining and coordinating the functionality of an entire @@ -50,7 +50,7 @@ application (though very complex applications may want to use multiple stacks). Some good news is, you have technically been working with stacks since part 3, when you created a Compose file and used `docker stack deploy`. 
But that was a single service stack running on a single host, which is not usually what takes -place in production. Here, you will take what you've learned, make +place in production. Here, you can take what you've learned, make multiple services relate to each other, and run them on multiple machines. You're doing great, this is the home stretch! @@ -98,14 +98,14 @@ with the following. Be sure to replace `username/repo:tag` with your image detai ``` The only thing new here is the peer service to `web`, named `visualizer`. - You'll see two new things here: a `volumes` key, giving the visualizer + Notice two new things here: a `volumes` key, giving the visualizer access to the host's socket file for Docker, and a `placement` key, ensuring that this service only ever runs on a swarm manager -- never a worker. That's because this container, built from [an open source project created by Docker](https://github.com/ManoMarks/docker-swarm-visualizer), displays Docker services running on a swarm in a diagram. - We'll talk more about placement constraints and volumes in a moment. + We talk more about placement constraints and volumes in a moment. 2. Make sure your shell is configured to talk to `myvm1` (full examples are [here](part4.md#configure-a-docker-machine-shell-to-the-swarm-manager)). @@ -126,7 +126,7 @@ with the following. Be sure to replace `username/repo:tag` with your image detai ``` 3. Re-run the `docker stack deploy` command on the manager, and -whatever services need updating will be updated: +whatever services need updating are updated: ```shell $ docker stack deploy -c docker-compose.yml getstartedlab @@ -138,7 +138,7 @@ whatever services need updating will be updated: You saw in the Compose file that `visualizer` runs on port 8080. Get the IP address of one of your nodes by running `docker-machine ls`. 
Go - to either IP address at port 8080 and you will see the visualizer running: + to either IP address at port 8080 and you can see the visualizer running: ![Visualizer screenshot](images/get-started-visualizer1.png) @@ -153,7 +153,7 @@ whatever services need updating will be updated: The visualizer is a standalone service that can run in any app that includes it in the stack. It doesn't depend on anything else. Now let's create a service that *does* have a dependency: the Redis - service that will provide a visitor counter. + service that provides a visitor counter. ## Persist the data @@ -232,7 +232,7 @@ Redis service. Be sure to replace `username/repo:tag` with your image details. - The placement constraint you put on the Redis service, ensuring that it always uses the same host. - - The volume you created that lets the container access `./data` (on the host) as `/data` (inside the Redis container). While containers come and go, the files stored on `./data` on the specified host will persist, enabling continuity. + - The volume you created that lets the container access `./data` (on the host) as `/data` (inside the Redis container). While containers come and go, the files stored on `./data` on the specified host persist, enabling continuity. You are ready to deploy your new Redis-using stack. @@ -277,11 +277,11 @@ Redis service. Be sure to replace `username/repo:tag` with your image details. ``` -6. Check the web page at one of your nodes (e.g. `http://192.168.99.101`) and you'll see the results of the visitor counter, which is now live and storing information on Redis. +6. Check the web page at one of your nodes, such as `http://192.168.99.101`, and take a look at the results of the visitor counter, which is now live and storing information on Redis. 
![Hello World in browser with Redis](images/app-in-browser-redis.png) - Also, check the visualizer at port 8080 on either node's IP address, and you'll see the `redis` service running along with the `web` and `visualizer` services. + Also, check the visualizer at port 8080 on either node's IP address, and notice the `redis` service running along with the `web` and `visualizer` services. ![Visualizer with redis screenshot](images/visualizer-with-redis.png) diff --git a/get-started/part6.md b/get-started/part6.md index f72f822a21d..c4d73875662 100644 --- a/get-started/part6.md +++ b/get-started/part6.md @@ -14,8 +14,8 @@ description: Deploy your app to production using Docker CE or EE. - Learn how to create containers in [Part 2](part2.md). - Make sure you have published the `friendlyhello` image you created by -[pushing it to a registry](/get-started/part2.md#share-your-image). We'll -be using that shared image here. +[pushing it to a registry](/get-started/part2.md#share-your-image). We use that +shared image here. - Be sure your image works as a deployed container. Run this command, slotting in your info for `username`, `repo`, and `tag`: `docker run -p 80:80 @@ -27,7 +27,7 @@ username/repo:tag`, then visit `http://localhost/`. You've been editing the same Compose file for this entire tutorial. Well, we have good news. That Compose file works just as well in production as it does -on your machine. Here, we'll go through some options for running your +on your machine. Here, we go through some options for running your Dockerized application. ## Choose an option @@ -43,7 +43,7 @@ To set up and deploy: - Use Docker Cloud to create your computing resources and create your swarm. - Deploy your app. -> **Note**: We will be linking into the Docker Cloud documentation here; be sure +> **Note**: We link into the Docker Cloud documentation here; be sure to come back to this page after completing each step. 
### Connect Docker Cloud @@ -188,9 +188,9 @@ These are the ports you need to expose for each service: | `visualizer` | HTTP | TCP | 8080 | | `redis` | TCP | TCP | 6379 | -Methods for doing this will vary depending on your cloud provider. +Methods for doing this vary depending on your cloud provider. -We'll use Amazon Web Services (AWS) as an example. +We use Amazon Web Services (AWS) as an example. > What about the redis service to persist data? > @@ -209,12 +209,12 @@ to view the nodes. 2. On the left menu, go to Network & Security > **Security Groups**. - You'll see security groups related to your swarm + See the security groups related to your swarm for `getstartedlab-Manager-`, `getstartedlab-Nodes-`, and `getstartedlab-SwarmWide-`. 3. Select the "Node" security group for the swarm. The group name -will be something like this: `getstartedlab-NodeVpcSG-9HV9SMHDZT8C`. +is something like this: `getstartedlab-NodeVpcSG-9HV9SMHDZT8C`. 4. Add Inbound rules for the `web`, `visualizer`, and `redis` services, setting the Type, Protocol and Port for each as shown in the @@ -223,7 +223,7 @@ services, setting the Type, Protocol and Port for each as shown in the ![open web service port](images/cloud-aws-web-port-open.png) > **Tip**: When you save the new rules, HTTP and TCP - ports will be auto-created for both IPv4 and IPv6 style addresses. + ports are auto-created for both IPv4 and IPv6 style addresses. ![security groups rules](images/cloud-aws-web-and-visualizer-ports.png) @@ -259,9 +259,9 @@ image](part2.md#publish-the-image)). ``` Unlike the scenario where you were running the swarm on local Docker machine -VMs, your swarm and any apps deployed on it will continue to run on cloud +VMs, your swarm and any apps deployed on it continue to run on cloud servers regardless of whether you shut down your local host. 
- + {% endcapture %} {% capture enterpriseboilerplate %} Customers of Docker Enterprise Edition run a stable, commercially-supported @@ -279,7 +279,7 @@ file from directly within the UI](/datacenter/ucp/2.1/guides/user/services/){: o ![Deploy an app on DDC](/datacenter/ucp/2.1/guides/images/deploy-app-ui-1.png) -After that, you'll see it running, and can change any aspect of the application +After that, you can see it running, and can change any aspect of the application you choose, or even edit the Compose file itself. ![Managing app on DDC](/datacenter/ucp/2.1/guides/images/deployed_visualizer.png) diff --git a/hackathon/index.md b/hackathon/index.md index 945647eae8f..037be5e4422 100644 --- a/hackathon/index.md +++ b/hackathon/index.md @@ -207,7 +207,7 @@ cash in our rewards store at http://www.cafepress.com/dockerdocshackathon. Our points-to-cash conversion rate will be figured out at the end of the hackathon, and will essentially be a function of the number of points that -hackathon participants logged, and the number of dollars we have to spend on +hackathon participants logged, and the number of dollars we need to spend on prizes. [View our available prizes](http://www.cafepress.com/dockerdocshackathon){: class="button primary-btn"} @@ -261,7 +261,7 @@ there. You'll see point values for each of the bugs, such as `points/100` and `points/250`. Opening an accepted PR that fixes these bugs by the end of the hackathon earns you that number of points. -So if you fix a 100-point bug, that's $10 you have to spend in [the swag store](http://www.cafepress.com/dockerdocshackathon). +So if you fix a 100-point bug, that's $10 you need to spend in [the swag store](http://www.cafepress.com/dockerdocshackathon). ### Questions? @@ -277,7 +277,7 @@ To participate in the Docs Hackathon you must submit a pull request that handles Multiple entries per person allowed and encouraged. 
-Participation in the Docs Hackathon and submission of content therein gives Docker rights to use submitted content (e.g., text, code, and images) for potential future promotional marketing activities. +Participation in the Docs Hackathon and submission of content therein gives Docker rights to use submitted content, including text, code, and images, for potential future promotional marketing activities. For the purposes of copyright all content submitted to Docker for the Docs Hackathon belongs to Docker. diff --git a/index.md b/index.md index dec87f398b1..c32555e2362 100644 --- a/index.md +++ b/index.md @@ -24,7 +24,7 @@ production servers in the cloud. Total reading time is less than an hour. ## Try Docker Enterprise Edition -Run your solution in production with Docker Enterprise Edition and you'll get a +Run your solution in production with Docker Enterprise Edition to get a management dashboard, security scanning, LDAP integration, content signing, multi-cloud support, and more. Click below to test-drive a running instance of Docker EE without installing anything. @@ -34,200 +34,6 @@ Docker EE without installing anything.
-{% if site.edge == true %} -{% capture ce-edge-section %} - -## Docker CE Edge - -The Docker CE Edge channel provides monthly releases which allow you to try -new features of Docker and verify bug fixes quickly. Edge releases are only -supported for one month, and a given Edge release will not receive any updates -once a new edge release is available. - -Stable releases are not published to the Edge channel, so Linux repository users -still need to subscribe to the Stable channel as well. - -Commercial support is not available for Docker CE. - -For information about all Docker release channels and expectations about -support, see [Docker channels](/engine/installation/#docker-channels). - - -Read more about Docker CE Edge releases - -
- -This page lists features that are only available in Docker CE Edge releases. -Where applicable, the API and CLI reference documentation has been updated to -reflect these features, but **full documentation for a given feature may not be -available until a Docker CE Stable release incorporates the feature**. - -### Docker CE Edge new features - - -
-
- -#### Docker CE Edge 17.04 - -The following major features and changes are included in Docker CE Edge 17.04. -Continue reading, or go straight to [API and CLI](#api-and-cli), -[Daemon](#daemon), [Dockerfile](#dockerfile), [Services](#services), or -[Stacks](#stacks). - -[Read the full release notes](https://github.com/moby/moby/releases/tag/v17.04.0-ce){: target="_blank" class="_" } - -##### API and CLI - -- Add `--device-cgroup-rule` flag to give containers access to devices that appear - after the container is started. {% include github-pr.md pr=22563 %} - -- Allow swarm nodes to join with `--availability=drain` to prevent them from - taking non-manager workloads. {% include github-pr.md pr=24993 %} - -- Add `publish` and `expose` filters to `docker ps`, so that containers can be - filtered by port or port range for TCP or UDP protocols {% include github-pr.md pr=27557 %} - -- Add `--no-trunc` and `--format` flags to the `docker service ls` command, and - as well as the ability to specify the default format for `docker service ls` - using the `ServicesFormat` option to the Docker CLI. Also add a - `docker stack services` command. {% include github-pr.md pr=28199 %} - -- Add ability to filter plugins by whether they are enabled or disabled in - `docker plugin ls` output. {% include github-pr.md pr=28627 %} - -- Add `mode` option to `--log-opts` flag for both `docker` and `dockerd`. If set - to `non-blocking`, and the log buffer fills up, log messages will be lost, but - the container will not block. The `max-buffer-size` option controls the - maximum size of the ring buffer. Defaults to `blocking`, which will cause the - container to block if messages cannot be logged. See - [Options for all drivers](/engine/admin/logging/overview.md#options-for-all-drivers). - {% include github-pr.md pr=28762 %} - -- It is no longer possible to inadvertently pull images on an architecture where - they will not run. 
{% include github-pr.md pr=29001 %} - -- It is now possible to create AWS log groups when using the AWS logging driver. - See [`awslogs-create-group`](engine/admin/logging/awslogs.md#awslogs-create-group). - {% include github-pr.md pr=29504 %} - -- Add the ability to filter `docker network ls` output by creation time, using - the `{% raw %}{{CreatedAt}}{% endraw %}` format specifier. - {% include github-pr.md pr=29900 %} - -- Named but untagged images are now removed if you run `docker image prune` if - `--dangling-only` is set to `true`. {% include github-pr.md pr=30330 %} - -- Add `--add-host` flag to `docker build`, which will add entries to the - `/etc/hosts` file of a container created from that image. The `/etc/hosts` - file is not saved within the image itself. {% include github-pr.md pr=30383 %} - -- Prevent `docker network ls` from pulling all the endpoints, to reduce - impact on the network. {% include github-pr.md pr=30673 %} - -- Windows-specific commands and options no longer show in command help text on - non-Windows clients. {% include github-pr.md pr=30780 %} - -- When you specify an IP address when running `docker network connect`, the - IP address is now checked for validity. {% include github-pr.md pr=30807 %} - -- Add the ability to customize bind-mount consistency to be more appropriate - for some platforms and workloads. Options are `consistent` (the default), - `cached`, or `delegated`. {% include github-pr.md pr=31047 %} - -##### Daemon - -- Docker Daemon logging settings no longer affect the `docker build` command. - {% include github-pr.md pr=29552 %} - -- Add a `registry-mirrors` configuration option for the Docker daemon, which - replaces the daemon's registry mirrors with a new set of registry mirrors. - {% include github-pr.md pr=29650 %} - -- Add the ability to specify the default shared memory size for the Docker - daemon, using the `--default-shm-size` or the `default-shm-size` key in - `daemon.json`. 
{% include github-pr.md pr=29692 %} - -- Add a `no-new-privileges` configuration option for the Docker daemon, which - prevents unprivileged containers from gaining new privileges. - {% include github-pr.md pr=29984 %} - -- If a Docker client communicates with an older daemon and attempts to perform - an operation not supported by the daemon, an error is printed, which shows - the API versions of both the client and daemon. - {% include github-pr.md pr=30187 %} - -- The Docker daemon no longer depends upon `sqlite`. This change means that it - is not possible to upgrade the Docker daemon from version 1.9 directly to the - latest version. It is recommended to upgrade from one major version to the - next, in sequence. {% include github-pr.md pr=30208 %} - -##### Dockerfile - -- Using the pattern `**/` in a Dockerfile now (correctly) behaves the same as - `**`. {% include github-pr.md pr=29043 %} - -- Time values less than 1 second are no longer allowed in health-check options - in the Dockerfile. {% include github-pr.md pr=31177 %} - -##### Services - -- When a service is updated with both `--secret-add` and `--secret-rm` in the - same operation, the order of operations is now changed so that the - `--secret-rm` always occurs first. {% include github-pr.md pr=29802 %} - -- Add the ability to create or update a service to be read-only using the - `--read-only` flag. {% include github-pr.md pr=30162 %} - -- Docker now updates swarm nodes if the swarm configuration is updated. - {% include github-pr.md pr=30259 %} - -- Add topology-aware placement preferences for Swarm services. This feature - allows services to be balanced over nodes based on a particular user-defined - property, such as which datacenter or rack they are located in. - See [Control service scale and placement](/engine/swarm/services.md#control-service-scale-and-placement). 
- {% include github-pr.md pr=30725 %} - -- Add the ability to customize the stop signal which will be sent to nodes, when - creating or updating a service. {% include github-pr.md pr=30754 %} - -- Add the ability to address a secret by name or prefix, as well as ID, when - updating it. {% include github-pr.md pr=30856 %} - -- Add the ability to roll back to a previous version of a service if an - updated service fails to deploy. Several flags are available at service - creation or update,to control the rollback action, failure threshold, - monitoring delay, rollback delay, and parallelism. - {% include github-pr.md pr=31108 %} - -- Add the ability to specify the stream when using the Docker service logs API. - {% include github-pr.md pr=31313 %} - -- Add `--tail` and `--since` flags to `docker service logs` command, to filter - the logs by time or to show the tail of the logs and show new content as it - is logged. {% include github-pr.md pr=31500 %} - -- Add a `--verbose` flag to the `docker inspect` command. For swarm networks, - this flag shows all nodes and services attached to the network. - {% include github-pr.md pr=31710 %} - -##### Stacks - -- Compose file version 3.2 is now supported. This includes support for different - types of endpoints and expands the options you can use when specifying mounts. - {% include github-pr.md pr=31795 %} - -
- -
-
-{% endcapture %} -{{ ce-edge-section | markdownify }} -{% endif %} - ## Docker Editions
diff --git a/kitematic/minecraft-server.md b/kitematic/minecraft-server.md index d6b5db5e335..67580284fcc 100644 --- a/kitematic/minecraft-server.md +++ b/kitematic/minecraft-server.md @@ -18,9 +18,9 @@ button. ![create Minecraft container](images/minecraft-create.png) -After the image finishes downloading, you'll see the home screen for the +After the image finishes downloading, you see the home screen for the Minecraft container. Your Minecraft server is now up and running inside a Docker -container. Note that we've marked the IP and port you can use to connect to +container. We've marked the IP and port you can use to connect to your Minecraft server in red (your IP and port may be different from what's shown). @@ -48,13 +48,13 @@ Click on the play button to connect to your Minecraft server and enjoy! ### Change map using Docker volume -Open the "data" folder from Kitematic (You'll need to "Enable all volumes to edit +Open the "data" folder from Kitematic (Select "Enable all volumes to edit files via Finder"). We use Docker Volume to map the folder from the Minecraft Docker container onto your computer. ![Minecraft data volume](images/minecraft-data-volume.png) -The Finder will open, allowing you to replace your current map with the new one +The Finder opens, allowing you to replace your current map with the new one you desire. 
![Minecraft maps](images/minecraft-map.png) diff --git a/kitematic/nginx-web-server.md b/kitematic/nginx-web-server.md index bab647fc5f1..08e49d49e0f 100644 --- a/kitematic/nginx-web-server.md +++ b/kitematic/nginx-web-server.md @@ -5,14 +5,13 @@ keywords: docker, documentation, about, technology, kitematic, gui, nginx, tutor title: 'Kitematic tutorial: Serve a static website with NGINX' --- -In this tutorial, you will: +This tutorial guides you through these steps: - Download and run a web server container - Explore the container's website data natively on your Mac - Use volumes to modify the website data -In this example website we'll be serving the popular 2048 game, as shown below. -Let's get to it! +This example website serves the popular 2048 game. Let's get to it! ![2048 game](images/nginx-2048.png) @@ -24,8 +23,8 @@ Kitematic](index.md). Once installed and running, the app should look like this: ![Nginx create](images/nginx-create.png) Click on the _Create_ button of the `hello-world-nginx` listing as shown above. -Kitematic will download (also known as pull the image) and then run a tiny Nginx web server -in a Docker container, allowing it to serve website data to your Mac. +Kitematic pulls and runs a tiny Nginx web server in a Docker container, allowing +it to serve website data to your Mac. ![download Nginx hello world](images/nginx-hello-world.png) diff --git a/kitematic/rethinkdb-dev-database.md b/kitematic/rethinkdb-dev-database.md index d2c0c36c573..f6912ec1c71 100644 --- a/kitematic/rethinkdb-dev-database.md +++ b/kitematic/rethinkdb-dev-database.md @@ -5,7 +5,7 @@ keywords: docker, documentation, about, technology, kitematic, gui, rethink, tut title: 'Kitematic tutorial: Create a local RethinkDB database for development' --- -In this tutorial, you will: +This tutorial guides you through these steps: - Create a RethinkDB Container for Development - (Advanced) Clone a small Node.js application and write data into RethinkDB. 
@@ -19,8 +19,8 @@ this: ![Rethink create button](images/rethink-create.png) Click on the _Create_ button of the `rethinkdb` image listing in the recommended -list as shown above. This will download & run a RethinkDB container within a few -minutes. Once it's done, you'll have a local RethinkDB database up and running. +list as shown above. This downloads and runs a RethinkDB container within a few +minutes. Once it's done, a local RethinkDB database is up and running. ![Rethink container](images/rethink-container.png) @@ -37,14 +37,13 @@ for you). This means you can now reach RethinkDB via a client driver at ### (Advanced) Save Data into RethinkDB with a local Node.js App -Now, you'll create the RethinkDB example chat application running on your local +Now, create the RethinkDB example chat application running on your local macOS system to test drive your new containerized database. First, if you don't have it yet, [download and install Node.js](http://nodejs.org/). -> **Note**: This example needs Xcode installed. We'll replace it with something -> with fewer dependencies soon. +> **Note**: This example needs Xcode installed. In your terminal, type: diff --git a/kitematic/userguide.md b/kitematic/userguide.md index ad942eba8d8..be0373409bd 100644 --- a/kitematic/userguide.md +++ b/kitematic/userguide.md @@ -14,7 +14,7 @@ interface (GUI) for running Docker containers. Kitematic integrates with [Docker Machine](/machine/) to provision a VirtualBox VM and install the Docker Engine locally on your machine. -Once installed, the Kitematic GUI launches and from the home screen you will be +Once installed, the Kitematic GUI launches and from the home screen you are presented with curated images that you can run instantly. You can search for any public images on Docker Hub from Kitematic just by typing in the search bar. You can use the GUI to create, run and manage your containers just by clicking @@ -60,10 +60,10 @@ and run the container. 
## Working with a container -If you select a non-running container, either stopped, or paused, you will be able -to "Restart" or "Stop" the container using the icons. You can also view the entire +If you select a non-running container, either stopped, or paused, you can +"Restart" or "Stop" the container using the icons. You can also view the entire main container process' output logs, and in the Settings section you can make -changes which will be used if you "Restart" this container. +changes which are used if you "Restart" this container. By selecting a running container from the left list, you can see some state information for your container - either a preview of the HTML output for a container that has a web @@ -72,12 +72,12 @@ configured. ![Redis container in Kitematic](images/cli-redis-container.png) -The summary page will show different things depending on the image metadata. If +The summary page shows different things depending on the image metadata. If a known "web" port (see below) is `EXPOSED`, then Kitematic assumes its a web page, -and will show a preview of the site at `/`. If other ports are exposed, then it -will show a list of those ports, and the Docker daemon IP and port they are mapped -to. If there are any `VOLUMES`, then these will be shown. At minimum, the summary -screen will show the main container process' log output. +and shows a preview of the site at `/`. If other ports are exposed, then it +shows a list of those ports, and the Docker daemon IP and port they are mapped +to. If there are any `VOLUMES`, then these are shown. At minimum, the summary +screen shows the main container process' log output. The currently detected "web" ports are, `80`, `8000`, `8080`, `3000`, `5000`, `2368`, `9200`, and `8983`. @@ -88,15 +88,15 @@ You can view the entire main container process' log output either by clicking on preview image, or by clicking on the "Logs" tab. You can then scroll through the logs from the current running container. 
Note that -if you make changes to the container settings, then the container will be restarted, -so this will reset this log view. +if you make changes to the container settings, then the container is restarted, +so this resets this log view. ### Starting a terminal in a container -The "Terminal" icon at the top of the container summary will `docker exec -i -t sh`. -This will allow you to make quick changes, or to debug a problem. +The "Terminal" icon at the top of the container summary runs `docker exec -i -t sh`. +This allows you to make quick changes, or to debug a problem. -> **Note**: Your exec'ed `sh` process will not have the same environment settings +> **Note**: Your exec'ed `sh` process does not have the same environment settings > as the main container process and its children. > Get the environment commands for your shell: `docker-machine env default`. @@ -113,7 +113,7 @@ Quick access to this folder (or directory) is available via the app: ![Accessing the volumes directory](images/volumes-dir.png) > **Note**: When you "Enable all volumes to edit files in Finder", the Docker -> container will be stopped, removed and re-created with the new `volumes` +> container is stopped, removed and re-created with the new `volumes` > flag. #### Changing Volume Directories @@ -130,7 +130,7 @@ screen allows you to set the mappings individually. ![screen shot 2015-02-28 at 2 48 01 pm](images/change-folder.png) > **Note**: When you "Change Folders", the Docker -> container will be stopped, removed and re-created with the new `volumes` +> container is stopped, removed and re-created with the new `volumes` > flag. ### Setting the container name @@ -140,7 +140,7 @@ with a `-` if there are more than one. To simplify administration, or when using container linking or volumes, you may want to rename it. 
-> **Note**: When you rename the container it will be stopped, removed and +> **Note**: When you rename the container it is stopped, removed and > re-created with the new name (due to the default volumes mapping). ### Adding Environment variables @@ -149,20 +149,20 @@ Many images use environment variables to let you customize them. The "General" "Settings" tab allows you to add and modify the environment variables used to start a container. -The list of environment variables will show any that have been set on the image +The list of environment variables shows any that have been set on the image metadata - for example, using the `ENV` instruction in the Dockerfile. -When you "Save" the changed environment variables, the container will be +When you "Save" the changed environment variables, the container is stopped, removed and re-created. ### Delete container On the "General" "Settings" tab, you can delete the container. Clicking "Delete -Container" will also stop the container if necessary. +Container" also stops the container if necessary. You can also delete a container by clicking the `X` icon in the container list. -Kitematic will prompt you to confirm that you want to delete. +Kitematic prompts you to confirm that you want to delete. #### List the exposed Ports and how to access them @@ -186,15 +186,15 @@ shown below: Start by opening a Docker-CLI ready terminal by clicking the whale button as described above. Once the terminal opens, enter `docker run -d -P redis`. This -will pull and run a new Redis container via the Docker CLI. +pulls, creates, and runs a new Redis container via the Docker CLI. ![Docker CLI terminal window](images/cli-terminal.png) > **Note**: If you're creating containers from the command line, use `docker run -d` > so that Kitematic can re-create the container when settings are changed via the -> Kitematic user interface. Containers started without `-d` will fail to re-start. +> Kitematic user interface. 
Containers started without `-d` fail to restart.
 
-Now, go back to Kitematic. The Redis container should now be visible.
+Now, go back to Kitematic. The Redis container is now visible.
 
 ![Redis container in Kitematic](images/cli-redis-container.png)
 
diff --git a/machine/AVAILABLE_DRIVER_PLUGINS.md b/machine/AVAILABLE_DRIVER_PLUGINS.md
index d3f44b9ee40..8b9fe05527b 100644
--- a/machine/AVAILABLE_DRIVER_PLUGINS.md
+++ b/machine/AVAILABLE_DRIVER_PLUGINS.md
@@ -12,7 +12,7 @@ This document is intended to act as a reference for the available
 3rd-party driver plugins available in the ecosystem beyond the core Machine
 drivers. If you have created a Docker Machine driver, we highly encourage you
 to submit a pull request adding the relevant information to the list. Submitting your
-driver here will allow others to discover it and the core Machine team to keep
+driver here allows others to discover it and the core Machine team to keep
 you informed of upstream changes.
 
 **NOTE**: The linked repositories are not maintained by or formally associated
diff --git a/machine/DRIVER_SPEC.md b/machine/DRIVER_SPEC.md
index d99b01f5d8f..a66efc10a43 100644
--- a/machine/DRIVER_SPEC.md
+++ b/machine/DRIVER_SPEC.md
@@ -19,19 +19,18 @@ for Docker Machine.
 The provider must offer a base operating system supported by the Docker
 Engine.
 
-Currently Machine requires Ubuntu for non-Boot2Docker machines. This will
-change in the future.
+Currently Machine requires Ubuntu for non-Boot2Docker machines.
 
 ## API Access
 
 We prefer accessing the provider service via HTTP APIs and strongly recommend
 using those over external executables. For example, using the Amazon EC2 API
-instead of the EC2 command line tools. If in doubt, contact a project
+instead of the EC2 command line tools. If in doubt, contact a project
 maintainer.
 
 ## SSH
 
-The provider must offer SSH access to control the instance. This does not
+The provider must offer SSH access to control the instance. 
This does not
 have to be public, but must offer it as Machine relies on SSH for system level
 maintenance.
 
@@ -41,49 +40,49 @@ The following instance operations should be supported by the provider.
 
 ## Create
 
-`Create` will launch a new instance and make sure it is ready for provisioning.
+`Create` launches a new instance and makes sure it is ready for provisioning.
 This includes setting up the instance with the proper SSH keys and making
-sure SSH is available including any access control (firewall). This should
+sure SSH is available including any access control (firewall). This should
 return an error on failure.
 
 ## Remove
 
-`Remove` will remove the instance from the provider. This should remove the
+`Remove` removes the instance from the provider. This should remove the
 instance and any associated services or artifacts that were created as part
-of the instance including keys and access groups. This should return an
+of the instance including keys and access groups. This should return an
 error on failure.
 
 ## Start
 
-`Start` will start a stopped instance. This should ensure the instance is
-ready for operations such as SSH and Docker. This should return an error on
+`Start` starts a stopped instance. This should ensure the instance is
+ready for operations such as SSH and Docker. This should return an error on
 failure.
 
 ## Stop
 
-`Stop` will stop a running instance. This should ensure the instance is
+`Stop` stops a running instance. This should ensure the instance is
 stopped and return an error on failure.
 
 ## Kill
 
-`Kill` will forcibly stop a running instance. This should ensure the instance
+`Kill` forcibly stops a running instance. This should ensure the instance
 is stopped and return an error on failure.
 
 ## Restart
 
-`Restart` will restart a running instance. This should ensure the instance
-is ready for operations such as SSH and Docker. This should return an error
+`Restart` restarts a running instance. 
This should ensure the instance +is ready for operations such as SSH and Docker. This should return an error on failure. ## Status -`Status` will return the state of the instance. This should return the -current state of the instance (running, stopped, error, etc). This should +`Status` returns the state of the instance. This should return the +current state of the instance (running, stopped, error, etc). This should return an error on failure. # Testing -Testing is strongly recommended for drivers. Unit tests are preferred as well +Testing is strongly recommended for drivers. Unit tests are preferred as well as inclusion into the [integration tests](https://github.com/docker/machine#integration-tests). # Maintaining @@ -93,8 +92,8 @@ the driver plugins as executables. # Implementation -The following describes what is needed to create a Machine Driver. The driver -interface has methods that must be implemented for all drivers. These include +The following describes what is needed to create a Machine Driver. The driver +interface has methods that must be implemented for all drivers. These include operations such as `Create`, `Remove`, `Start`, `Stop` etc. For details see the [Driver Interface](https://github.com/docker/machine/blob/master/drivers/drivers.go#L24). @@ -117,8 +116,8 @@ Each driver must then use an `init` func to "register" the driver: ## Flags -Driver flags are used for provider specific customizations. To add flags, use -a `GetCreateFlags` func. For example: +Driver flags are used for provider specific customizations. To add flags, use +a `GetCreateFlags` func. 
For example:
 
     func GetCreateFlags() []cli.Flag {
       return []cli.Flag{
diff --git a/machine/completion.md b/machine/completion.md
index 78fbecb49f8..e4b4e37964f 100644
--- a/machine/completion.md
+++ b/machine/completion.md
@@ -30,19 +30,21 @@ Place the completion script in `/etc/bash_completion.d/` as follows:
 sudo curl -L https://raw.githubusercontent.com/docker/machine/v{{site.machine_version}}/contrib/completion/bash/docker-machine.bash -o /etc/bash_completion.d/docker-machine
 ```
 
-Completion will be available upon next login.
+Completion is available upon next login.
 
 ### Zsh
 
-Place the completion script in your `/path/to/zsh/completion`, using e.g. `~/.zsh/completion/`:
+Place the completion script in a `completion` file within the ZSH
+configuration directory, such as `~/.zsh/completion/`.
 
 ```shell
 mkdir -p ~/.zsh/completion
 curl -L https://raw.githubusercontent.com/docker/machine/v{{site.machine_version}}/contrib/completion/zsh/_docker-machine > ~/.zsh/completion/_docker-machine
 ```
 
-Include the directory in your `$fpath`, e.g. by adding in `~/.zshrc`:
+Include the directory in your `$fpath`, by adding a line like the following to the
+`~/.zshrc` configuration file.
 
 ```shell
 fpath=(~/.zsh/completion $fpath)
@@ -62,7 +64,7 @@ exec $SHELL -l
 
 ## Available completions
 
-Depending on what you typed on the command line so far, it will complete:
+Depending on what you typed on the command line so far, it completes:
 
 - commands and their options
 - container IDs and names
diff --git a/machine/concepts.md b/machine/concepts.md
index 979f60b2fb2..680d97a6625 100644
--- a/machine/concepts.md
+++ b/machine/concepts.md
@@ -39,11 +39,16 @@ For a complete list of `docker-machine` subcommands, see the
 
 ## Custom root Certificate Authority for Registry
 
-Users using their own Docker Registry will experience `x509: certificate signed by unknown authority`
-error messages if their registry is signed by custom root Certificate Authority and it is
-not registered with Docker Engine. 
As discussed in the
+If your registry is signed by a custom root Certificate Authority and it is
+not registered with Docker Engine, you may see the following error message:
+
+```none
+x509: certificate signed by unknown authority
+```
+
+As discussed in the
 [Docker Engine documentation](/engine/security/certificates.md#understanding-the-configuration)
-certificates should be placed at `/etc/docker/certs.d/hostname/ca.crt`
+place the certificates in `/etc/docker/certs.d/hostname/ca.crt`
 where `hostname` is your Registry server's hostname.
 
 ```console
@@ -62,20 +67,19 @@ or firewall issues. There are also reasons from the other end of the chain:
 your cloud provider or the network in between.
 
 To help `docker-machine` be as stable as possible, we added a monitoring of
-crashes whenever you try to `create` or `upgrade` a host. This will send, over
+crashes whenever you try to `create` or `upgrade` a host. This sends, over
 HTTPS, to Bugsnag some information about your `docker-machine` version, build,
 OS, ARCH, the path to your current shell and, the history of the last command as
 you could see it with a `--debug` option. This data is sent to help us pinpoint
-recurring issues with `docker-machine` and will only be transmitted in the case
+recurring issues with `docker-machine` and is only transmitted in the case
 of a crash of `docker-machine`.
 
-If you wish to opt out of error reporting, you can create a `no-error-report`
-file in your `$HOME/.docker/machine` directory, and Docker Machine will disable
-this behavior. e.g.:
+To opt out of error reporting, create a `no-error-report`
+file in your `$HOME/.docker/machine` directory:
 
     $ mkdir -p ~/.docker/machine && touch ~/.docker/machine/no-error-report
 
-Leaving the file empty is fine -- Docker Machine just checks for its presence.
+The file doesn't need to have any contents.
 
 ## Getting help
 
@@ -83,14 +87,14 @@ Docker Machine is still in its infancy and under active development. 
If you need help, would like to contribute, or simply want to talk about the project with like-minded individuals, we have a number of open channels for communication. -- To report bugs or file feature requests: please use the +- To report bugs or file feature requests, use the [issue tracker on Github](https://github.com/docker/machine/issues). -- To talk about the project with people in real time: please join the +- To talk about the project with people in real time, join the `#docker-machine` channel on IRC. -- To contribute code or documentation changes: please +- To contribute code or documentation changes, [submit a pull request on Github](https://github.com/docker/machine/pulls). -For more information and resources, please visit +For more information and resources, visit [our help page](/opensource/get-help.md). ## Where to go next diff --git a/machine/drivers/aws.md b/machine/drivers/aws.md index a021e6da906..04949e08d07 100644 --- a/machine/drivers/aws.md +++ b/machine/drivers/aws.md @@ -65,13 +65,13 @@ You can use environment variables: - `--amazonec2-ssh-keypath`: Path to Private Key file to use for instance. Matching public key with .pub extension should exist - `--amazonec2-ssh-user`: The SSH Login username, which must match the default SSH user set in the ami used. - `--amazonec2-subnet-id`: AWS VPC subnet ID. -- `--amazonec2-tags`: AWS extra tag key-value pairs (comma-separated, e.g. key1,value1,key2,value2). +- `--amazonec2-tags`: AWS extra tag key-value pairs. Comma-separated. For example, `key1,value1,key2,value2`. - `--amazonec2-use-ebs-optimized-instance`: Create an EBS Optimized Instance, instance type must support it. - `--amazonec2-use-private-address`: Use the private IP address for docker-machine, but still create a public IP address. - `--amazonec2-userdata`: Path to file with cloud-init user data. - `--amazonec2-volume-type`: The Amazon EBS volume type to be attached to the instance. 
- `--amazonec2-vpc-id`: Your VPC ID to launch the instance in. -- `--amazonec2-zone`: The AWS zone to launch the instance in (i.e. one of a,b,c,d,e). +- `--amazonec2-zone`: The AWS zone to launch the instance in (one of a,b,c,d,e). @@ -112,7 +112,7 @@ You can use environment variables: ## Default AMIs -By default, the Amazon EC2 driver will use a daily image of Ubuntu 16.04 LTS. +By default, the Amazon EC2 driver uses a daily image of Ubuntu 16.04 LTS. | Region | AMI ID | | -------------- | ------------ | @@ -131,13 +131,13 @@ By default, the Amazon EC2 driver will use a daily image of Ubuntu 16.04 LTS. ## Security Group -Note that a security group will be created and associated to the host. This security group will have the following ports opened inbound: +A security group is created and associated to the host. This security group has the following ports opened inbound: - ssh (22/tcp) - docker (2376/tcp) - swarm (3376/tcp), only if the node is a swarm master -If you specify a security group yourself using the `--amazonec2-security-group` flag, the above ports will be checked and opened and the security group modified. +If you specify a security group yourself using the `--amazonec2-security-group` flag, the above ports are checked and opened and the security group modified. If you want more ports to be opened, like application specific ports, use the AWS console and modify the configuration manually. ## VPC ID @@ -163,7 +163,7 @@ This example assumes the VPC ID was found in the `a` availability zone. Use the` ## VPC Connectivity Machine uses SSH to complete the set up of instances in EC2 and requires the ability to access the instance directly. -If you use the flag `--amazonec2-private-address-only`, you will need to ensure that you have some method of accessing the new instance from within the internal network of the VPC (e.g. a corporate VPN to the VPC, a VPN instance inside the VPC or using Docker-machine from an instance within your VPC). 
+If you use the flag `--amazonec2-private-address-only`, ensure that you can access the new instance from within the internal network of the VPC, such as a corporate VPN to the VPC, a VPN instance inside the VPC, or using `docker-machine` from an instance within your VPC. Configuration of VPCs is beyond the scope of this guide, however the first step in troubleshooting is ensuring if you are using private subnets that you follow the design guidance in the [AWS VPC User Guide](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Scenario2.html) and have some form of NAT available so that the set up process can access the internet to complete set up. diff --git a/machine/drivers/azure.md b/machine/drivers/azure.md index 190fe8b7573..9c6f2115077 100644 --- a/machine/drivers/azure.md +++ b/machine/drivers/azure.md @@ -4,7 +4,7 @@ keywords: machine, Microsoft Azure, driver title: Microsoft Azure --- -You will need an Azure Subscription to use this Docker Machine driver. +You need an Azure Subscription to use this Docker Machine driver. [Sign up for a free trial.][trial] > **NOTE:** This documentation is for the new version of the Azure driver, which started @@ -17,7 +17,7 @@ You will need an Azure Subscription to use this Docker Machine driver. ## Authentication -The first time you try to create a machine, Azure driver will ask you to +The first time you try to create a machine, Azure driver asks you to authenticate: $ docker-machine create --driver azure --azure-subscription-id @@ -25,7 +25,7 @@ authenticate: Microsoft Azure: To sign in, use a web browser to open the page https://aka.ms/devicelogin. Enter the code [...] to authenticate. -After authenticating, the driver will remember your credentials up to two weeks. +After authenticating, the driver remembers your credentials up to two weeks. 
> **KNOWN ISSUE:** There is a known issue with Azure Active Directory causing stored
> credentials to expire within hours rather than 14 days when the user logs in with
@@ -48,7 +48,7 @@ Optional:
 
 - `--azure-availability-set`: Azure Availability Set to place the virtual machine into. [[?][av-set]]
 - `--azure-docker-port`: Port number for Docker engine.
-- `--azure-environment`: Azure environment (e.g. `AzurePublicCloud`, `AzureChinaCloud`).
+- `--azure-environment`: Azure environment. For example, `AzurePublicCloud` or `AzureChinaCloud`.
 - `--azure-image`: Azure virtual machine image in the format of Publisher:Offer:Sku:Version [[?][vm-image]]
 - `--azure-location`: Azure region to create the virtual machine. [[?][location]]
 - `--azure-no-public-ip`: Do not create a public IP address for the machine (implies `--azure-use-private-ip`). Should be used only when creating machines from an Azure VM within the same subnet.
@@ -60,7 +60,7 @@ Optional:
 
 - `--azure-static-public-ip`: Assign a static public IP address to the machine.
 - `--azure-subnet`: Azure Subnet Name to be used within the Virtual Network.
 - `--azure-subnet-prefix`: Private CIDR block. Used to create subnet if it does not exist. Must match in the case that the subnet does exist.
-- `--azure-use-private-ip`: Use private IP address of the machine to connect. It's useful for managing Docker machines from another machine on the same network e.g. while deploying Swarm.
+- `--azure-use-private-ip`: Use private IP address of the machine to connect. Useful for managing Docker machines from another machine on the same network, such as when deploying Swarm.
 - `--azure-vnet`: Azure Virtual Network name to connect the virtual machine. [[?][vnet]] To specify a Virtual Network from another resource group, use `resourcegroup:vnet-name` format. 
diff --git a/machine/drivers/digital-ocean.md b/machine/drivers/digital-ocean.md index 1f9a905b282..d00e339803c 100644 --- a/machine/drivers/digital-ocean.md +++ b/machine/drivers/digital-ocean.md @@ -12,17 +12,17 @@ Control Panel and pass that to `docker-machine create` with the `--digitalocean- ## Usage $ docker-machine create --driver digitalocean --digitalocean-access-token=aa9399a2175a93b17b1c86c807e08d3fc4b79876545432a629602f61cf6ccd6b test-this - -### When explicitly passing environment variables - + +### When explicitly passing environment variables + export DIGITALOCEAN_ACCESS_TOKEN="yourtoken"; export DIGITALOCEAN_SSH_KEY_FINGERPRINT="from your DO's profile security-ssh keys"; \ export DIGITALOCEAN_IMAGE="centos-7-x64"; export DIGITALOCEAN_REGION="tor1" - + $ docker-machine create --driver digitalocean --digitalocean-access-token $DIGITALOCEAN_ACCESS_TOKEN --digitalocean-ssh-key-fingerprint $DIGITALOCEAN_SSH_KEY_FINGERPRINT --digitalocean-image $DIGITALOCEAN_IMAGE --digitalocean-region $DIGITALOCEAN_REGION - + ### When passing a boolean value to any option $ docker-machine create --driver digitalocean --digitalocean-access-token=aa9399a2175a93b17b1c86c807e08d3fc4b79876545432a629602f61cf6ccd6b --digitalocean-size 1gb --digitalocean-backups=true test-this - + ## Options @@ -39,7 +39,7 @@ Control Panel and pass that to `docker-machine create` with the `--digitalocean- - `--digitalocean-tags`: Comma-separated list of tags to apply to the Droplet, see [Droplet tagging](https://developers.digitalocean.com/documentation/v2/#tags) - `--digitalocean-userdata`: Path to file containing User Data for the droplet. -The DigitalOcean driver will use `ubuntu-16-04-x64` as the default image. +The DigitalOcean driver uses `ubuntu-16-04-x64` as the default image. 
#### Environment variables and default values diff --git a/machine/drivers/exoscale.md b/machine/drivers/exoscale.md index db68d625ad9..ec820012f8d 100644 --- a/machine/drivers/exoscale.md +++ b/machine/drivers/exoscale.md @@ -17,15 +17,15 @@ Get your API key and API secret key from [API details](https://portal.exoscale.c ## Options -- `--exoscale-affinity-group`: [Anti-affinity group][anti-affinity] the machine will be started in. +- `--exoscale-affinity-group`: [Anti-affinity group][anti-affinity] the machine is started in. - `--exoscale-api-key`: **required** Your API key; - `--exoscale-api-secret-key`: **required** Your API secret key; - `--exoscale-availability-zone`: Exoscale [availability zone][datacenters] (ch-dk-2, at-vie-1, de-fra-1, ...); - `--exoscale-disk-size`: Disk size for the host in GB (10, 50, 100, 200, 400); -- `--exoscale-image`: Image template (e.g. `ubuntu-16.04` also known as `Linux Ubuntu 16.04 LTS 64-bit`, [see below](#image-template-name)); +- `--exoscale-image`: Image template, for example `ubuntu-16.04`, also known as `Linux Ubuntu 16.04 LTS 64-bit`, [see below](#image-template-name); - `--exoscale-instance-profile`: Instance profile (Small, Medium, Large, ...); -- `--exoscale-security-group`: Security group. _It will be created if it doesn't exist_; -- `--exoscale-ssh-user`: SSH username (e.g. `ubuntu`, [see below](#ssh-username)); +- `--exoscale-security-group`: Security group. _It is created if it doesn't exist_; +- `--exoscale-ssh-user`: SSH username, such as `ubuntu`, [see below](#ssh-username); - `--exoscale-url`: Your API endpoint; - `--exoscale-userdata`: Path to file containing user data for [cloud-init](https://cloud-init.io/); @@ -51,7 +51,8 @@ Get your API key and API secret key from [API details](https://portal.exoscale.c The [VM templates][templates] available at Exoscale are listed on the Portal when adding a new instance. -For any Linux template, you may use the shorter name composed only of the name and version. E.g. 
+For any Linux template, you may use the shorter name composed only of the name +and version, as shown below. | Full name | Short name | | ------------------------------- | -------------------- | @@ -60,7 +61,7 @@ For any Linux template, you may use the shorter name composed only of the name a | Linux CentOS 7.3 64-bit | `centos-7.3` | | Linux CoreOS stable 1298 64-bit | `coreos-stable-1298` | -**NB:** Docker won't work for non-Linux machines like OpenBSD and Windows Server. +**NB:** Docker doesn't work for non-Linux machines like OpenBSD and Windows Server. ### SSH Username diff --git a/machine/drivers/gce.md b/machine/drivers/gce.md index 2dd0316ba47..6ca7196be16 100644 --- a/machine/drivers/gce.md +++ b/machine/drivers/gce.md @@ -5,15 +5,15 @@ title: Google Compute Engine --- Create machines on [Google Compute Engine](https://cloud.google.com/compute/). -You will need a Google account and a project ID. -See for details on projects. +You need a Google account and a project ID. +See [https://cloud.google.com/compute/docs/projects](https://cloud.google.com/compute/docs/projects) for details on projects. ### Credentials The Google driver uses [Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) to get authorization credentials for use in calling Google APIs. -So if `docker-machine` is used from a GCE host, authentication will happen automatically +So if `docker-machine` is used from a GCE host, authentication occurs automatically via the built-in service account. Otherwise, [install gcloud](https://cloud.google.com/sdk/) and get through the oauth2 process with `gcloud auth login`. @@ -48,12 +48,12 @@ To create a machine instance, specify `--driver google`, the project ID and the - `--google-subnetwork`: Specify subnetwork in which to provision VM. - `--google-tags`: Instance tags (comma-separated). - `--google-use-existing`: Don't create a new VM, use an existing one. 
This is useful when you'd like to provision Docker on a VM you created yourself, maybe because it uses create options not supported by this driver. -- `--google-use-internal-ip-only`: When this option is used during create, the new VM will not be assigned a public IP address. This is useful only when the host running `docker-machine` is located inside the Google Cloud infrastructure; otherwise, `docker-machine` can't reach the VM to provision the Docker daemon. The presence of this flag implies `--google-use-internal-ip`. -- `--google-use-internal-ip`: When this option is used during create it will make docker-machine use internal rather than public NATed IPs. The flag is persistent in the sense that a machine created with it retains the IP. It's useful for managing docker machines from another machine on the same network e.g. while deploying swarm. +- `--google-use-internal-ip-only`: When this option is used during create, the new VM is not assigned a public IP address. This is useful only when the host running `docker-machine` is located inside the Google Cloud infrastructure; otherwise, `docker-machine` can't reach the VM to provision the Docker daemon. The presence of this flag implies `--google-use-internal-ip`. +- `--google-use-internal-ip`: When this option is used during create, docker-machine uses internal rather than public NATed IPs. The flag is persistent in the sense that a machine created with it retains the IP. It's useful for managing docker machines from another machine on the same network, such as when deploying swarm. - `--google-username`: The username to use for the instance. - `--google-zone`: The zone to launch the instance. -The GCE driver will use the `ubuntu-1604-xenial-v20161130` instance image unless otherwise specified. To obtain a +The GCE driver uses the `ubuntu-1604-xenial-v20161130` instance image unless otherwise specified. 
To obtain a list of image URLs run: gcloud compute images list --uri @@ -62,7 +62,7 @@ Google Compute Engine supports [image families](https://cloud.google.com/compute An image family is like an image alias that always points to the latest image in the family. To create an instance from an image family, set `--google-machine-image` to the family's URL. -The following command will show images and which family they belong to (if any): +The following command shows images and which family they belong to (if any): gcloud compute images list diff --git a/machine/drivers/generic.md b/machine/drivers/generic.md index f70c72426e6..38aaf42d146 100644 --- a/machine/drivers/generic.md +++ b/machine/drivers/generic.md @@ -10,14 +10,14 @@ This is useful if you are using a provider that Machine does not support directly or if you would like to import an existing host to allow Docker Machine to manage. -The driver will perform a list of tasks on create: +The driver performs a list of tasks on create: -- If docker is not running on the host, it will be installed automatically. -- It will update the host packages (`apt-get update`, `yum update`...). -- It will generate certificates to secure the docker daemon. -- If the host uses systemd, it will create /etc/systemd/system/docker.service.d/10-machine.conf -- The docker daemon will be restarted, thus all running containers will be stopped. -- The hostname will be changed to fit the machine name. +- If docker is not running on the host, it is installed automatically. +- It updates the host packages (`apt-get update`, `yum update`...). +- It generates certificates to secure the docker daemon. +- If the host uses systemd, it creates /etc/systemd/system/docker.service.d/10-machine.conf +- The docker daemon restarts, thus all running containers are stopped. +- The hostname is updated to fit the machine name. ### Example @@ -35,14 +35,14 @@ to the host. 
### Sudo privileges The user that is used to SSH into the host can be specified with -`--generic-ssh-user` flag. This user has to have password-less sudo +`--generic-ssh-user` flag. This user needs password-less sudo privileges. If it's not the case, you need to edit the `sudoers` file and configure the user as a sudoer with `NOPASSWD`. See https://help.ubuntu.com/community/Sudoers. ### Options -- `--generic-engine-port`: Port to use for Docker Daemon (Note: This flag will not work with boot2docker). +- `--generic-engine-port`: Port to use for Docker Daemon (Note: This flag does not work with boot2docker). - `--generic-ip-address`: **required** IP Address of host. - `--generic-ssh-key`: Path to the SSH user private key. - `--generic-ssh-user`: SSH username used to connect. @@ -62,12 +62,12 @@ as a sudoer with `NOPASSWD`. See https://help.ubuntu.com/community/Sudoers. ### Systemd settings -For systems that use systemd, if you have an existing configuration defined in +For systems that use systemd, if you have an existing configuration defined in '/etc/systemd/system/docker.service.d/' this may conflict with the settings created by -docker-machine. Make sure you don't have any other configuration files in this location +docker-machine. Make sure you don't have any other configuration files in this location that override the [ExecStart] setting. -Once you have confirmed any conflicting settings have been removed, run +Once you have confirmed any conflicting settings have been removed, run `sudo systemctl daemon reload` followed by `sudo systemctl restart docker` diff --git a/machine/drivers/hyper-v.md b/machine/drivers/hyper-v.md index d4e48bfe73e..4bd1b365159 100644 --- a/machine/drivers/hyper-v.md +++ b/machine/drivers/hyper-v.md @@ -15,9 +15,9 @@ Hyper-V](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick > **Notes**: > -> * You will need to use an Administrator level account to create and manage Hyper-V machines. 
+> * You must use an Administrator level account to create and manage Hyper-V machines. > ->* You will need an existing virtual switch to use the +>* You need an existing virtual switch to use the > driver. Hyper-V can share an external network interface (aka > bridging), see [this blog](http://blogs.technet.com/b/canitpro/archive/2014/03/11/step-by-step-enabling-hyper-v-for-use-on-windows-8-1.aspx). > If you would like to use NAT, create an internal network, and use @@ -83,7 +83,7 @@ For this example, we created a virtual switch called `Primary Virtual Switch`. #### 4. Create the nodes with Docker Machine and the Microsoft Hyper-V driver -* Start an "elevated" PowerShell (i.e., running as administrator). To do this, search for PowerShell, right-click, and choose Run as administrator. +* Start an "elevated" PowerShell with administrator privileges. To do this, search for PowerShell, right-click, and choose Run as administrator. * Run the `docker-machine create` commands to create machines. @@ -98,7 +98,7 @@ you can create these swarm nodes: `manager1`, `worker1`, `worker2`. ```shell docker-machine create -d hyperv --hyperv-virtual-switch "" ``` - + Here is an example of creating `manager1` node: ```shell @@ -129,7 +129,7 @@ you can create these swarm nodes: `manager1`, `worker1`, `worker2`. ``` * Use the same process, driver, and network switch to create the other nodes. - For our example, the commands will look like this: + For our example, the commands are: ```shell docker-machine create -d hyperv --hyperv-virtual-switch "Primary Virtual Switch" worker1 diff --git a/machine/drivers/openstack.md b/machine/drivers/openstack.md index 4a806f53c68..e3424168409 100644 --- a/machine/drivers/openstack.md +++ b/machine/drivers/openstack.md @@ -9,8 +9,8 @@ Create machines on [OpenStack](http://www.openstack.org/software/) Mandatory: - `--openstack-auth-url`: Keystone service base URL. 
-- `--openstack-flavor-id` or `--openstack-flavor-name`: Identify the flavor that will be used for the machine. -- `--openstack-image-id` or `--openstack-image-name`: Identify the image that will be used for the machine. +- `--openstack-flavor-id` or `--openstack-flavor-name`: Identify the flavor used for the machine. +- `--openstack-image-id` or `--openstack-image-name`: Identify the image used for the machine. ## Usage @@ -23,21 +23,21 @@ Mandatory: - `--openstack-domain-name` or `--openstack-domain-id`: Domain to use for authentication (Keystone v3 only). - `--openstack-endpoint-type`: Endpoint type can be `internalURL`, `adminURL`, or `publicURL`. It is a helper for the driver to choose the right URL in the OpenStack service catalog. If not provided the default is `publicURL`. -- `--openstack-floatingip-pool`: The IP pool that will be used to get a public IP can assign it to the machine. If there is an - IP address already allocated but not assigned to any machine, this IP will be chosen and assigned to the machine. If - there is no IP address already allocated, a new IP will be allocated and assigned to the machine. +- `--openstack-floatingip-pool`: The IP pool used to get a public IP can assign it to the machine. If there is an + IP address already allocated but not assigned to any machine, this IP is chosen and assigned to the machine. If + there is no IP address already allocated, a new IP is allocated and assigned to the machine. - `--openstack-keypair-name`: Specify the existing Nova keypair to use. -- `--openstack-insecure`: Explicitly allow openstack driver to perform "insecure" SSL (https) requests. The server's certificate will not be verified against any certificate authorities. This option should be used with caution. -- `--openstack-ip-version`: If the instance has both IPv4 and IPv6 address, you can select IP version. If not provided `4` will be used. 
-- `--openstack-net-name` or `--openstack-net-id`: Identify the private network the machine will be connected on. If your OpenStack project contains only one private network it will be use automatically. +- `--openstack-insecure`: Explicitly allow openstack driver to perform "insecure" SSL (https) requests. The server's certificate is not verified against any certificate authorities. This option should be used with caution. +- `--openstack-ip-version`: If the instance has both IPv4 and IPv6 addresses, you can select IP version. If not provided, defaults to `4`. +- `--openstack-net-name` or `--openstack-net-id`: Identify the private network the machine is connected to. If your OpenStack project contains only one private network it is used automatically. - `--openstack-password`: User password. It can be omitted if the standard environment variable `OS_PASSWORD` is set. - `--openstack-private-key-file`: Used with `--openstack-keypair-name`, associates the private key to the keypair. - `--openstack-region`: The region to work on. Can be omitted if there is only one region on the OpenStack. - `--openstack-sec-groups`: If security groups are available on your OpenStack you can specify a comma separated list - to use for the machine (e.g. `secgrp001,secgrp002`). + to use for the machine, such as `secgrp001,secgrp002`. - `--openstack-ssh-port`: Customize the SSH port if the SSH server on the machine does not listen on the default port. -- `--openstack-ssh-user`: The username to use for SSH into the machine. If not provided `root` will be used. +- `--openstack-ssh-user`: The username to use for SSH into the machine. If not provided, defaults to `root`. -- `--openstack-tenant-name` or `--openstack-tenant-id`: Identify the tenant in which the machine will be created. +- `--openstack-tenant-name` or `--openstack-tenant-id`: Identify the tenant in which the machine is created. - `--openstack-user-data-file`: File containing an OpenStack userdata script. 
- `--openstack-username`: User identifier to authenticate with. diff --git a/machine/drivers/os-base.md b/machine/drivers/os-base.md index dc4a369bd88..6bab20fe8ba 100644 --- a/machine/drivers/os-base.md +++ b/machine/drivers/os-base.md @@ -46,5 +46,5 @@ provider's image flag and one of its available images. For example, to select a If you change the parent image for a provider, you may also need to change the SSH user. For example, the default Red Hat AMI on EC2 expects the -SSH user to be `ec2-user`, so you would have to specify this with +SSH user to be `ec2-user`, so you need to specify this with `--amazonec2-ssh-user ec2-user`. diff --git a/machine/drivers/rackspace.md b/machine/drivers/rackspace.md index dde4b2ee7a6..2ab5d1b39bc 100644 --- a/machine/drivers/rackspace.md +++ b/machine/drivers/rackspace.md @@ -14,7 +14,7 @@ Create machines on [Rackspace cloud](http://www.rackspace.com/cloud) - `--rackspace-active-timeout`: Rackspace active timeout - `--rackspace-api-key`: **required** Rackspace API key. -- `--rackspace-docker-install`: Set if Docker has to be installed on the machine. +- `--rackspace-docker-install`: Set if Docker needs to be installed on the machine. - `--rackspace-endpoint-type`: Rackspace endpoint type (`adminURL`, `internalURL` or the default `publicURL`). - `--rackspace-flavor-id`: Rackspace flavor ID. Default: General Purpose 1GB. - `--rackspace-image-id`: Rackspace image ID. Default: Ubuntu 16.04 LTS (Xenial Xerus) (PVHVM). @@ -23,7 +23,7 @@ Create machines on [Rackspace cloud](http://www.rackspace.com/cloud) - `--rackspace-ssh-user`: SSH user for the newly booted machine. - `--rackspace-username`: **required** Rackspace account username. -The Rackspace driver will use `821ba5f4-712d-4ec8-9c65-a3fa4bc500f9` (Ubuntu 16.04 LTS) by default. +The Rackspace driver uses `821ba5f4-712d-4ec8-9c65-a3fa4bc500f9` (Ubuntu 16.04 LTS) by default. 
#### Environment variables and default values diff --git a/machine/drivers/soft-layer.md b/machine/drivers/soft-layer.md index 4ff4e874c39..a4ad5a76b3f 100644 --- a/machine/drivers/soft-layer.md +++ b/machine/drivers/soft-layer.md @@ -18,21 +18,21 @@ You need to generate an API key in the softlayer control panel. - `--softlayer-api-endpoint`: Change SoftLayer API endpoint. - `--softlayer-api-key`: **required** API key for your user account. - `--softlayer-cpu`: Number of CPUs for the machine. -- `--softlayer-disk-size`: A value of `0` will set the SoftLayer default. +- `--softlayer-disk-size`: A value of `0` sets the SoftLayer default. - `--softlayer-domain`: **required** Domain name for the machine. - `--softlayer-hostname`: Hostname for the machine. - `--softlayer-hourly-billing`: Specifies that hourly billing should be used, otherwise monthly billing is used. - `--softlayer-image`: OS Image to use. - `--softlayer-local-disk`: Use local machine disk instead of SoftLayer SAN. - `--softlayer-memory`: Memory for host in MB. -- `--softlayer-network-max-speed`: Speed of network uplinks in Mbps (e.g., 1000, 100, 10). +- `--softlayer-network-max-speed`: Speed of network uplinks in Mbps (1000, 100, 10). - `--softlayer-private-net-only`: Disable public networking. - `--softlayer-private-vlan-id`: Your private VLAN ID. - `--softlayer-public-vlan-id`: Your public VLAN ID. - `--softlayer-region`: SoftLayer region. - `--softlayer-user`: **required** Username for your SoftLayer account, API key needs to match this user. -The SoftLayer driver will use `UBUNTU_LATEST` as the image type by default. +The SoftLayer driver uses `UBUNTU_LATEST` as the image type by default. 
#### Environment variables and default values diff --git a/machine/drivers/virtualbox.md b/machine/drivers/virtualbox.md index 6473c03343d..63186007ff0 100644 --- a/machine/drivers/virtualbox.md +++ b/machine/drivers/virtualbox.md @@ -6,8 +6,8 @@ title: Oracle VirtualBox Create machines locally using [VirtualBox](https://www.virtualbox.org/). This driver requires VirtualBox 5+ to be installed on your host. -Using VirtualBox 4.3+ should work but will give you a warning. Older versions -will refuse to work. +Using VirtualBox 4.3+ should work but emits a warning. Older versions +do not work. ## Usage @@ -43,27 +43,27 @@ The size of the VM's disk can be configured this way: - `--virtualbox-ui-type`: Specify the UI Type: (gui|sdl|headless|separate) The `--virtualbox-boot2docker-url` flag takes a few different forms. By -default, if no value is specified for this flag, Machine will check locally for -a boot2docker ISO. If one is found, that will be used as the ISO for the +default, if no value is specified for this flag, Machine checks locally for +a boot2docker ISO. If one is found, it is used as the ISO for the created machine. If one is not found, the latest ISO release available on -[boot2docker/boot2docker](https://github.com/boot2docker/boot2docker) will be -downloaded and stored locally for future use. Note that this means you must run +[boot2docker/boot2docker](https://github.com/boot2docker/boot2docker) is +downloaded and stored locally for future use. Therefore, you must run `docker-machine upgrade` deliberately on a machine if you wish to update the "cached" boot2docker ISO. This is the default behavior (when `--virtualbox-boot2docker-url=""`), but the option also supports specifying ISOs by the `http://` and `file://` protocols. 
-`file://` will look at the path specified locally to locate the ISO: for +`file://` looks at the path specified locally to locate the ISO: for instance, you could specify `--virtualbox-boot2docker-url file://$HOME/Downloads/rc.iso` to test out a release candidate ISO that you have downloaded already. You could also just get an ISO straight from the Internet using the `http://` form. To customize the host only adapter, you can use the `--virtualbox-hostonly-cidr` -flag. This will specify the host IP and Machine will calculate the VirtualBox +flag. This specifies the host IP and Machine calculates the VirtualBox DHCP server address (a random IP on the subnet between `.1` and `.25`) so it does not clash with the specified host IP. -Machine will also specify the DHCP lower bound to `.100` and the upper bound +Machine specifies the DHCP lower bound to `.100` and the upper bound to `.254`. For example, a specified CIDR of `192.168.24.1/24` would have a DHCP server between `192.168.24.2-25`, a lower bound of `192.168.24.100` and upper bound of `192.168.24.254`. @@ -95,6 +95,6 @@ Vboxfs suffers from a [longstanding bug](https://www.virtualbox.org/ticket/9069) causing [sendfile(2)](http://linux.die.net/man/2/sendfile) to serve cached file contents. -This will often cause problems when using a web server such as nginx to serve +This causes problems when using a web server such as Nginx to serve static files from a shared volume. For development environments, a good workaround is to disable sendfile in your server configuration. diff --git a/machine/drivers/vm-cloud.md b/machine/drivers/vm-cloud.md index 3f54edc6641..4904c08a6c8 100644 --- a/machine/drivers/vm-cloud.md +++ b/machine/drivers/vm-cloud.md @@ -4,7 +4,9 @@ keywords: machine, VMware vCloud Air, driver title: VMware vCloud Air --- -Creates machines on [vCloud Air](http://vcloud.vmware.com) subscription service. You need an account within an existing subscription of vCloud Air VPC or Dedicated Cloud. 
+Creates machines on [vCloud Air](http://vcloud.vmware.com) subscription service. +You need an account within an existing subscription of vCloud Air VPC or +Dedicated Cloud. ## Usage @@ -27,7 +29,7 @@ Creates machines on [vCloud Air](http://vcloud.vmware.com) subscription service. - `--vmwarevcloudair-username`: **required** vCloud Air Username. - `--vmwarevcloudair-vdcid`: Virtual Data Center ID. -The VMware vCloud Air driver will use the `Ubuntu Server 12.04 LTS (amd64 20140927)` image by default. +The VMware vCloud Air driver uses the `Ubuntu Server 12.04 LTS (amd64 20140927)` image by default. #### Environment variables and default values diff --git a/machine/drivers/vsphere.md b/machine/drivers/vsphere.md index 64318397828..7e986a951ec 100644 --- a/machine/drivers/vsphere.md +++ b/machine/drivers/vsphere.md @@ -4,7 +4,10 @@ keywords: machine, VMware vSphere, driver title: VMware vSphere --- -Creates machines on a [VMware vSphere](http://www.vmware.com/products/vsphere) Virtual Infrastructure. The machine must have a working vSphere ESXi installation. You can use a paid license or free 60 day trial license. Your installation may also include an optional VCenter server. +Creates machines on a [VMware vSphere](http://www.vmware.com/products/vsphere) +Virtual Infrastructure. The machine must have a working vSphere ESXi +installation. You can use a paid license or free 60 day trial license. Your +installation may also include an optional VCenter server. ## Usage @@ -17,9 +20,9 @@ Creates machines on a [VMware vSphere](http://www.vmware.com/products/vsphere) V - `--vmwarevsphere-datacenter`: Datacenter for Docker VM (must be set to `ha-datacenter` when connecting to a single host). - `--vmwarevsphere-datastore`: Datastore for Docker VM. - `--vmwarevsphere-disk-size`: Size of disk for Docker VM (in MB). -- `--vmwarevsphere-hostsystem`: vSphere compute resource where the docker VM will be instantiated. This can be omitted if using a cluster with DRS. 
+- `--vmwarevsphere-hostsystem`: vSphere compute resource where the docker VM is instantiated. This can be omitted if using a cluster with DRS. - `--vmwarevsphere-memory-size`: Size of memory for Docker VM (in MB). -- `--vmwarevsphere-network`: Network where the Docker VM will be attached. +- `--vmwarevsphere-network`: Network where the Docker VM is attached. - `--vmwarevsphere-password`: **required** vSphere Password. - `--vmwarevsphere-pool`: Resource pool for Docker VM. - `--vmwarevsphere-username`: **required** vSphere Username. diff --git a/machine/examples/aws.md b/machine/examples/aws.md index 1201652ae00..46d5052c4cd 100644 --- a/machine/examples/aws.md +++ b/machine/examples/aws.md @@ -17,7 +17,7 @@ from those Docker desktop applications. See Docker Cloud (Edge feature) on [Mac](/docker-for-mac/index.md#docker-cloud-edge-feature) or [Windows](/docker-for-windows/index.md#docker-cloud-edge-feature). > -> Docker Machine will still work as described here, but Docker Cloud +> Docker Machine still works as described here, but Docker Cloud supercedes Machine for this purpose. {: .important} @@ -46,7 +46,7 @@ Follow along with this example to create a Dockerized [Amazon Web Services (AWS) 1. Optionally, create an AWS credential file. You can create an `~/.aws/credentials` file to hold your AWS keys so that - you don't have to type them every time you run the `docker-machine create` + you don't need to type them every time you run the `docker-machine create` command. Here is an example of a credentials file. ```conf diff --git a/machine/examples/index.md b/machine/examples/index.md index f50b1b05513..8ba93f3ada2 100644 --- a/machine/examples/index.md +++ b/machine/examples/index.md @@ -16,7 +16,7 @@ Docker desktop applications. See Docker Cloud (Edge feature) on [Mac](/docker-for-mac/index.md#docker-cloud-edge-feature) or [Windows](/docker-for-windows/index.md#docker-cloud-edge-feature). 
> -> Docker Machine will still work as described here, but Docker Cloud +> Docker Machine still works as described here, but Docker Cloud supercedes Machine for this purpose. {: .important} diff --git a/machine/examples/ocean.md b/machine/examples/ocean.md index a50358ca182..36342cd1e0b 100644 --- a/machine/examples/ocean.md +++ b/machine/examples/ocean.md @@ -16,7 +16,7 @@ those Docker desktop applications. See Docker Cloud (Edge feature) on [Mac](/docker-for-mac/index.md#docker-cloud-edge-feature) or [Windows](/docker-for-windows/index.md#docker-cloud-edge-feature). > -> Docker Machine will still work as described below, but Docker Cloud +> Docker Machine still works as described below, but Docker Cloud supercedes Machine for this purpose. {: .important} @@ -38,7 +38,7 @@ To generate your access token: ![Generate token](../img/ocean_gen_token.png) -3. Give the token a clever name (e.g. "machine"), make sure the **Write (Optional)** checkbox is checked, and click **Generate Token**. +3. Give the token a descriptive name, make sure the **Write (Optional)** checkbox is checked, and click **Generate Token**. ![Name and generate token](../img/ocean_token_create.png) @@ -46,14 +46,14 @@ To generate your access token: ![Copy and save personal access token](../img/ocean_save_token.png) - This is the personal access token you'll use in the next step to create your cloud server. + This is the personal access token used in the next step to create your cloud server. ### Step 3. Use Machine to create the Droplet 1. Run `docker-machine create` with the `digitalocean` driver and pass your key to the `--digitalocean-access-token` flag, along with a name for the new cloud server. - For this example, we'll call our new Droplet "docker-sandbox". 
+ For this example, the new Droplet is called `docker-sandbox`: ```none $ docker-machine create --driver digitalocean --digitalocean-access-token xxxxx docker-sandbox @@ -86,7 +86,11 @@ To generate your access token: default - virtualbox Running tcp://192.168.99.100:2376 docker-sandbox * digitalocean Running tcp://45.55.139.48:2376 - The new `docker-sandbox` machine is running, and it is the active host as indicated by the asterisk (\*). When you create a new machine, your command shell automatically connects to it. If for some reason your new machine is not the active host, you'll need to run `docker-machine env docker-sandbox`, followed by `eval $(docker-machine env docker-sandbox)` to connect to it. + The new `docker-sandbox` machine is running, and it is the active host as + indicated by the asterisk (\*). When you create a new machine, your command + shell automatically connects to it. If for some reason your new machine is + not the active host, run `docker-machine env docker-sandbox`, followed by + `eval $(docker-machine env docker-sandbox)` to connect to it. ### Step 4. Run Docker commands on the Droplet @@ -143,9 +147,12 @@ To remove a host and all of its containers and images, first stop the machine, t NAME ACTIVE DRIVER STATE URL SWARM default * virtualbox Running tcp:////xxx.xxx.xx.xxx:xxxx -If you monitor the Digital Ocean console while you run these commands, you will see it update first to reflect that the Droplet was stopped, and then removed. +If you monitor the Digital Ocean console while you run these commands, notice +that it updates first to reflect that the Droplet was stopped, and then removed. -If you create a host with Docker Machine, but remove it through the cloud provider console, Machine will lose track of the server status. So please use the `docker-machine rm` command for hosts you create with `docker-machine create`. 
+If you create a host with Docker Machine, but remove it through the cloud +provider console, Machine loses track of the server status. Use the +`docker-machine rm` command for hosts you create with `docker-machine create`. ## Where to go next diff --git a/machine/get-started-cloud.md b/machine/get-started-cloud.md index a088fef06ff..c6b4c79d6f2 100644 --- a/machine/get-started-cloud.md +++ b/machine/get-started-cloud.md @@ -16,14 +16,14 @@ Docker desktop applications. See Docker Cloud (Edge feature) on [Mac](/docker-for-mac/index.md#docker-cloud-edge-feature) or [Windows](/docker-for-windows/index.md#docker-cloud-edge-feature). > -> Docker Machine will still work as described here, but Docker Cloud supercedes Machine for this purpose. +> Docker Machine still works as described here, but Docker Cloud supercedes Machine for this purpose. {: .important} Docker Machine driver plugins are available for many cloud platforms, so you can use Machine to provision cloud hosts. When you use Docker Machine for provisioning, you create cloud hosts with Docker Engine installed on them. -You'll need to install and run Docker Machine, and create an account with the +Install and run Docker Machine, and create an account with the cloud provider. Then you provide account verification, security credentials, and configuration @@ -70,11 +70,11 @@ specific to the cloud service you are using * `` - name of the host you want to create -For convenience, `docker-machine` will use sensible defaults for choosing +For convenience, `docker-machine` uses sensible defaults for choosing settings such as the image that the server is based on, but you override the -defaults using the respective flags (e.g. `--digitalocean-image`). This is +defaults using the respective flags, such as `--digitalocean-image`. This is useful if, for example, you want to create a cloud server with a lot of memory -and CPUs (by default `docker-machine` creates a small server). 
+and CPUs, rather than the default behavior of creating smaller servers. For a full list of the flags/settings available and their defaults, see the output of `docker-machine create -h` at the command line, the diff --git a/machine/get-started.md b/machine/get-started.md index 80725fdf96f..02eb9238d6f 100644 --- a/machine/get-started.md +++ b/machine/get-started.md @@ -55,9 +55,9 @@ lightweight macOS virtualization solution built on top of the [Hypervisor.framework](https://developer.apple.com/reference/hypervisor) in macOS 10.10 Yosemite and higher. -Currently, there is no `docker-machine create` driver for HyperKit, so you will -use `virtualbox` driver to create local machines. (See the [Docker Machine -driver for Oracle VirtualBox](drivers/virtualbox.md).) Note that you can run +Currently, there is no `docker-machine create` driver for HyperKit, so +use the `virtualbox` driver to create local machines. (See the [Docker Machine +driver for Oracle VirtualBox](drivers/virtualbox.md).) You can run both HyperKit and Oracle VirtualBox on the same system. To learn more, see [Docker for Mac vs. Docker Toolbox](/docker-for-mac/docker-toolbox/). @@ -71,7 +71,7 @@ Docker for Mac and Docker for Windows both require newer versions of their respective operating systems, so users with older OS versions must use Docker Toolbox. -* If you are using Docker Toolbox on either Mac or an older version Windows system (without Hyper-V), you will use the `virtualbox` driver to create a local +* If you are using Docker Toolbox on either Mac or an older version Windows system (without Hyper-V), use the `virtualbox` driver to create a local machine based on Oracle [VirtualBox](https://www.virtualbox.org/){: target="_blank" class="_"}. (See the [Docker Machine driver for Oracle VirtualBox](drivers/virtualbox.md).) @@ -87,10 +87,10 @@ Microsoft Hyper-V](drivers/hyper-v.md).) to install Docker Machine, VirtualBox is automatically installed. 
-* If you used the Quickstart Terminal to launch your first machine and set your terminal environment to point to it, a default machine was automatically -created. If this is the case, you can still follow along with these steps, but -create another machine and name it something other than "default" (e.g., staging -or sandbox). +* If you used the Quickstart Terminal to launch your first machine and set your + terminal environment to point to it, a default machine was automatically + created. If so, you can still follow along with these steps, but + create another machine and name it something other than `default`. ## Use Machine to run Docker containers @@ -173,12 +173,12 @@ choose another name for this new machine. $ eval "$(docker-machine env default)" **Note**: If you are using `fish`, or a Windows shell such as - Powershell/`cmd.exe` the above method will not work as described. + Powershell/`cmd.exe`, the above method does not work as described. Instead, see [the `env` command's documentation](/machine/reference/env.md){: target="_blank" class="_"} to learn how to set the environment variables for your shell. This sets environment variables for the current shell that the Docker - client will read which specify the TLS settings. You need to do this + client reads which specify the TLS settings. You need to do this each time you open a new shell or restart your machine. (See also, how to [unset environment variables in the current shell](/machine/get-started.md#unset-environment-variables-in-the-current-shell).) @@ -230,7 +230,7 @@ Run a container with `docker run` to verify your set up.

If you see this page, the nginx web server is successfully installed and working. Further configuration is required.

-

For online documentation and support please refer to +

For online documentation and support, refer to nginx.org.
Commercial support is available at nginx.com.

@@ -239,7 +239,7 @@ Run a container with `docker run` to verify your set up. - You can create and manage as many local VMs running Docker as you please; just run `docker-machine create` again. All created machines will appear in the output of `docker-machine ls`. + You can create and manage as many local VMs running Docker as your local resources permit; just run `docker-machine create` again. All created machines appear in the output of `docker-machine ls`. ## Start and stop machines @@ -250,7 +250,7 @@ If you are finished using a host for the time being, you can stop it with `docke ## Operate on machines without specifying the name -Some `docker-machine` commands will assume that the given operation should be run on a machine named `default` (if it exists) if no machine name is specified. Because using a local VM named `default` is such a common pattern, this allows you to save some typing on the most frequently used Machine commands. +Some `docker-machine` commands assume that the given operation should be run on a machine named `default` (if it exists) if no machine name is specified. Because using a local VM named `default` is such a common pattern, this allows you to save some typing on the most frequently used Machine commands. For example: @@ -355,17 +355,18 @@ shell to talk to different Docker engines. ## Start local machines on startup -In order to ensure that the Docker client is automatically configured at the -start of each shell session, some users like to embed `eval $(docker-machine env -default)` in their shell profiles (e.g., the `~/.bash_profile` file). However, -this fails if the `default` machine is not running. If desired, you can -configure your system to start the `default` machine automatically. 
+To ensure that the Docker client is automatically configured at the start of +each shell session, you can embed `eval $(docker-machine env default)` in your +shell profiles, by adding it to the `~/.bash_profile` file or the equivalent +configuration file for your shell. However, this fails if a machine called +`default` is not running. You can configure your system to start the `default` +machine automatically. The following example shows how to do this in macOS. -Here is an example of how to configure this on macOS. -Create a file called `com.docker.machine.default.plist` under `~/Library/LaunchAgents` with the following content: +Create a file called `com.docker.machine.default.plist` in the +`~/Library/LaunchAgents/` directory, with the following content: -``` +```xml @@ -389,7 +390,8 @@ Create a file called `com.docker.machine.default.plist` under `~/Library/LaunchA ``` -You can change the `default` string above to make this `LaunchAgent` start any machine(s) you desire. +You can change the `default` string above to make this `LaunchAgent` start a +different machine. ## Where to go next diff --git a/machine/install-machine.md b/machine/install-machine.md index 6e2c2be63fc..e0b52ede6fb 100644 --- a/machine/install-machine.md +++ b/machine/install-machine.md @@ -44,7 +44,7 @@ curl -L https://github.com/docker/machine/releases/download/v{{site.machine_vers chmod +x "$HOME/bin/docker-machine.exe" ``` - > The above command will work on Windows only if you use a + > The above command works on Windows only if you use a terminal emulator such as [Git BASH](https://git-for-windows.github.io/){: target="_blank" class="_"}, which supports Linux commands like `chmod`. {: .important} @@ -103,7 +103,7 @@ To uninstall Docker Machine: and other data related to each virtual machine created by `docker-machine` is stored in `~/.docker/machine/machines/` on Mac and Linux and in `~\.docker\machine\machines\` on Windows. 
We recommend that you do not edit or -remove those files directly as this will only affect information for the Docker +remove those files directly as this only affects information for the Docker CLI, not the actual VMs, regardless of whether they are local or on remote servers. diff --git a/machine/overview.md b/machine/overview.md index 3552e953762..90f81375eaa 100644 --- a/machine/overview.md +++ b/machine/overview.md @@ -51,7 +51,7 @@ Docker Machine has these two broad use cases. ![Docker Machine on Mac and Windows](img/machine-mac-win.png){: .white-bg} - If you work primarily on an older Mac or Windows laptop or desktop that doesn't meet the requirements for the new [Docker for Mac](/docker-for-mac/index.md) and [Docker for Windows](/docker-for-windows/index.md) apps, then you need Docker Machine in order to "run Docker" (that is, Docker Engine) locally. Installing Docker Machine on a Mac or Windows box with the [Docker Toolbox](/toolbox/overview.md) installer provisions a local virtual machine with Docker Engine, gives you the ability to connect it, and run `docker` commands. + If you work primarily on an older Mac or Windows laptop or desktop that doesn't meet the requirements for the new [Docker for Mac](/docker-for-mac/index.md) and [Docker for Windows](/docker-for-windows/index.md) apps, then you need Docker Machine to run Docker Engine locally. Installing Docker Machine on a Mac or Windows box with the [Docker Toolbox](/toolbox/overview.md) installer provisions a local virtual machine with Docker Engine, gives you the ability to connect it, and run `docker` commands. 
* **I want to provision Docker hosts on remote systems** diff --git a/machine/reference/create.md b/machine/reference/create.md index f8706a1067b..c0155db49a9 100644 --- a/machine/reference/create.md +++ b/machine/reference/create.md @@ -35,7 +35,7 @@ To see how to connect Docker to this machine, run: docker-machine env dev ## Accessing driver-specific flags in the help text -The `docker-machine create` command has some flags which are applicable to all +The `docker-machine create` command has some flags which apply to all drivers. These largely control aspects of Machine's provisioning process (including the creation of Docker Swarm containers) that the user may wish to customize. @@ -121,7 +121,7 @@ Options: You may notice that some flags specify environment variables that they are associated with as well (located to the far left hand side of the row). If these environment variables are set when `docker-machine create` is invoked, -Docker Machine will use them for the default value of the flag. +Docker Machine uses them for the default value of the flag. ## Specifying configuration options for the created Docker engine @@ -139,7 +139,7 @@ that they are running themselves using the `--insecure-registry` flag for the daemon. Docker Machine supports the configuration of such options for the created engines via the `create` command flags which begin with `--engine`. -Note that Docker Machine simply sets the configured parameters on the daemon +Docker Machine only sets the configured parameters on the daemon and does not set up any of the "dependencies" for you. 
For instance, if you specify that the created daemon should use `btrfs` as a storage driver, you still must ensure that the proper dependencies are installed, the BTRFS @@ -156,7 +156,7 @@ $ docker-machine create -d virtualbox \ foobarmachine ``` -This will create a virtual machine running locally in Virtualbox which uses the +This creates a virtual machine running locally in Virtualbox which uses the `overlay` storage backend, has the key-value pairs `foo=bar` and `spam=eggs` as labels on the engine, and allows pushing / pulling from the insecure registry located at `registry.myco.com`. You can verify much of this by inspecting the @@ -213,8 +213,8 @@ specify arbitrary environment variables to be set within the engine with the syn ## Specifying Docker Swarm options for the created machine -In addition to being able to configure Docker Engine options as listed above, -you can use Machine to specify how the created Swarm master should be +In addition to configuring Docker Engine options as listed above, +you can use Machine to specify how the created swarm manager is configured. There is a `--swarm-strategy` flag, which you can use to specify the [scheduling strategy](/swarm/scheduler/strategy.md) which Docker Swarm should use (Machine defaults to the `spread` strategy). @@ -227,8 +227,8 @@ allows you to access [experimental features](https://github.com/docker/swarm/tre in Docker Swarm. If you're not sure how to configure these options, it is best to not specify -configuration at all. Docker Machine will choose sensible defaults for you and -you won't have to worry about it. +configuration at all. Docker Machine chooses sensible defaults for you and +you don't need to worry about it. 
Example create: @@ -242,18 +242,18 @@ $ docker-machine create -d virtualbox \ upbeat ``` -This will set the swarm scheduling strategy to "binpack" (pack in containers as +This sets the swarm scheduling strategy to "binpack" (pack in containers as tightly as possible per host instead of spreading them out), and the "heartbeat" interval to 5 seconds. ## Pre-create check -Since many drivers require a certain set of conditions to be in place before -they can successfully perform a create (e.g. VirtualBox should be installed, or -the provided API credentials should be valid), Docker Machine has a "pre-create -check" which is specified at the driver level. +Many drivers require a certain set of conditions to be in place before +machines can be created. For instance, VirtualBox needs to be installed before +the `virtualbox` driver can be used. For this reason, Docker Machine has a +"pre-create check" which is specified at the driver level. -If this pre-create check succeeds, Docker Machine will proceed with the creation -as normal. If the pre-create check fails, the Docker Machine process will exit +If this pre-create check succeeds, Docker Machine proceeds with the creation +as normal. If the pre-create check fails, the Docker Machine process exits with status code 3 to indicate that the source of the non-zero exit was the pre-create check failing. diff --git a/machine/reference/env.md b/machine/reference/env.md index ba786724aae..de68111b7c6 100644 --- a/machine/reference/env.md +++ b/machine/reference/env.md @@ -25,8 +25,8 @@ Options: --no-proxy Add machine IP to NO_PROXY environment variable ``` -`docker-machine env machinename` will print out `export` commands which can be -run in a subshell. Running `docker-machine env -u` will print `unset` commands +`docker-machine env machinename` prints out `export` commands which can be +run in a subshell. Running `docker-machine env -u` prints `unset` commands which reverse this effect. 
```none @@ -37,7 +37,7 @@ DOCKER_HOST=tcp://192.168.99.101:2376 DOCKER_CERT_PATH=/Users/nathanleclaire/.docker/machines/.client DOCKER_TLS_VERIFY=1 DOCKER_MACHINE_NAME=dev -$ # If you run a docker command, now it will run against that host. +$ # If you run a docker command, now it runs against that host. $ eval "$(docker-machine env -u)" $ env | grep DOCKER $ # The environment variables have been unset. @@ -50,7 +50,7 @@ supports. Docker Machine detects the shells available in your environment and li Docker supports `bash`, `cmd`, `powershell`, and `emacs`. If you are using `fish` and the `SHELL` environment variable is correctly set to -the path where `fish` is located, `docker-machine env name` will print out the +the path where `fish` is located, `docker-machine env name` prints out the values in the format which `fish` expects: ```none @@ -93,12 +93,12 @@ set DOCKER_MACHINE_NAME=dev ## Excluding the created machine from proxies -The env command supports a `--no-proxy` flag which will ensure that the created +The env command supports a `--no-proxy` flag which ensures that the created machine's IP address is added to the [`NO_PROXY`/`no_proxy` environment variable](https://wiki.archlinux.org/index.php/Proxy_settings). -This is useful when using `docker-machine` with a local VM provider (e.g. -`virtualbox` or `vmwarefusion`) in network environments where an HTTP proxy is +This is useful when using `docker-machine` with a local VM provider, such as +`virtualbox` or `vmwarefusion`, in network environments where an HTTP proxy is required for internet access. ```none diff --git a/machine/reference/inspect.md b/machine/reference/inspect.md index bcd62e6a301..3823aa6d6c5 100644 --- a/machine/reference/inspect.md +++ b/machine/reference/inspect.md @@ -16,8 +16,8 @@ Options: --format, -f Format the output using the given go template. ``` -By default, this will render information about a machine as JSON. 
If a format is -specified, the given template will be executed for each result. +By default, this renders information about a machine as JSON. If a format is +specified, the given template is executed for each result. Go's [text/template](http://golang.org/pkg/text/template/) package describes all the details of the format. diff --git a/machine/reference/ls.md b/machine/reference/ls.md index 5cb5a3d4279..eac9ae64373 100644 --- a/machine/reference/ls.md +++ b/machine/reference/ls.md @@ -20,7 +20,7 @@ Options: ## Timeout The `ls` command tries to reach each host in parallel. If a given host does not -answer in less than 10 seconds, the `ls` command will state that this host is in +answer in less than 10 seconds, the `ls` command states that this host is in `Timeout` state. In some circumstances (poor connection, high load, or while troubleshooting), you may want to increase or decrease this value. You can use the -t flag for this purpose with a numerical value in seconds. @@ -36,7 +36,8 @@ default - virtualbox Running tcp://192.168.99.100:2376 v1 ## Filtering The filtering flag (`--filter`) format is a `key=value` pair. If there is more -than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`) +than one filter, then pass multiple flags. For example: +`--filter "foo=bar" --filter "bif=baz"` The currently supported filters are: @@ -72,7 +73,7 @@ foo2 * virtualbox Running tcp://192.168.99.107:2376 v1.9. ## Formatting -The formatting option (`--format`) will pretty-print machines using a Go template. +The formatting option (`--format`) pretty-prints machines using a Go template. 
Valid placeholders for the Go template are listed below: @@ -90,8 +91,8 @@ Valid placeholders for the Go template are listed below: | .DockerVersion | Docker Daemon version | | .ResponseTime | Time taken by the host to respond | -When using the `--format` option, the `ls` command will either output the data exactly as the template declares or, -when using the table directive, will include column headers as well. +When using the `--format` option, the `ls` command either outputs the data exactly as the template declares or, +when using the table directive, includes column headers as well. The following example uses a template without headers and outputs the `Name` and `Driver` entries separated by a colon for all running machines: diff --git a/machine/reference/mount.md b/machine/reference/mount.md index 07807f6f9c0..e126be77449 100644 --- a/machine/reference/mount.md +++ b/machine/reference/mount.md @@ -23,7 +23,7 @@ bar Now you can use the directory on the machine, for mounting into containers. -Any changes done in the local directory, will be reflected in the machine too. +Any changes done in the local directory are reflected in the machine too. ```none $ eval $(docker-machine env dev) @@ -46,5 +46,5 @@ You can also call `fuserunmount` (or `fusermount -u`) commands directly. $ docker-machine mount -u dev:/home/docker/foo foo $ rmdir foo ``` -**Note that files are actually being stored on the machine, *not* on the host.** +**Files are actually being stored on the machine, *not* on the host.** So make sure to make a copy of any files you want to keep, before removing it! diff --git a/machine/reference/provision.md b/machine/reference/provision.md index 23a37172fd9..98d20202f16 100644 --- a/machine/reference/provision.md +++ b/machine/reference/provision.md @@ -23,8 +23,8 @@ Setting Docker configuration on the remote daemon... The Machine provisioning process will: -1. Set the hostname on the instance to the name Machine addresses it by (e.g. - `default`). +1. 
Set the hostname on the instance to the name Machine addresses it by, such + as `default`. 2. Install Docker if it is not present already. 3. Generate a set of certificates (usually with the default, self-signed CA) and configure the daemon to accept connections over TLS. diff --git a/machine/reference/rm.md b/machine/reference/rm.md index 49b2fcd8fcc..272f75d0a19 100644 --- a/machine/reference/rm.md +++ b/machine/reference/rm.md @@ -4,7 +4,7 @@ keywords: machine, rm, subcommand title: docker-machine rm --- -Remove a machine. This will remove the local reference as well as delete it +Remove a machine. This removes the local reference and deletes it on the cloud provider or virtualization management platform. ```none diff --git a/machine/reference/scp.md b/machine/reference/scp.md index f27e4cac590..a79225e46da 100644 --- a/machine/reference/scp.md +++ b/machine/reference/scp.md @@ -8,7 +8,7 @@ Copy files from your local host to a machine, from machine to machine, or from a machine to your local host using `scp`. The notation is `machinename:/path/to/files` for the arguments; in the host -machine's case, you don't have to specify the name, just the path. +machine's case, you don't need to specify the name, just the path. ## Example @@ -51,16 +51,14 @@ baz When you copy files to a remote server with `docker-machine scp` for app deployment, make sure `docker-compose` and the Docker daemon know how to find -them. You can specify absolute paths, e.g. `/home/myuser/workspace` in a -[Compose file](/compose/compose-file/index.md), which will be mounted into the -container at `/workspace`, from the absolute path on the remote host where the -Docker daemon is running. Local client paths (e.g., on your laptop) will not -work for daemons running on a remote machine, so avoid using relative paths. +them. Avoid using relative paths, but specify absolute paths in +[Compose files](/compose/compose-file/index.md). 
It's best to specify absolute +paths both for the location on the Docker daemon and within the container. For example, imagine you want to transfer your local directory `/Users/londoncalling/webapp` to a remote machine and bind mount it into a -container on the remote host. (We'll suppose the remote user is `ubuntu`.) You -could do something like this: +container on the remote host. If the remote user is `ubuntu`, use a command like +this: ```none $ docker-machine scp -r /Users/londoncalling/webapp MACHINE-NAME:/home/ubuntu/webapp diff --git a/machine/reference/ssh.md b/machine/reference/ssh.md index 5cf755a06b7..091a7f41a5c 100644 --- a/machine/reference/ssh.md +++ b/machine/reference/ssh.md @@ -42,7 +42,7 @@ Mem: 1023556 183136 840420 0 30920 Swap: 1212036 0 1212036 ``` -Commands with flags will work as well: +Commands with flags work as well: ```none $ docker-machine ssh dev df -h @@ -61,7 +61,7 @@ If you are using the "external" SSH type as detailed in the next section, you can include additional arguments to pass through to the `ssh` binary in the generated command (unless they conflict with any of the default arguments for the command generated by Docker Machine). For instance, the following command -will forward port 8080 from the `default` machine to `localhost` on your host +forwards port 8080 from the `default` machine to `localhost` on your host computer: ```bash @@ -70,18 +70,18 @@ $ docker-machine ssh default -L 8080:localhost:8080 ## Different types of SSH -When Docker Machine is invoked, it will check to see if you have the venerable -`ssh` binary around locally and will attempt to use that for the SSH commands it +When Docker Machine is invoked, it checks to see if you have the venerable +`ssh` binary around locally and attempts to use that for the SSH commands it needs to run, whether they are a part of an operation such as creation or have been requested by the user directly. 
If it does not find an external `ssh` -binary locally, it will default to using a native Go implementation from +binary locally, it defaults to using a native Go implementation from [crypto/ssh](https://godoc.org/golang.org/x/crypto/ssh). This is useful in situations where you may not have access to traditional UNIX tools, such as if you are using Docker Machine on Windows without having msysgit installed alongside of it. -In most situations, you will not have to worry about this implementation detail -and Docker Machine will act sensibly out of the box. However, if you +In most situations, you do not need to worry about this implementation detail +and Docker Machine acts sensibly out of the box. However, if you deliberately want to use the Go native version, you can do so with a global command line flag / environment variable like so: @@ -89,5 +89,5 @@ command line flag / environment variable like so: $ docker-machine --native-ssh ssh dev ``` -There are some variations in behavior between the two methods, so please report +There are some variations in behavior between the two methods, so report any issues or inconsistencies if you come across them. \ No newline at end of file diff --git a/machine/reference/upgrade.md b/machine/reference/upgrade.md index cad3d6bdbde..2b93f195cb1 100644 --- a/machine/reference/upgrade.md +++ b/machine/reference/upgrade.md @@ -8,9 +8,9 @@ Upgrade a machine to the latest version of Docker. How this upgrade happens depends on the underlying distribution used on the created instance. For example, if the machine uses Ubuntu as the underlying operating system, it -will run a command similar to `sudo apt-get upgrade docker-engine`, because +runs a command similar to `sudo apt-get upgrade docker-engine`, because Machine expects Ubuntu machines it manages to use this package. 
As another -example, if the machine uses boot2docker for its OS, this command will download +example, if the machine uses boot2docker for its OS, this command downloads the latest boot2docker ISO and replace the machine's existing ISO with the latest. @@ -26,5 +26,5 @@ Waiting for VM to start... > **Note**: If you are using a custom boot2docker ISO specified using > `--virtualbox-boot2docker-url` or an equivalent flag, running an upgrade on -> that machine will completely replace the specified ISO with the latest +> that machine completely replaces the specified ISO with the latest > "vanilla" boot2docker ISO available. \ No newline at end of file diff --git a/notary/advanced_usage.md b/notary/advanced_usage.md index 20fa99ff5c9..b9543e8e2b3 100644 --- a/notary/advanced_usage.md +++ b/notary/advanced_usage.md @@ -11,7 +11,7 @@ their own Notary service. Make sure you have first read and understood how to ## An important note about the examples This document's command examples omit the `-s` and `-d` flags. If you do not -know what these options do, please read the [Getting +know what these options do, read the [Getting Started](getting_started.md) docs or run `notary --help` before continuing. Once you understand what these flags do, you must provide your own values for these options while following this document. You can also configure these options, see @@ -21,15 +21,15 @@ options while following this document. You can also configure these options, see Before adding and signing content to a collection, you must first initialize that collection. -``` +```bash $ notary init example.com/collection No root keys found. Generating a new root key... You are about to create a new root signing key passphrase. This passphrase -will be used to protect the most sensitive key in your signing system. Please -choose a long, complex passphrase and be careful to keep the password and the +is used to protect the most sensitive key in your signing system. 
+Choose a long, complex passphrase and be careful to keep the password and the key file itself secure and backed up. It is highly recommended that you use a -password manager to generate the passphrase and keep it safe. There will be no +password manager to generate the passphrase and keep it safe. There is no way to recover this key. You can find the key in your config directory. Enter passphrase for new root key with ID 1f54328: Repeat passphrase for new root key with ID 1f54328: @@ -37,35 +37,49 @@ Enter passphrase for new targets key with ID 1df39fc (example.com/collection): Repeat passphrase for new targets key with ID 1df39fc (example.com/collection): ``` -Initializing a trusted collection will generate the following items; all keys use +Initializing a trusted collection generates the following items; all keys use asymmetric algorithms, but there is no requirement that they all use the _same_ algorithm: -- If no root key is found, an initial root key will be generated. This key will be used as the default root of trust for all your trusted collections. -- A targets key and a snapshot key. The same password is used to encrypt both of these as the security profile of them (when both held by the author of the trusted collection) is identical. This is why you will not be asked for a snapshot key password. -- A timestamp key. This is generated by the server on a request from the client, returning just the public key. The server holds the private key and will sign timestamps on behalf of the user. -- Stub signed notary metadata. This stages the base version of the trust metadata for the collection. It will be finalized when it is published to the server. +- If no root key is found, an initial `root` key is generated. This key is + used as the default root of trust for all your trusted collections. +- A `targets` key and a `snapshot` key. 
The same password encrypts both of these + as the security profile of them (when both held by the author of the trusted + collection) is identical. This is why you are not prompted for a snapshot key + password. +- A `timestamp` key. This is generated by the server on a request from the + client, returning just the public key. The server holds the private key and + signs timestamps on behalf of the user. +- Stub signed notary metadata. This stages the base version of the trust + metadata for the collection. It is finalized when it is published to the + server. ## Add and remove Targets -It's simple to add targets to a trusted collection with notary CLI: +To add targets to a trusted collection with notary CLI: -``` +```bash $ notary add example.com/collection v1 my_file.txt ``` -The above command adds the local file `my_file.txt` (this file must exist relative to the current working directory) under the target name `v1` to the `example.com/collection` collection we set up. The contents of the local file are not actually added to the collection - a "target" consists of the -file path and one or more checksums of the contents. +This adds the local file `my_file.txt` (which must exist relative to the +current working directory), under the target name `v1`, to the +`example.com/collection` collection we set up. The contents of the local file +are not actually added to the collection - a "target" consists of the file path +and one or more checksums of the contents. -Note that this is an offline command, and we must run a `notary publish example.com/collection` for the add to take effect. +This is an offline command, and we must run a +`notary publish example.com/collection` for the add to take effect. -To remove targets, we use the `notary remove` command, specifying the GUN and target name. +To remove targets, we use the `notary remove` command, specifying the GUN and +target name. 
-``` +```bash $ notary remove example.com/collection v1 ``` -Removing a target is also an offline command that requires a `notary publish example.com/collection` to take effect. +Removing a target is also an offline command that requires a +`notary publish example.com/collection` to take effect. ## Manage keys @@ -82,26 +96,32 @@ subsection. ### Rotate keys -In case of potential compromise, notary provides a CLI command for rotating keys. Currently, you can use the `notary key rotate` command to rotate the targets or snapshot keys. +In case of potential compromise, notary provides a CLI command for rotating keys. +You can use the `notary key rotate` command to rotate the targets or snapshot +keys. While the snapshot key is managed by the notary client by default, use the `notary key rotate snapshot -r` command to rotate the snapshot key to the server, such that the -notary server will then sign snapshots. This is particularly useful when using -delegations with a trusted collection, so that delegates will never need access to the +notary server can sign snapshots. This is particularly useful when using +delegations with a trusted collection, so that delegates never need access to the snapshot key to push their updates to the collection. -Note that new collections created by a Docker 1.11 Engine client will have the server manage the snapshot key by default. -To reclaim control of the snapshot key on the client, use the `notary key rotate` command without the `-r` flag. +New collections created by a Docker 1.11 Engine client cause the server +to manage the snapshot key by default. To reclaim control of the snapshot key on +the client, use the `notary key rotate` command without the `-r` flag. -The targets key must be locally managed - to rotate the targets key, for instance in case of compromise, use the `notary key rotate targets` command without the `-r` flag. 
+The targets key must be locally managed - to rotate the targets key, for +instance in case of compromise, use the `notary key rotate targets` command +without the `-r` flag. ### Use a Yubikey Notary can be used with [Yubikey 4](https://www.yubico.com/products/yubikey-hardware/yubikey4/){: target="_blank" class="_"} keys, via a PKCS11 interface when the Yubikey has CCID mode enabled. -The Yubikey will be prioritized to store root keys, and will require user touch-input for signing. -Note that Yubikey support is included with the Docker Engine 1.11 client for use with Docker Content Trust. +The Yubikey is prioritized to store root keys, and requires user +touch-input for signing. Yubikey support is included with the Docker Engine 1.11 +client for use with Docker Content Trust. Yubikey support requires [Yubico PIV libraries](https://www.yubico.com/support/knowledge-base/categories/downloads/){: target="_blank" class="_"} @@ -120,25 +140,25 @@ targets key or allow a collaborator write access to all targets of the collection. Before adding any delegations, you should rotate the snapshot key to the server. -Note that this is done by default for new collections created with a Docker Engine 1.11 client. -This is such that delegation roles will not require the snapshot key to publish +This is done by default for new collections created with a Docker Engine 1.11 client. +Delegation roles do not require the snapshot key to publish their own targets to the collection, since the server can publish the valid snapshot with the delegation targets: -``` +```bash $ notary key rotate example.com/collection snapshot -r ``` Here, `-r` specifies to rotate the key to the remote server. When adding a delegation, your must acquire a x509 certificate with the public -key of the user you wish to delegate to. The user who will assume this +key of the user you wish to delegate to. The user who assumes this delegation role must hold the private key to sign content with notary. 
Once you've acquired the delegate's x509 certificate, you can add a delegation for this user: -``` +```bash $ notary delegation add example.com/collection targets/releases cert.pem --paths="delegation/path" ``` @@ -152,14 +172,14 @@ For the `targets/releases` delegation role to sign content, the delegation user must possess the private key corresponding to this public key. This command restricts this delegation to only publish content under pathnames prefixed by `delegation/path`. With the given path of "delegation/path", the `targets/releases` -role would be able to sign paths like "delegation/path/content.txt", "delegation/path_file.txt" +role could sign paths like "delegation/path/content.txt", "delegation/path_file.txt" and "delegation/path.txt". You can add more paths in a comma-separated list under `--paths`, or pass the `--all-paths` flag to allow this delegation to publish content under any pathname. After publishing, you can view delegations using a list command: -``` +```bash $ notary delegation list example.com/collection ROLE PATHS KEY IDS THRESHOLD @@ -171,7 +191,7 @@ You can see the `targets/releases` with its paths and key IDs. If you wish to mo A threshold of `1` indicates that only one of the keys specified in `KEY IDS` is required to publish to this delegation. Thresholds other than 1 are not currently supported. To remove a delegation role entirely, or just individual keys and/or paths, use the `notary delegation remove` command: -``` +```bash $ notary delegation remove example.com/user targets/releases Are you sure you want to remove all data for this delegation? (yes/no) @@ -182,7 +202,7 @@ Forced removal (including all keys and paths) of delegation role targets/release You can remove individual keys and/or paths by passing keys as arguments, and/or paths under the `--paths` flag. Use `--all-paths` to clear all paths for this -role. If you specify all key IDs currently in the delegation role, you will +role. 
If you specify all key IDs currently in the delegation role, you delete the role entirely. To add targets to a specified delegation role, we can use the `notary add` @@ -193,7 +213,7 @@ you can run `notary key import --role user` with the private key PEM file, or drop the private key PEM in `private/tuf_keys` as `.key` with the `role` PEM header set to `user`. -``` +```bash $ notary add example/collections delegation/path/target delegation_file.txt --roles=targets/releases ``` @@ -210,7 +230,7 @@ the base `targets` role. To remove this target from our delegation, use the `notary remove` command with the same flag: -``` +```bash $ notary remove example/collections delegation/path/target --roles=targets/releases ``` @@ -220,21 +240,21 @@ Docker Engine 1.10 and above supports the usage of the `targets/releases` delegation as the canonical source of a trusted image tag, if it exists. When running `docker pull` with Docker Content Trust on Docker Engine 1.10, -Docker will attempt to search the `targets/releases` role for the signed image tag, -and will fall back to the default `targets` role if it does not exist. Please note -that when searching the default `targets` role, Docker 1.10 may pick up on other +Docker attempts to search the `targets/releases` role for the signed image tag, +and falls back to the default `targets` role if it does not exist. When +searching the default `targets` role, Docker 1.10 may pick up on other non-`targets/releases` delegation roles' signed images if they exist for this tag. In Docker 1.11, this behavior is changed such that all `docker pull` commands with Docker Content Trust must pull tags only signed by the `targets/releases` delegation role or the `targets` base role. -When running `docker push` with Docker Content Trust, Docker Engine 1.10 will -attempt to sign and push with the `targets/releases` delegation role if it exists, -otherwise falling back to the `targets` role. 
In Docker 1.11, a `docker push` will -instead attempt to sign and push with all delegation roles directly under targets +When running `docker push` with Docker Content Trust, Docker Engine 1.10 +attempts to sign and push with the `targets/releases` delegation role if it exists, +otherwise falling back to the `targets` role. In Docker 1.11, a `docker push` +attempts to sign and push with all delegation roles directly under targets (ex: `targets/role` but not `targets/nested/role`) that the user has signing keys for. -If delegation roles exist but the user does not have signing keys, the push will fail. -If no delegation roles exist, the push will attempt to sign with the base `targets` role. +If delegation roles exist but the user does not have signing keys, the push fails. +If no delegation roles exist, the push attempts to sign with the base `targets` role. To use the `targets/releases` role for pushing and pulling images with content trust, follow the steps above to add and publish the delegation role with notary. diff --git a/notary/getting_started.md b/notary/getting_started.md index 76ebc5c118c..2e85e4e12d2 100644 --- a/notary/getting_started.md +++ b/notary/getting_started.md @@ -5,9 +5,9 @@ title: Getting started with Docker Notary --- This document describes basic use of the Notary CLI as a tool supporting Docker -Content Trust. For more advanced use cases, you must [run your own Notary -service](running_a_service.md) and should read the [use the Notary client for -advanced users](advanced_usage.md) documentation. +Content Trust. For more advanced use cases, you must +[run your own Notary service](running_a_service.md). Read the +[use the Notary client for advanced users](advanced_usage.md) documentation. ## What is Notary @@ -17,7 +17,7 @@ and origin of content. This ability is built on a straightforward key management and signing interface to create signed collections and configure trusted publishers. 
With Notary anyone can provide trust over arbitrary collections of data. Using -The Update Framework (TUF) +[The Update Framework (TUF)](https://www.theupdateframework.com/) as the underlying security framework, Notary takes care of the operations necessary to create, manage, and distribute the metadata necessary to ensure the integrity and freshness of your content. @@ -26,10 +26,9 @@ freshness of your content. You can download precompiled notary binary for 64 bit Linux or macOS from the Notary repository's -releases page on -GitHub. Windows is not officially -supported, but if you are a developer and Windows user, we would appreciate any -insight you can provide regarding issues. +[Releases page on Github](https://github.com/docker/notary/releases). +Windows is not officially tested, but if you are a developer and Windows user, +we would appreciate any insight you can provide regarding issues. ## Understand Notary naming @@ -39,9 +38,9 @@ when interacting with Docker Hub through the Notary client. When specifying Docker image names for the Notary client, the GUN format is: - For official images (identifiable by the "Official Repository" moniker), the -image name as displayed on Docker Hub, prefixed with `docker.io/library/`. For -example, if you would normally type `docker pull ubuntu` you must enter `notary -{cmd} docker.io/library/ubuntu`. + image name as displayed on Docker Hub, prefixed with `docker.io/library/`. For + example, if you would normally type `docker pull ubuntu` you must enter `notary + {cmd} docker.io/library/ubuntu`. - For all other images, the image name as displayed on Docker Hub, prefixed by `docker.io`. The Docker Engine client takes care of these name expansions for you so do not @@ -58,7 +57,8 @@ tell the client which repository server it should communicate with. The official Docker Hub Notary servers are located at `https://notary.docker.io`. 
If you would like to use your own Notary server, -it is important to use the same or a newer Notary version +it is important to use the same or a newer +[Notary version](https://github.com/docker/notary/releases), as the client for feature compatibility (ex: client version 0.2, server/signer version >= 0.2). Additionally, Notary stores your own signing keys, and a cache of previously downloaded trust metadata in a directory, provided @@ -68,7 +68,7 @@ found at `.docker/trust` within the calling user's home directory (failing to use this directory may result in errors when publishing updates to your trust data): -``` +```bash $ notary -s https://notary.docker.io -d ~/.docker/trust list docker.io/library/alpine NAME DIGEST SIZE (BYTES) ROLE ------------------------------------------------------------------------------------------------------ @@ -92,9 +92,9 @@ collaborators. When you run a `docker pull` command, Docker Engine is using an integrated Notary library (the same one as Notary CLI) to request the mapping of tag to sha256 digest for the one tag you are interested in (or if you passed the -`--all` flag, the client will use the list operation to efficiently retrieve all +`--all` flag, the client uses the list operation to efficiently retrieve all the mappings). Having validated the signatures on the trust data, the client -will then instruct the Engine to do a "pull by digest". During this pull, the +instructs the Engine to do a "pull by digest". During this pull, the Engine uses the sha256 checksum as a content address to request and validate the image manifest from the Docker registry. @@ -103,11 +103,11 @@ image manifest from the Docker registry. Notary generates and stores signing keys on the host it's running on. This means that the Docker Hub cannot delete tags from the trust data, they must be deleted using the Notary client. You can do this with the `notary remove` command. -Again, you must direct it to speak to the correct Notary server (N.B. 
neither -you nor the author has permissions to delete tags from the official alpine -repository, the output below is for demonstration only): +Again, you must direct it to speak to the correct Notary server. Neither +you nor the author has permissions to delete tags from the official `alpine` +repository, so the output below is for demonstration only: -``` +```bash $ notary -s https://notary.docker.io -d ~/.docker/trust remove docker.io/library/alpine 2.6 Removal of 2.6 from docker.io/library/alpine staged for next publish. ``` @@ -119,11 +119,11 @@ a `notary publish` is run for that repository. You can see a pending change by running `notary status` for the modified repository. The `status` subcommand is an offline operation and as such, does -not require the `-s` flag, however it will silently ignore the flag if provided. +not require the `-s` flag, however it silently ignores the flag if provided. Failing to provide the correct value for the `-d` flag may show the wrong (probably empty) change list: -``` +```bash $ notary -d ~/.docker/trust status docker.io/library/alpine Unpublished changes for docker.io/library/alpine: @@ -135,12 +135,12 @@ $ notary -s https://notary.docker.io publish docker.io/library/alpine ## Configure the client -It is verbose and tedious to always have to provide the `-s` and `-d` flags +It is verbose and tedious to always provide the `-s` and `-d` flags manually to most commands. A simple way to create preconfigured versions of the Notary command is via aliases. 
Add the following to your `.bashrc` or equivalent: -``` +```bash alias dockernotary="notary -s https://notary.docker.io -d ~/.docker/trust" ``` diff --git a/notary/reference/client-config.md b/notary/reference/client-config.md index 46fb350e5b8..439d9a3f8ca 100644 --- a/notary/reference/client-config.md +++ b/notary/reference/client-config.md @@ -13,11 +13,11 @@ but the path to a different configuration file can be specified using the ## Overview of the file -In addition to the configuration file format, please see the optional password +In addition to the configuration file format, see the optional password [environment variables](client-config.md#environment-variables-optional) that the Notary client can take for ease of use. -Here is a full client configuration file example; please click on the top level +Here is a full client configuration file example; click on the top level JSON keys to learn more about the configuration section corresponding to that key: @@ -41,12 +41,12 @@ JSON keys to learn more about the configuration section corresponding to that ke The `trust_dir` specifies the location (as an absolute path or a path relative to the directory of the configuration file) where the TUF metadata -and private keys will be stored. +and private keys are stored. This is normally defaults to `~/.notary`, but specifying `~/.docker/trust` facilitates interoperability with content trust. -Note that this option can be overridden with the command line flag `--trustDir`. +This option can be overridden with the command line flag `--trustDir`. ## remote_server section (optional) @@ -116,21 +116,19 @@ Remote server example: The `trust_pinning` specifies how to bootstrap trust for the root of a Notary client's trusted collection. -This section is optional, Notary will use TOFU over HTTPS by default and +This section is optional, Notary uses TOFU over HTTPS by default and trust certificates in the downloaded root file. 
In this section, one can provide specific certificates to pin to, or a CA to pin to as a root of trust for a GUN. Multiple sections can be specified, -but the pinned certificates will take highest priority for validation, followed +but the pinned certificates take highest priority for validation, followed by the pinned CA, followed by TOFUS (TOFU over HTTPS). The diagram below describes this validation flow: -
- -
+![validation flow](https://cdn.rawgit.com/docker/notary/27469f01fe244bdf70f34219616657b336724bc3/docs/images/trust-pinning-flow.png) -Only one trust pinning option will be used to validate a GUN even if multiple -sections are specified, and any validation failure will result in a failed +Only one trust pinning option is used to validate a GUN even if multiple +sections are specified, and any validation failure results in a failed bootstrapping of the repo. @@ -167,8 +165,8 @@ bootstrapping of the repo. The following environment variables containing signing key passphrases can be used to facilitate [Notary client CLI interaction](../advanced_usage.md). -If provided, these passwords will be used initially to sign TUF metadata. -If the passphrase is incorrect, you will be prompted to enter the correct +If provided, these passwords are used initially to sign TUF metadata. +If the passphrase is incorrect, you are prompted to enter the correct passphrase. @@ -180,5 +178,5 @@ passphrase. |`NOTARY_DELEGATION_PASSPHRASE` | The delegation (an online) key passphrase | -Please note that if provided, the passphrase in `NOTARY_DELEGATION_PASSPHRASE` -will be attempted for all delegation roles that notary attempts to sign with. +If provided, the passphrase in `NOTARY_DELEGATION_PASSPHRASE` +is tried for all delegation roles that notary attempts to sign with. diff --git a/notary/reference/common-configs.md b/notary/reference/common-configs.md index ec9b076478e..b00b7364f61 100644 --- a/notary/reference/common-configs.md +++ b/notary/reference/common-configs.md @@ -25,7 +25,7 @@ Example: } ``` -Note that this entire section is optional. However, if you would like to +This entire section is optional. However, if you would like to specify a different log level, then you need the required parameters below to configure it. @@ -62,7 +62,7 @@ about these configuration parameters. } ``` -Note that this entire section is optional. 
If you want to report errors to +This entire section is optional. If you want to report errors to Bugsnag, then you need to include a `bugsnag` subsection, along with the required parameters below, to configure it. diff --git a/notary/reference/server-config.md b/notary/reference/server-config.md index 0ff9fc2dffe..70dd5ff1533 100644 --- a/notary/reference/server-config.md +++ b/notary/reference/server-config.md @@ -15,10 +15,11 @@ line. Notary server also allows you to [increase/decrease](server-config.md#hot-logging-level-reload) the logging level without having to restart. -Here is a full server configuration file example; please click on the top level JSON keys to +Here is a full server configuration file example; click on the top level JSON keys to learn more about the configuration section corresponding to that key: -
{
+```json
+{
   "server": {
     "http_addr": ":4443",
     "tls_key_file": "./fixtures/notary-server.key",
@@ -65,7 +66,7 @@ learn more about the configuration section corresponding to that key:
     "gun_prefixes": ["docker.io/", "my-own-registry.com/"]
   }
 }
-
+``` ## server section (required) @@ -94,7 +95,7 @@ Example: hence all interfaces, such as those listed when you run ifconfig)
  • "127.0.0.1:4443" means listen on port 4443 on - localhost only. That means that the server will not be + localhost only. That means that the server is not accessible except locally (via SSH tunnel, or just on a local terminal)
  • @@ -105,7 +106,7 @@ Example: @@ -114,7 +115,7 @@ Example: @@ -235,7 +236,7 @@ DB storage example: + ( include parseTime=true as part of the DSN)
    no The path to the private key to use for HTTPS. Must be provided together with tls_cert_file, - or not at all. If neither are provided, the server will use HTTP + or not at all. If neither are provided, the server uses HTTP instead of HTTPS. The path is relative to the directory of the configuration file.
    no The path to the certificate to use for HTTPS. Must be provided together with tls_key_file, or not - at all. If neither are provided, the server will use HTTP instead + at all. If neither are provided, the server uses HTTP instead of HTTPS. The path is relative to the directory of the configuration file.
    yes if not memory The the Data Source Name used to access the DB. - (note: please include parseTime=true as part of the DSN)
    @@ -259,15 +260,14 @@ Example: } ``` -Note that this entire section is optional. However, if you would like +This entire section is optional. However, if you would like authentication for your server, then you need the required parameters below to configure it. **Token authentication:** This is an implementation of the same authentication used by version 2 of the -Docker registry. (JWT token-based -authentication post login.) +[Docker Registry](https://github.com/docker/distribution). @@ -278,13 +278,13 @@ authentication post login.) - + - @@ -315,10 +315,10 @@ Example: -
    type yesMust be "token"; all other values will result in no - authentication (and the rest of the parameters will be ignored)Must be "token"; all other values result in no + authentication (and the rest of the parameters are ignored)
    options yesThe options for token auth. Please see + The options for token auth. See the registry token configuration documentation for the parameter details.no The max age, in seconds, for caching services to cache the latest metadata for a role and the metadata by checksum for a - role. This value will be set on the cache control headers for + role. This value is set on the cache control headers for GET-ting metadata. - Note that `must-revalidate` is also set on the cache control headers + `must-revalidate` is also set on the cache control headers for current metadata, as current metadata may change whenever new metadata is signed into a repo. @@ -347,15 +347,16 @@ Example:
    gun_prefixes noA list of GUN prefixes that will be accepted by this + A list of GUN prefixes accepted by this server. POST operations on an image beginning with any other prefix - will be rejected with a 400, and GET/DELETE operations will be rejected + are rejected with a 400, and GET/DELETE operations are rejected with a 404.
    ## Hot logging level reload + We don't support completely reloading notary configuration files yet at present. What we support for now is: - increase logging level by signaling `SIGUSR1` - decrease logging level by signaling `SIGUSR2` @@ -364,21 +365,25 @@ Example: To increase logging level -``` +```bash $ kill -s SIGUSR1 PID +``` or +```bash $ docker exec -i CONTAINER_ID kill -s SIGUSR1 PID ``` To decrease logging level -``` +```bash $ kill -s SIGUSR2 PID +``` or +```bash $ docker exec -i CONTAINER_ID kill -s SIGUSR2 PID ``` @@ -387,11 +392,13 @@ the container with some kind of wrapper startup script or something. You can get the PID of `notary-server` through -``` +```bash $ docker exec CONTAINER_ID ps aux +``` or +```bash $ ps aux | grep "notary-server -config" | grep -v "grep" ``` diff --git a/notary/reference/signer-config.md b/notary/reference/signer-config.md index 581a90b5635..4d9afc0889d 100644 --- a/notary/reference/signer-config.md +++ b/notary/reference/signer-config.md @@ -13,10 +13,11 @@ Notary signer [requires environment variables](signer-config.md#environment-vari to encrypt private keys at rest. It also requires a configuration file, the path to which is specified on the command line using the `-config` flag. -Here is a full signer configuration file example; please click on the top level JSON keys to +Here is a full signer configuration file example; click on the top level JSON keys to learn more about the configuration section corresponding to that key: -
    {
    +```json
    +{
       "server": {
         "http_addr": ":4444",
         "grpc_addr": ":7899",
    @@ -39,7 +40,7 @@ learn more about the configuration section corresponding to that key:
         }
       }
     }
    -
    +``` ## server section (required) @@ -74,7 +75,7 @@ Example: hence all interfaces, such as those listed when you run ifconfig)
  • "127.0.0.1:4444" means listen on port 4444 on - localhost only. That means that the server will not be + localhost only. That means that the server is not accessible except locally (via SSH tunnel, or just on a local terminal)
  • @@ -90,7 +91,7 @@ Example: hence all interfaces, such as those listed when you run ifconfig)
  • "127.0.0.1:7899" means listen on port 7899 on - localhost only. That means that the server will not be + localhost only. That means that the server is not accessible except locally (via SSH tunnel, or just on a local terminal)
  • @@ -115,8 +116,8 @@ Example: no The root certificate to trust for mutual authentication. If provided, any clients connecting to - Notary signer will have to have a client certificate signed by - this root. If not provided, mutual authentication will not be + Notary signer need a client certificate signed by + this root. If not provided, mutual authentication is not required. The path is relative to the directory of the configuration file. @@ -156,17 +157,17 @@ Example: yes if not memory The the Data Source Name used to access the DB. - (note: please include parseTime=true as part of the the DSN) + (include parseTime=true as part of the the DSN) default_alias yes if not memory This parameter specifies the alias of the current password used to encrypt the private keys in the DB. All new - private keys will be encrypted using this password, which + private keys are encrypted using this password, which must also be provided as the environment variable NOTARY_SIGNER_<DEFAULT_ALIAS_VALUE>. - Please see the environment variable + See the environment variable section for more information. @@ -184,7 +185,7 @@ For example, the configuration above specifies the default password alias to be If this configuration is used, then you must: -``` +```bash export NOTARY_SIGNER_PASSWORDALIAS1=mypassword ``` @@ -215,13 +216,13 @@ export NOTARY_SIGNER_PASSWORDALIAS1=mypassword export NOTARY_SIGNER_PASSWORDALIAS2=mynewfancypassword ``` -That way, all new keys will be encrypted and decrypted using the passphrase +That way, all new keys are encrypted and decrypted using the passphrase `mynewfancypassword`, but old keys that were encrypted using the passphrase `mypassword` can still be decrypted. The environment variables for the older passwords are optional, but Notary -Signer will not be able to decrypt older keys if they are not provided, and -attempts to sign data using those keys will fail. 
+Signer cannot decrypt older keys if they are not provided, and +attempts to sign data using those keys fail. ## Related information diff --git a/notary/running_a_service.md b/notary/running_a_service.md index 3c1db70d8d7..db8a377f06e 100644 --- a/notary/running_a_service.md +++ b/notary/running_a_service.md @@ -14,15 +14,15 @@ and [Docker Compose](/compose/overview/). The quickest way to spin up a full Notary service for testing and development purposes is to use the Docker compose file in the -Notary project. +[Notary project](https://github.com/docker/notary). -```plain +```bash $ git clone https://github.com/docker/notary.git $ cd notary $ docker-compose up ``` -This will build the development Notary server and Notary signer images, and +This builds the development Notary server and Notary signer images, and start up containers for the Notary server, Notary signer, and the MySQL database that both of them share. The MySQL data is stored in a volume. @@ -31,8 +31,8 @@ Notary server and Notary signer communicate over mutually authenticated TLS listens for HTTPS traffic on port 4443. By default, this development Notary server container runs with the testing -self-signed TLS certificates. In order to be able to successfully connect to -it, you will have to use the root CA file in `fixtures/root-ca.crt`. +self-signed TLS certificates. Before you can successfully connect to +it, you must use the root CA file in `fixtures/root-ca.crt`. For example, to connect using OpenSSL: @@ -40,15 +40,15 @@ For example, to connect using OpenSSL: $ openssl s_client -connect :4443 -CAfile fixtures/root-ca.crt -no_ssl3 -no_ssl2 ``` -To connect using the Notary Client CLI, please see [Getting Started](getting_started.md) -documentation. Please note that the version of Notary server and signer -should be greater than or equal to that of the Notary Client CLI to ensure feature compatibility, -i.e. 
if you are using Notary Client CLI 0.2, ensure you are using a server and signer tagged with -an equal or higher version than 0.2 from the releases page. +To connect using the Notary Client CLI, see [Getting Started](getting_started.md). +The version of the Notary server and signer +needs to be greater than or equal to that of the Notary Client CLI to ensure feature compatibility. +For instance, if you use Notary Client CLI 0.2, the server and signer each need +to be at least version 0.2 as well. The self-signed certificate's subject name and subject alternative names are `notary-server`, `notaryserver`, and `localhost`, so if your Docker host is not -on localhost (for example if you are using Docker Machine), you'll need to +on `localhost` (for example if you are using Docker Machine), update your hosts file such that the name `notary-server` is associated with the IP address of your Docker host. @@ -71,8 +71,9 @@ the following command line arguments: - `-config=` - specify the path to the JSON configuration file. - `-debug` - Passing this flag enables the debugging server on `localhost:8080`. - The debugging server provides pprof - and expvar endpoints. + The debugging server provides + [pprof](https://golang.org/pkg/net/http/pprof) and + [expvar](https://golang.org/pkg/expvar/) endpoints. (Remember, this is localhost with respect to the running container - this endpoint is not exposed from the container). @@ -114,11 +115,10 @@ You would need to set the environment variable `NOTARY_SERVER_STORAGE_DB_URL`, because the `db_url` is in the `storage` section of the Notary server configuration JSON. -Note that you cannot override a key whose value is another map. -For instance, setting -`NOTARY_SERVER_STORAGE='{"storage": {"backend": "memory"}}'` will not -set in-memory storage. It just fails to parse. You can only override keys -whose values are strings or numbers. +You cannot override a key whose value is another map. 
For instance, setting +`NOTARY_SERVER_STORAGE='{"storage": {"backend": "memory"}}'` does not set +in-memory storage. It just fails to parse. You can only override keys whose +values are strings or numbers. For example, let's say that you wanted to run a single Notary server instance: @@ -155,8 +155,8 @@ One way to do this would be: } } - Note that we are including a remote trust service and a database storage - type in order to demonstrate how environment variables can override + We include a remote trust service and a database storage + type to demonstrate how environment variables can override configuration parameters. 3. Run the following command (assuming you've already built or pulled a Notary server docker image): @@ -177,8 +177,8 @@ One way to do this would be: {"level":"info","msg":"Enabling TLS","time":"2016-02-25T00:53:59Z"} {"level":"info","msg":"Starting on :4443","time":"2016-02-25T00:53:59Z"} -You can do the same using [Docker -Compose](/compose/overview/) by setting volumes, +You can do the same using +[Docker Compose](/compose/overview/) by setting volumes, environment variables, and overriding the default command for the Notary server containers in the Compose file. @@ -191,11 +191,11 @@ that must be made to ensure security and scalability. ### Certificates The Notary repository includes sample certificates in the fixtures directory. -When you initialize a development service using the provided docker-compose.yml +When you initialize a development service using the provided `docker-compose.yml` file, these sample certificates are used to create a more production like environment. -**You must acquire _your_ _own_ _certificates_ to use in a production deployment.** +**You must acquire your own certificates to use in a production deployment.** The sample private key files in the Notary repository are obviously public knowledge and using them in a production deployment is highly insecure. 
@@ -223,11 +223,11 @@ only their own databases: ### High Availability -Most production users will want to increase availability by running multiple instances -of both the server and signer applications. These can be scaled arbitrarily and -independently. The database may also be scaled independently but this is left as -and exercise for experienced DBAs and Operations teams. A typical deployment will -look like the below diagram: +To increase availability, you can run multiple instances +of both the server and signer applications. These can scale arbitrarily and +independently. The database can also scale independently but this is left as +an exercise for experienced DBAs and Operations teams. A typical deployment +looks like this: ![Notary server Deployment Diagram](https://cdn.rawgit.com/docker/notary/09f81717080f53276e6881ece57cbbbf91b8e2a7/docs/images/service-deployment.svg) diff --git a/notary/service_architecture.md b/notary/service_architecture.md index fd564345b58..6e7deb3381b 100644 --- a/notary/service_architecture.md +++ b/notary/service_architecture.md @@ -9,20 +9,20 @@ On this page, you get an overview of the Notary service architecture. ## Brief overview of TUF keys and roles This document assumes familiarity with -The Update Framework, +[The Update Framework](https://www.theupdateframework.com/){:target="_blank" class="_"}, but here is a brief recap of the TUF roles and corresponding key hierarchy: -
    TUF Key Hierarchy
    +![TUF Key Hierarchy](https://cdn.rawgit.com/docker/notary/09f81717080f53276e6881ece57cbbbf91b8e2a7/docs/images/key-hierarchy.svg){:width="400px"}
 
 - The root key is the root of all trust. It signs the
-  root metadata file,
+  [root metadata file](https://github.com/theupdateframework/tuf/blob/1bed3e09a478c2c918ffbff10b9118f6e52ee129/docs/tuf-spec.txt#L489){:target="_blank" class="_"},
   which lists the IDs of the root, targets, snapshot, and timestamp public keys.
   Clients use these public keys to verify the signatures on all the metadata files
   in the repository. This key is held by a collection owner, and should be kept offline
   and safe, more so than any other key.
 
 - The snapshot key signs the
-  snapshot metadata file,
+  [snapshot metadata file](https://github.com/theupdateframework/tuf/blob/1bed3e09a478c2c918ffbff10b9118f6e52ee129/docs/tuf-spec.txt#L604){:target="_blank" class="_"},
   which enumerates the filenames, sizes, and hashes of the root,
   targets, and delegation metadata files for the collection. This file is used to
   verify the integrity of the other metadata files. The snapshot key is held by
@@ -30,7 +30,7 @@ but here is a brief recap of the TUF roles and corresponding key hierarchy:
   [signing by multiple collaborators via delegation roles](advanced_usage.md#working-with-delegation-roles).
 
 - The timestamp key signs the
-  timestamp metadata file,
+  [timestamp metadata file](https://github.com/theupdateframework/tuf/blob/1bed3e09a478c2c918ffbff10b9118f6e52ee129/docs/tuf-spec.txt#L827){:target="_blank" class="_"},
   which provides freshness guarantees for the collection by having the shortest expiry time of any particular
   piece of metadata and by specifying the filename, size, and hash of the most recent
   snapshot for the collection. It is used to verify the integrity of the snapshot
@@ -39,32 +39,32 @@ but here is a brief recap of the TUF roles and corresponding key hierarchy:
   require that a collection owner come online before each timestamp expiry. 
- The targets key signs the
-  targets metadata file,
+  [targets metadata file](https://github.com/theupdateframework/tuf/blob/1bed3e09a478c2c918ffbff10b9118f6e52ee129/docs/tuf-spec.txt#L678){:target="_blank" class="_"},
   which lists filenames in the collection, and their sizes and respective
-  hashes.
+  [hashes](https://en.wikipedia.org/wiki/Cryptographic_hash_function){:target="_blank" class="_"}.
   This file is used to verify the integrity of some or all of the actual contents of the repository.
   It is also used to
   [delegate trust to other collaborators via delegation roles](advanced_usage.md#working-with-delegation-roles).
   The targets key is held by the collection owner or administrator.
 
 - Delegation keys sign
-  delegation metadata files,
+  [delegation metadata files](https://github.com/theupdateframework/tuf/blob/1bed3e09a478c2c918ffbff10b9118f6e52ee129/docs/tuf-spec.txt#L678){:target="_blank" class="_"},
   which lists filenames in the collection, and their sizes and respective
-  hashes.
+  [hashes](https://en.wikipedia.org/wiki/Cryptographic_hash_function){:target="_blank" class="_"}.
   These files are used to verify the integrity of some or all of the actual contents of the repository.
-  They are also used to [delegate trust to other collaborators via lower level delegation roles](
-  advanced_usage.md#working-with-delegation-roles).
+  They are also used to [delegate trust to other collaborators via lower level
+  delegation roles](advanced_usage.md#working-with-delegation-roles).
   Delegation keys are held by anyone from the collection owner or administrator to
   collection collaborators.
 
 ## Architecture and components
 
 Notary clients pull metadata from one or more (remote) Notary services. Some
-Notary clients will push metadata to one or more Notary services.
+Notary clients push metadata to one or more Notary services. 
A Notary service consists of a Notary server, which stores and updates the signed -TUF metadata files +[TUF metadata files](https://github.com/theupdateframework/tuf/blob/1bed3e09a478c2c918ffbff10b9118f6e52ee129/docs/tuf-spec.txt#L348){:target="_blank" class="_"} for multiple trusted collections in an associated database, and a Notary signer, which stores private keys for and signs metadata for the Notary server. The following diagram illustrates this architecture: @@ -82,10 +82,11 @@ responsible for: The Notary signer is responsible for: - storing the private signing keys - wrapped - and encrypted - using Javascript Object Signing and Encryption in a database separate from the - Notary server database + [wrapped](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-31#section-4.4){:target="_blank" class="_"} + and + [encrypted](https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-31#section-4.8){:target="_blank" class="_"} + using [Javascript Object Signing and Encryption](https://github.com/dvsekhvalnov/jose2go){:target="_blank" class="_"} + in a database separate from the Notary server database - performing signing operations with these keys whenever the Notary server requests ## Example client-server-signer interaction @@ -95,45 +96,45 @@ server, and signer: ![Notary Service Sequence Diagram](https://cdn.rawgit.com/docker/notary/27469f01fe244bdf70f34219616657b336724bc3/docs/images/metadata-sequence.svg) -1. Notary server optionally supports authentication from clients using - JWT tokens. This requires an authorization server that - manages access controls, and a cert bundle from this authorization server - containing the public key it uses to sign tokens. +1. Notary server optionally supports authentication from clients using + [JWT](http://jwt.io/){:target="_blank" class="_"} tokens. 
This requires an
+   authorization server that manages access controls, and a cert bundle from this
+   authorization server containing the public key it uses to sign tokens.
    If token authentication is enabled on Notary server, then any connecting
-   client that does not have a token will be redirected to the authorization
+   client that does not have a token is redirected to the authorization
    server.
-   Please see the docs for [Docker Registry v2 authentication](
-   https://github.com/docker/distribution/blob/master/docs/spec/auth/token.md)
+   See the docs for
+   [Docker Registry v2 authentication](https://github.com/docker/distribution/blob/master/docs/spec/auth/token.md)
    for more information.
 
-2. The client will log in to the authorization server via basic auth over HTTPS,
-   obtain a bearer token, and then present the token to Notary server on future
-   requests.
+2. The client logs in to the authorization server via basic auth over HTTPS,
+   obtains a bearer token, and then presents the token to Notary server on future
+   requests.
 
-3. When clients uploads new metadata files, Notary server checks them against
-   any previous versions for conflicts, and verifies the signatures, checksums,
-   and validity of the uploaded metadata.
+3. When clients upload new metadata files, Notary server checks them against
+   any previous versions for conflicts, and verifies the signatures, checksums,
+   and validity of the uploaded metadata.
 
-4. Once all the uploaded metadata has been validated, Notary server
-   generates the timestamp (and maybe snapshot) metadata. It sends this
-   generated metadata to the Notary signer to be signed.
+4. Once all the uploaded metadata has been validated, Notary server
+   generates the timestamp (and maybe snapshot) metadata. It sends this
+   generated metadata to the Notary signer to be signed.
 
-5. Notary signer retrieves the necessary encrypted private keys from its database
-   if available, decrypts the keys, and uses them to sign the metadata. 
If - successful, it sends the signatures back to Notary server. +5. Notary signer retrieves the necessary encrypted private keys from its database + if available, decrypts the keys, and uses them to sign the metadata. If + successful, it sends the signatures back to Notary server. -6. Notary server is the source of truth for the state of a trusted collection of - data, storing both client-uploaded and server-generated metadata in the TUF - database. The generated timestamp and snapshot metadata certify that the - metadata files the client uploaded are the most recent for that trusted collection. +6. Notary server is the source of truth for the state of a trusted collection of + data, storing both client-uploaded and server-generated metadata in the TUF + database. The generated timestamp and snapshot metadata certify that the + metadata files the client uploaded are the most recent for that trusted collection. - Finally, Notary server will notify the client that their upload was successful. + Finally, Notary server notifies the client that their upload was successful. -7. The client can now immediately download the latest metadata from the server, - using the still-valid bearer token to connect. Notary server only needs to - obtain the metadata from the database, since none of the metadata has expired. +7. The client can now immediately download the latest metadata from the server, + using the still-valid bearer token to connect. Notary server only needs to + obtain the metadata from the database, since none of the metadata has expired. In the case that the timestamp has expired, Notary server would go through the entire sequence where it generates a new timestamp, request Notary signer @@ -157,39 +158,38 @@ used to communicate with Notary signer, and therefore, access to arbitrary signi operations with any key the Signer holds. 
- **Denial of Service** - An attacker could reject client requests and corrupt
-  or delete metadata from the database, thus preventing clients from being
-  able to download or upload metadata.
+  or delete metadata from the database, thus preventing clients from downloading
+  or uploading metadata.
 
 - **Malicious Content** - An attacker can create, store, and serve arbitrary
-  metadata content for one or more trusted collections. However, they do not have
-  access to any client-side keys, such as root, targets, and potentially the
-  snapshot keys for the existing trusted collections.
+  metadata content for one or more trusted collections. However, the attacker
+  has no access to any client-side keys, such as root, targets, and potentially
+  the snapshot keys for the existing trusted collections.
 
-  Only clients who have never seen the trusted collections, and who do not have any
-  form of pinned trust, can be tricked into downloading and
-  trusting the malicious content for these trusted collections.
+  A client can only be tricked into downloading and trusting the malicious
+  content for these trusted collections if it has never seen the trusted
+  collections and does not have any form of pinned trust.
 
-  Clients that have previously interacted with any trusted collection, or that have
-  their trust pinned to a specific certificate for the collections will immediately
-  detect that the content is malicious and would not trust any root, targets,
-  or (maybe) snapshot metadata for these collections.
+  If a client has previously interacted with any trusted collection or has its
+  trust pinned to a specific certificate for the collections, the client
+  immediately detects that the content is malicious and doesn't trust any root,
+  targets, or (maybe) snapshot metadata for these collections.
 
 - **Rollback, Freeze, Mix and Match** - The attacker can request that
-  the Notary signer sign any arbitrary timestamp (and maybe snapshot) metadata
-  they want. 
Attackers can launch a freeze attack, and, depending on whether - the snapshot key is available, a mix-and-match attack up to the expiration - of the targets file. - - Clients both with and without pinned trust would be vulnerable to these - attacks, so long as the attacker ensures that the version number of their - malicious metadata is higher than the version number of the most recent - good metadata that any client may have. - - Note that the timestamp and snapshot keys cannot be compromised in a server-only - compromise, so a key rotation would not be necessary. Once the Server - compromise is mitigated, an attacker will not be - able to generate valid timestamp or snapshot metadata and serve them on a - malicious mirror, for example. + the Notary signer sign any arbitrary timestamp (and maybe snapshot) metadata + they want. Attackers can launch a freeze attack, and, depending on whether + the snapshot key is available, a mix-and-match attack up to the expiration + of the targets file. + + Clients both with and without pinned trust would be vulnerable to these + attacks, so long as the attacker ensures that the version number of their + malicious metadata is higher than the version number of the most recent + good metadata that any client may have. + +> **Note**: the timestamp and snapshot keys cannot be compromised in a server-only +> compromise, so a key rotation would not be necessary. Once the Server +> compromise is mitigated, an attacker cannot generate valid timestamp or +> snapshot metadata and serve them on a malicious mirror, for example. ### Notary signer compromise @@ -201,14 +201,14 @@ private material. - **Denial of Service** - An attacker could reject all Notary server requests and corrupt or delete keys from the database (or even delete keys from an - HSM), and thus prevent Notary servers from being able to sign generated + HSM), and thus prevent Notary servers from signing generated timestamps or snapshots. 
- **Key Compromise** - If the Notary signer uses a database as its backend, an attacker can exfiltrate all the (timestamp and snapshot) private material. - Note that the capabilities of an attacker are the same as of a Notary server + The capabilities of an attacker are the same as of a Notary server compromise in terms of signing arbitrary metadata, with the important detail - that in this particular case key rotations will be necessary to recover from + that in this particular case key rotations are necessary to recover from the attack. ### Notary client keys and credentials compromise @@ -222,146 +222,146 @@ It is up to the user to choose an appropriate password, and to protect their key from offline brute-force attacks. The severity of the compromise of a trust collection owner/administrator's -decrypted key depends on the type and combination of keys that were compromised -(e.g. the snapshot key and targets key, or just the targets key). +decrypted key depends on the type and combination of keys compromised. For +example, were the snapshot and targets key both compromised, or just the targets +key? 
#### Possible attacks given the credentials compromised: -##### **Decrypted Delegation Key, only** +##### Decrypted Delegation Key, only | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Delegation key | no | no | no | -##### **Decrypted Delegation Key + Notary Service write-capable credentials** +##### Decrypted Delegation Key + Notary Service write-capable credentials | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Delegation key | limited, maybe* | limited, maybe* | limited, maybe* | - *If the Notary Service holds the snapshot key and the attacker has Notary Service - write credentials, then they have effective access to the snapshot and timestamp - keys because the server will generate and sign the snapshot and timestamp for them. +If the Notary Service holds the snapshot key and the attacker has Notary Service +write credentials, then they have effective access to the snapshot and timestamp +keys because the server generates and signs the snapshot and timestamp for them. - An attacker can add malicious content, remove legitimate content from a collection, and - mix up the targets in a collection, but only within the particular delegation - roles that the key can sign for. Depending on the restrictions on that role, - they may be restricted in what type of content they can modify. They may also - add or remove the capabilities of other delegation keys below it on the key hierarchy - (e.g. if `DelegationKey2` in the above key hierarchy were compromised, it would only be - able to modify the capabilities of `DelegationKey4` and `DelegationKey5`). 
+An attacker can add malicious content, remove legitimate content from a collection, and +mix up the targets in a collection, but only within the particular delegation +roles that the key can sign for. Depending on the restrictions on that role, +they may be restricted in what type of content they can modify. They may also +add or remove the capabilities of other delegation keys below it on the key hierarchy +For example, if `DelegationKey2` in the above key hierarchy is compromised, the +compromised key could +only modify the capabilities of `DelegationKey4` and `DelegationKey5`. -##### **Decrypted Delegation Key + Decrypted Snapshot Key, only** +##### Decrypted Delegation Key + Decrypted Snapshot Key, only | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Delegation key, Snapshot key | no | no | no | - The attacker does not have access to the timestamp key, which is always held by the Notary - Service, and will be unable to set up a malicious mirror. +The attacker does not have access to the timestamp key, which is always held by the Notary +Service, and cannot set up a malicious mirror. -##### **Decrypted Delegation Key + Decrypted Snapshot Key + Notary Service write-capable credentials** +##### Decrypted Delegation Key + Decrypted Snapshot Key + Notary Service write-capable credentials | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Delegation key, Snapshot key | limited | limited | limited | - The Notary Service always holds the timestamp key. If the attacker has Notary Service - write credentials, then they have effective access to the timestamp key because the server - will generate and sign the timestamp for them. +The Notary Service always holds the timestamp key. 
If the attacker has Notary Service +write credentials, then they have effective access to the timestamp key because the server +generates and signs the timestamp for them. - An attacker can add malicious content, remove legitimate content from a collection, and - mix up the targets in a collection, but only within the particular delegation - roles that the key can sign for. Depending on the restrictions on that role, - they may be restricted in what type of content they can modify. They may also - add or remove the capabilities of other delegation keys below it on the key hierarchy - (e.g. if `DelegationKey2` in the above key hierarchy were compromised, it would only be - able to modify the capabilities of `DelegationKey4` and `DelegationKey5`). +An attacker can add malicious content, remove legitimate content from a collection, and +mix up the targets in a collection, but only within the particular delegation +roles that the key can sign for. Depending on the restrictions on that role, +they may be restricted in what type of content they can modify. A key may also +add or remove the capabilities of other delegation keys below it on the key hierarchy +For example, if `DelegationKey2` in the above key hierarchy is compromised, it can +only modify the capabilities of `DelegationKey4` and `DelegationKey5`. 
-##### **Decrypted Targets Key, only** +##### Decrypted Targets Key, only | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Targets key | no | no | no | -##### **Decrypted Targets Key + Notary Service write-capable credentials** +##### Decrypted Targets Key + Notary Service write-capable credentials | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Targets key | maybe* | maybe* | limited, maybe* | - *If the Notary Service holds the snapshot key and the attacker has Notary Service - write credentials, then they have effective access to the snapshot and timestamp - keys because the server will generate and sign the snapshot and timestamp for them. +If the Notary Service holds the snapshot key and the attacker has Notary Service +write credentials, then they have effective access to the snapshot and timestamp +keys because the server generates and signs the snapshot and timestamp for them. - An attacker can add any malicious content, remove any legitimate content from a - collection, and mix up the targets in a collection. They may also add or remove - the capabilities of any top level delegation key or role (e.g. `Delegation1`, - `Delegation2`, and `Delegation3` in the key hierarchy diagram). If they remove - the roles entirely, they'd break the trust chain to the lower delegation roles - (e.g. `Delegation4`, `Delegation5`). +An attacker can add any malicious content, remove any legitimate content from a +collection, and mix up the targets in a collection. They may also add or remove +the capabilities of any top level delegation key or role, such as `Delegation1`, +`Delegation2`, and `Delegation3` in the key hierarchy diagram. 
If they remove +the roles entirely, they break the trust chain to the lower delegation roles, +such as `Delegation4` or `Delegation5`. -##### **Decrypted Targets Key + Decrypted Snapshot Key, only** +##### Decrypted Targets Key + Decrypted Snapshot Key, only | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Targets key, Snapshot key | no | no | no | - The attacker does not have access to the timestamp key, which is always held by the Notary - Service, and will be unable to set up a malicious mirror. +The attacker does not have access to the timestamp key, which is always held by the Notary +Service, and cannot set up a malicious mirror. -##### **Decrypted Targets Key + Decrypted Snapshot Key + Notary Service write-capable credentials** +##### Decrypted Targets Key + Decrypted Snapshot Key + Notary Service write-capable credentials | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | Targets key, Snapshot key | yes | yes | limited | - The Notary Service always holds the timestamp key. If the attacker has Notary Service - write credentials, then they have effective access to the timestamp key because the server - will generate and sign the timestamp for them. +The Notary Service always holds the timestamp key. If the attacker has Notary Service +write credentials, then they have effective access to the timestamp key because the server +generates and signs the timestamp for them. - An attacker can add any malicious content, remove any legitimate content from a - collection, and mix up the targets in a collection. They may also add or remove - the capabilities of any top level delegation key or role (e.g. `Delegation1`, - `Delegation2`, and `Delegation3` in the key hierarchy diagram). 
If they remove - the roles entirely, they'd break the trust chain to the lower delegation roles - (e.g. `Delegation4`, `Delegation5`). +An attacker can add any malicious content, remove any legitimate content from a +collection, and mix up the targets in a collection. They may also add or remove +the capabilities of any top level delegation key or role, for example, `Delegation1`, +`Delegation2`, and `Delegation3` in the key hierarchy diagram. If they remove +the roles entirely, they'd break the trust chain to the lower delegation roles, +such as `Delegation4` or `Delegation5`. -##### **Decrypted Root Key + none or any combination of decrypted keys, only** +##### Decrypted Root Key + none or any combination of decrypted keys, only | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | All keys | yes | yes | yes | - No other keys are needed, since the attacker can just any rotate or all of them to ones that they - generate. With these keys, they can set up a mirror to serve malicious data - any malicious data - at all, given that they have access to all the keys. +No other keys are needed, since the attacker can just any rotate or all of them to ones that they +generate. With these keys, they can set up a mirror to serve malicious data - any malicious data +at all, given that they have access to all the keys. 
-##### **Decrypted Root Key + none or any combination of decrypted keys + Notary Service write-capable credentials** +##### Decrypted Root Key + none or any combination of decrypted keys + Notary Service write-capable credentials | Keys compromised | Malicious Content | Rollback, Freeze, Mix and Match | Denial of Service | |------------------|-------------------|---------------------------------|-------------------| | All keys | yes | yes | yes | - *If the Notary Service holds the snapshot key and the attacker has Notary Service - write credentials, then they won't even have to rotate the snapshot and timestamp - keys because the server will generate and sign the snapshot and timestamp for them. +If the Notary Service holds the snapshot key and the attacker has Notary Service +write credentials, then they don't even need to rotate the snapshot and timestamp +keys because the server generates and signs the snapshot and timestamp for them. #### Mitigations If a root key compromise is detected, the root key holder should contact whomever runs the notary service to manually reverse any malicious changes to -the repository, and immediately rotate the root key. This will create a fork +the repository, and immediately rotate the root key. This creates a fork of the repository history, and thus break existing clients who have downloaded any of the malicious changes. If a targets key compromise is detected, the root key holder must rotate the compromised key and push a clean set of targets using the new key. -If a delegations key compromise is detected, a higher level key (e.g. if -`Delegation4` were compromised, then `Delegation2`; if -`Delegation2` were compromised, then the `Targets` key) +If a delegations key compromise is detected, a higher level key holder must rotate the compromised key, and push a clean set of targets using the new key. 
If a Notary Service credential compromise is detected, the credentials should be diff --git a/opensource/index.md b/opensource/index.md index 80be7708ab3..de4f316808a 100644 --- a/opensource/index.md +++ b/opensource/index.md @@ -59,7 +59,7 @@ at [docs.docker.com](https://docs.docker.com/). [https://github.com/docker/docker.github.io/issues](https://github.com/docker/docker.github.io/issues). This is similar to clicking **Request doc changes** on a published docs - page, but if you manually file an issue you have to fill in links to + page, but if you manually file an issue you need to fill in links to the related pages. - Fork the documentation, make changes or add new content on your local @@ -101,8 +101,8 @@ repository](https://github.com/docker/community/blob/master/README.md){: target="_blank" class="_"} for resources and information on the community. The topics in this guide on [Other ways to contribute](/opensource/ways/) -provide some additional information, but it's likely you'll find the community -information you are looking for on the GitHub repository. +provide some additional information, but the community +information you are looking for is probably available on the GitHub repository. ## Looking for Moby? diff --git a/opensource/ways.md b/opensource/ways.md index ef3ba41e9b1..46dc4fc36aa 100644 --- a/opensource/ways.md +++ b/opensource/ways.md @@ -26,10 +26,9 @@ by becoming a co-organizer of a Docker Meetup group. If a Meetup group does not already exist in your area and you are willing to start a new one, the best way to proceed is to contact us so that we can create -it for you. We will always agree to create a new Docker Meetup group as long as -it makes sense geographically speaking. +it for you, as long as it makes sense geographically speaking. 
-If you have already created a Docker Meetup group that is fine, we will simply +If you have already created a Docker Meetup group that is fine, we ask you to add us as a co-organizer so that we can ensure a consistent support to the group in terms of community and Meetup management. @@ -51,7 +50,7 @@ suggestions to help you get started: ### How Docker can help you organize -We can support the co-organizers of the Docker Meetup Groups based on their specific needs. For instance, we might / will be able to: +We can support the co-organizers of the Docker Meetup Groups based on their specific needs. For instance, we might be able to: * Send you Docker T-shirts and stickers * Put you in contact with other people interested in being a co-organizer of a Docker Meetup group, and which are in the same area @@ -67,7 +66,7 @@ We can support the co-organizers of the Docker Meetup Groups based on their spec ### Want to host a Docker Meetup? We are always looking for new office space to host Docker Meetups. If your -company is willing to host a Docker Meetup, please contact us by email at +company is willing to host a Docker Meetup, contact us by email at meetup@docker.com. Previous Docker Meetups have been hosted by companies such as Rackspace, Twitter, MongoDB, BrightCove, DigitalOcean, Viadeo and Edmodo. #### How many attendees? @@ -90,7 +89,7 @@ resources and information on the community. Those pages are the most up-to-date for finding out about the community and making connections. The topics below provide some additional links, but it's likely that everything -you need will be on the GitHub repository. +you need is on the GitHub repository. 
### Become a Mentor
 
diff --git a/registry/compatibility.md b/registry/compatibility.md
index 90cef416c90..63c2128276b 100644
--- a/registry/compatibility.md
+++ b/registry/compatibility.md
@@ -5,20 +5,20 @@ title: Registry compatibility
 ---
 
 ## Synopsis
 
-*If a manifest is pulled by _digest_ from a registry 2.3 with Docker Engine 1.9
+If a manifest is pulled by _digest_ from a registry 2.3 with Docker Engine 1.9
 and older, and the manifest was pushed with Docker Engine 1.10, a security check
-will cause the Engine to receive a manifest it cannot use and the pull will fail.*
+causes the Engine to receive a manifest it cannot use and the pull fails.
 
 ## Registry manifest support
 
 Historically, the registry has supported a [single manifest type](./spec/manifest-v2-1.md)
 known as _Schema 1_.
 
-With the move toward multiple architecture images the distribution project
-introduced two new manifest types: Schema 2 manifests and manifest lists. The
-registry 2.3 supports all three manifest types and in order to be compatible
-with older Docker engines will, in certain cases, do an on-the-fly
-transformation of a manifest before serving the JSON in the response.
+With the move toward multiple architecture images, the distribution project
+introduced two new manifest types: Schema 2 manifests and manifest lists. Registry
+2.3 supports all three manifest types and sometimes performs an on-the-fly
+transformation of a manifest before serving the JSON in the response, to
+preserve compatibility with older versions of Docker Engine.
 
 This conversion has some implications for pulling manifests by digest and this
 document enumerates these implications.
@@ -28,7 +28,7 @@ document enumerates these implications.
 
 Manifests are stored and retrieved in the registry by keying off a digest
 representing a hash of the contents. One of the advantages provided by CAS is
-security: if the contents are changed, then the digest will no longer match. 
+security: if the contents are changed, then the digest no longer matches. This prevents any modification of the manifest by a MITM attack or an untrusted third party. @@ -36,9 +36,9 @@ When a manifest is stored by the registry, this digest is returned in the HTTP response headers and, if events are configured, delivered within the event. The manifest can either be retrieved by the tag, or this digest. -For registry versions 2.2.1 and below, the registry will always store and -serve _Schema 1_ manifests. The Docker Engine 1.10 will first -attempt to send a _Schema 2_ manifest, falling back to sending a +For registry versions 2.2.1 and below, the registry always stores and +serves _Schema 1_ manifests. Engine 1.10 first +attempts to send a _Schema 2_ manifest, falling back to sending a Schema 1 type manifest when it detects that the registry does not support the new version. @@ -47,32 +47,32 @@ support the new version. ### Manifest push with Docker 1.10 -The docker engine will construct a _Schema 2_ manifest which the -registry will persist to disk. +The Engine constructs a _Schema 2_ manifest which the +registry persists to disk. When the manifest is pulled by digest or tag with Docker Engine 1.10, a -_Schema 2_ manifest will be returned. The Docker Engine 1.10 +_Schema 2_ manifest is returned. Docker Engine 1.10 understands the new manifest format. When the manifest is pulled by *tag* with Docker Engine 1.9 and older, the manifest is converted on-the-fly to _Schema 1_ and sent in the response. The Docker Engine 1.9 is compatible with this older format. -*When the manifest is pulled by _digest_ with Docker Engine 1.9 and older, the -same rewriting process will not happen in the registry. If this were to happen +When the manifest is pulled by _digest_ with Docker Engine 1.9 and older, the +same rewriting process does not happen in the registry. 
If it did, the digest would no longer match the hash of the manifest and would violate the -constraints of CAS.* +constraints of CAS. For this reason if a manifest is pulled by _digest_ from a registry 2.3 with Docker Engine 1.9 and older, and the manifest was pushed with Docker Engine 1.10, a -security check will cause the Engine to receive a manifest it cannot use and the -pull will fail. +security check causes the Engine to receive a manifest it cannot use and the +pull fails. ### Manifest push with Docker 1.9 and older -The Docker Engine will construct a _Schema 1_ manifest which the -registry will persist to disk. +The Docker Engine constructs a _Schema 1_ manifest which the +registry persists to disk. When the manifest is pulled by digest or tag with any docker version, a -_Schema 1_ manifest will be returned. +_Schema 1_ manifest is returned. diff --git a/registry/deploying.md b/registry/deploying.md index 1c9048b1404..9934075ce39 100644 --- a/registry/deploying.md +++ b/registry/deploying.md @@ -183,7 +183,7 @@ service](#run-a-registry-as-a-service) below. These examples assume the following: -- Your registry will be accessible on `https://myregistry.domain.com/`. +- Your registry URL is `https://myregistry.domain.com/`. - Your DNS, routing, and firewall settings allow access to the registry's host on port 5000. - You have already obtained a certificate from a certificate authority (CA). @@ -279,8 +279,8 @@ or a service with either only a single node or a node constraint. fully replicated service. Each worker can write to the storage back-end without causing write conflicts. -- If you use a local bind mount or volume, each worker node will write to its - own storage location, which means that each registry will contain a different +- If you use a local bind mount or volume, each worker node writes to its + own storage location, which means that each registry contains a different data set. 
You can solve this problem by using a single-replica service and a node constraint to ensure that only a single worker is writing to the bind mount. @@ -348,15 +348,15 @@ the following must be the same: - HTTP Secret - Redis Cache (if configured) -If any of these are different, the registry will have trouble serving requests. +Differences in any of the above cause problems serving requests. As an example, if you're using the filesystem driver, all registry instances -must have access to the same filesystem root, which means they should be in -the same machine. For other drivers, such as s3 or azure, they should be -accessing the same resource, and will likely share an identical configuration. +must have access to the same filesystem root, on +the same machine. For other drivers, such as S3 or Azure, they should be +accessing the same resource and share an identical configuration. The _HTTP Secret_ coordinates uploads, so also must be the same across -instances. Configuring different redis instances will work (at the time -of writing), but will not be optimal if the instances are not shared, causing -more requests to be directed to the backend. +instances. Configuring different redis instances works (at the time +of writing), but is not optimal if the instances are not shared, because +more requests are directed to the backend. ### Important/Required HTTP-Headers @@ -377,11 +377,11 @@ without credentials. The response should include a `WWW-Authenticate` challenge, providing guidance on how to authenticate, such as with basic auth or a token service. If the load balancer has health checks, it is recommended to configure it to consider a 401 response as healthy and any other as down. -This will secure your registry by ensuring that configuration problems with +This secures your registry by ensuring that configuration problems with authentication don't accidentally expose an unprotected registry. 
If you're using a less sophisticated load balancer, such as Amazon's Elastic Load Balancer, that doesn't allow one to change the healthy response code, health -checks can be directed at "/", which will always return a `200 OK` response. +checks can be directed at "/", which always returns a `200 OK` response. ## Restricting access @@ -436,7 +436,7 @@ secrets. ``` 4. Try to pull an image from the registry, or push an image to the registry. - These commands will fail. + These commands fail. 5. Log in to the registry. @@ -518,7 +518,7 @@ following: distributable. This means that when you push an image based on one of these images to your private registry, the non-distributable layers are **not** pushed, but are always fetched from their authorized location. This is fine - for internet-connected hosts, but will not work in an air-gapped set-up. + for internet-connected hosts, but not in an air-gapped set-up. In Docker 17.06 and higher, you can configure the Docker daemon to allow pushing non-distributable layers to private registries, in this scenario. @@ -546,7 +546,7 @@ following: 3. Restart the registry if it does not start automatically. 4. When you push images to the registries in the list, their - non-distributable layers will be pushed to the registry. + non-distributable layers are pushed to the registry. > **Warning**: Non-distributable artifacts typically have restrictions on > how and where they can be distributed and shared. 
Only use this feature @@ -557,7 +557,7 @@ following: ## Next steps -You will find more specific and advanced information in the following sections: +More specific and advanced information is available in the following sections: - [Configuration reference](configuration.md) - [Working with notifications](notifications.md) diff --git a/registry/garbage-collection.md b/registry/garbage-collection.md index f1fe0241e94..cc301c6a801 100644 --- a/registry/garbage-collection.md +++ b/registry/garbage-collection.md @@ -33,8 +33,8 @@ documentation [here](spec/api.md#deleting-a-layer) and to the target and makes them eligible for garbage collection. It also makes them unable to be read via the API. -If a layer is deleted it will be removed from the filesystem when garbage collection -is run. If a manifest is deleted the layers to which it refers will be removed from +If a layer is deleted, it is removed from the filesystem when garbage collection +is run. If a manifest is deleted the layers to which it refers are removed from the filesystem if no other manifests refers to them. @@ -58,9 +58,9 @@ A -----> a B ``` In this state layer `c` no longer has a reference and is eligible for garbage -collection. Layer `a` had one reference removed but will not be garbage +collection. Layer `a` had one reference removed but not garbage collected as it is still referenced by manifest `A`. The blob representing -manifest `B` will also be eligible for garbage collection. +manifest `B` is eligible for garbage collection. After garbage collection has been run, manifest `A` and its blobs remain. @@ -77,18 +77,14 @@ scans all the manifests in the registry. From these manifests, it constructs a set of content address digests. This set is the 'mark set' and denotes the set of blobs to *not* delete. Secondly, in the 'sweep' phase, the process scans all the blobs and if a blob's content address digest is not in the mark set, the -process will delete it. +process deletes it. 
> **Note**: You should ensure that the registry is in read-only mode or not running at > all. If you were to upload an image while garbage collection is running, there is the -> risk that the image's layers will be mistakenly deleted, leading to a corrupted image. - -This type of garbage collection is known as stop-the-world garbage collection. In future -registry versions the intention is that garbage collection will be an automated background -action and this manual process will no longer apply. - +> risk that the image's layers are mistakenly deleted leading to a corrupted image. +This type of garbage collection is known as stop-the-world garbage collection. ## Run garbage collection @@ -96,9 +92,9 @@ Garbage collection can be run as follows `bin/registry garbage-collect [--dry-run] /path/to/config.yml` -The garbage-collect command accepts a `--dry-run` parameter, which will print the progress +The garbage-collect command accepts a `--dry-run` parameter, which prints the progress of the mark and sweep phases without removing any data. Running with a log level of `info` -will give a clear indication of what will and will not be deleted. +gives a clear indication of items eligible for deletion. The config.yml file should be in the following format: diff --git a/registry/insecure.md b/registry/insecure.md index 933d27325d6..c7232576e2b 100644 --- a/registry/insecure.md +++ b/registry/insecure.md @@ -110,12 +110,12 @@ This sections lists some common failures and how to recover from them. ### Failing... Failing to configure the Engine daemon and trying to pull from a registry that is not using -TLS will results in the following message: +TLS results in the following message: ```none FATA[0000] Error response from daemon: v1 ping attempt failed with error: Get https://myregistrydomain.com:5000/v1/_ping: tls: oversized record received with length 20527. 
-If this private registry supports only HTTP or HTTPS with an unknown CA certificate,please add +If this private registry supports only HTTP or HTTPS with an unknown CA certificate, add `--insecure-registry myregistrydomain.com:5000` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/myregistrydomain.com:5000/ca.crt diff --git a/registry/notifications.md b/registry/notifications.md index 3f8632ef45f..79362bfe099 100644 --- a/registry/notifications.md +++ b/registry/notifications.md @@ -40,10 +40,10 @@ them to the configuration. A simple example follows: The above would configure the registry with an endpoint to send events to `https://mylistener.example.com/event`, with the header "Authorization: Bearer ". The request would timeout after 500 milliseconds. If -5 failures happen consecutively, the registry will backoff for 1 second before +5 failures happen consecutively, the registry backs off for 1 second before trying again. -For details on the fields, please see the [configuration documentation](configuration.md#notifications). +For details on the fields, see the [configuration documentation](configuration.md#notifications). A properly configured endpoint should lead to a log message from the registry upon startup: @@ -117,8 +117,8 @@ manifest: The target struct of events which are sent when manifests and blobs are deleted -will contain a subset of the data contained in Get and Put events. Specifically, -only the digest and repository will be sent. +contains a subset of the data contained in Get and Put events. Specifically, +only the digest and repository are sent. ```json "target": { @@ -148,7 +148,7 @@ group unrelated events and send them in the same envelope to reduce the total number of requests. 
The full package has the mediatype -"application/vnd.docker.distribution.events.v1+json", which will be set on the +"application/vnd.docker.distribution.events.v1+json", which is set on the request coming to an endpoint. An example of a full event may look as follows: @@ -244,7 +244,7 @@ Content-Type: application/vnd.docker.distribution.events.v1+json The registry is fairly accepting of the response codes from endpoints. If an endpoint responds with any 2xx or 3xx response code (after following -redirects), the message will be considered delivered and discarded. +redirects), the message is considered to have been delivered, and is discarded. In turn, it is recommended that endpoints are accepting of incoming responses, as well. While the format of event envelopes are standardized by media type, @@ -312,15 +312,15 @@ monitor the size ("Pending" above) of the endpoint queues. If failures or queue sizes are increasing, it can indicate a larger problem. The logs are also a valuable resource for monitoring problems. A failing -endpoint will lead to messages similar to the following: +endpoint leads to messages similar to the following: -``` +```none ERRO[0340] retryingsink: error writing events: httpSink{http://localhost:5003/callback}: error posting: Post http://localhost:5003/callback: dial tcp 127.0.0.1:5003: connection refused, retrying WARN[0340] httpSink{http://localhost:5003/callback} encountered too many errors, backing off ``` -The above indicates that several errors have led to a backoff and the registry -will wait before retrying. +The above indicates that several errors caused a backoff and the registry +waits before retrying. ## Considerations @@ -328,7 +328,7 @@ Currently, the queues are inmemory, so endpoints should be _reasonably reliable_. They are designed to make a best-effort to send the messages but if an instance is lost, messages may be dropped. 
If an endpoint goes down, care should be taken to ensure that the registry instance is not terminated before -the endpoint comes back up or messages will be lost. +the endpoint comes back up or messages are lost. This can be mitigated by running endpoints in close proximity to the registry instances. One could run an endpoint that pages to disk and then forwards a @@ -338,6 +338,6 @@ The notification system is designed around a series of interchangeable _sinks_ which can be wired up to achieve interesting behavior. If this system doesn't provide acceptable guarantees, adding a transactional `Sink` to the registry is a possibility, although it may have an effect on request service time. -Please see the +See the [godoc](http://godoc.org/github.com/docker/distribution/notifications#Sink) for more information. diff --git a/registry/recipes/apache.md b/registry/recipes/apache.md index 4d231ebe12a..4b165a0adf3 100644 --- a/registry/recipes/apache.md +++ b/registry/recipes/apache.md @@ -26,7 +26,7 @@ We also implement push restriction (to a limited user group) for the sake of the While this model gives you the ability to use whatever authentication backend you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. -Furthermore, introducing an extra http layer in your communication pipeline will make it more complex to deploy, maintain, and debug, and will possibly create issues. +Furthermore, introducing an extra http layer in your communication pipeline adds complexity when deploying, maintaining, and debugging. 
## Setting things up @@ -40,7 +40,7 @@ Run the following script: mkdir -p auth mkdir -p data -# This is the main apache configuration you will use +# This is the main apache configuration cat < auth/httpd.conf LoadModule headers_module modules/mod_headers.so diff --git a/registry/recipes/index.md b/registry/recipes/index.md index e0e9b27aea4..f5d10390a59 100644 --- a/registry/recipes/index.md +++ b/registry/recipes/index.md @@ -4,15 +4,12 @@ keywords: registry, on-prem, images, tags, repository, distribution, recipes, ad title: Recipes overview --- -You will find here a list of "recipes", end-to-end scenarios for exotic or otherwise advanced use-cases. - -Most users are not expected to have a use for these. +This list of "recipes" provides end-to-end scenarios for exotic or otherwise advanced use-cases. +These recipes are not useful for most standard set-ups. ## Requirements -You should have followed entirely the basic [deployment guide](../deploying.md). - -If you have not, please take the time to do so. +Before following these steps, work through the [deployment guide](../deploying.md). At this point, it's assumed that: diff --git a/registry/recipes/mirror.md b/registry/recipes/mirror.md index 0bb37bbd443..9500309804e 100644 --- a/registry/recipes/mirror.md +++ b/registry/recipes/mirror.md @@ -8,12 +8,11 @@ redirect_from: ## Use-case -If you have multiple instances of Docker running in your environment (e.g., -multiple physical or virtual machines, all running the Docker daemon), each time -one of them requires an image that it doesn’t have it will go out to the -internet and fetch it from the public Docker registry. By running a local -registry mirror, you can keep most of the redundant image fetch traffic on your -local network. 
+If you have multiple instances of Docker running in your environment, such as +multiple physical or virtual machines all running Docker, each daemon goes out +to the internet and fetches an image it doesn't have locally, from the Docker +repository. You can run a local registry mirror and point all your daemons +there, to avoid this extra internet traffic. @@ -30,7 +29,7 @@ Hub can be mirrored. ### Solution -The Registry can be configured as a pull through cache. In this mode a Registry +The Registry can be configured as a pull through cache. In this mode a Registry responds to all normal docker pull requests but stores all content locally. ## How does it work? @@ -42,15 +41,15 @@ serve the image from its own storage. ### What if the content changes on the Hub? -When a pull is attempted with a tag, the Registry will check the remote to -ensure if it has the latest version of the requested content. If it doesn't it -will fetch the latest content and cache it. +When a pull is attempted with a tag, the Registry checks the remote to +verify whether it has the latest version of the requested content. If not, it +fetches and caches the latest content. ### What about my disk? In environments with high churn rates, stale data can build up in the cache. -When running as a pull through cache the Registry will periodically remove old -content to save disk space. Subsequent requests for removed content will cause a +When running as a pull through cache the Registry periodically removes old +content to save disk space. Subsequent requests for removed content cause a remote fetch and local re-caching. To ensure best performance and guarantee correctness the Registry cache should @@ -61,16 +60,16 @@ be configured to use the `filesystem` driver for storage. The easiest way to run a registry as a pull through cache is to run the official Registry image. -Multiple registry caches can be deployed over the same back-end. 
A single -registry cache will ensure that concurrent requests do not pull duplicate data, -but this property will not hold true for a registry cache cluster. +Multiple registry caches can be deployed over the same back-end. A single +registry cache ensures that concurrent requests do not pull duplicate data, +but this property does not hold true for a registry cache cluster. ### Configure the cache To configure a Registry to run as a pull through cache, the addition of a `proxy` section is required to the config file. -In order to access private images on the Docker Hub, a username and password can +To access private images on the Docker Hub, a username and password can be supplied. ```yaml @@ -81,11 +80,11 @@ proxy: ``` > **Warning**: If you specify a username and password, it's very important to -> understand that private resources that this user has access to Docker Hub will -> be made available on your mirror. **You must secure your mirror** by +> understand that private resources that this user has access to Docker Hub is +> made available on your mirror. **You must secure your mirror** by > implementing authentication if you expect these resources to stay private! -> **Warning**: In order for the scheduler to clean up old entries, `delete` must +> **Warning**: For the scheduler to clean up old entries, `delete` must > be enabled in the registry configuration. See > [Registry Configuration](/registry/configuration.md) for more details. @@ -114,7 +113,7 @@ Save the file and reload Docker for the change to take effect. > ``` > > It's telling you that the file doesn't exist yet in the local cache and is -> being pulled from upstream. +> being pulled from upstream. 
## Use case: the China registry mirror @@ -130,7 +129,7 @@ $ docker pull registry.docker-cn.com/library/ubuntu You can add `"https://registry.docker-cn.com"` to the `registry-mirrors` array in [`/etc/docker/daemon.json`](/engine/reference/commandline/dockerd.md#daemon-configuration-file) -to pull from the China registry mirror by default. +to pull from the China registry mirror by default. ```json { diff --git a/registry/recipes/nginx.md b/registry/recipes/nginx.md index a8dbce4e707..73370f7e0f5 100644 --- a/registry/recipes/nginx.md +++ b/registry/recipes/nginx.md @@ -38,9 +38,9 @@ you want through the secondary authentication mechanism implemented inside your proxy, it also requires that you move TLS termination from the Registry to the proxy itself. -Furthermore, introducing an extra http layer in your communication pipeline will -make it more complex to deploy, maintain, and debug, and will possibly create -issues. Make sure the extra complexity is required. +Furthermore, introducing an extra http layer in your communication pipeline +makes it more complex to deploy, maintain, and debug. Make sure the extra +complexity is required. For instance, Amazon's Elastic Load Balancer (ELB) in HTTPS mode already sets the following client header: @@ -61,7 +61,7 @@ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto $scheme; ``` -Otherwise Nginx will reset the ELB's values, and the requests will not be routed +Otherwise Nginx resets the ELB's values, and the requests are not routed properly. For more information, see [#970](https://github.com/docker/distribution/issues/970). @@ -75,7 +75,7 @@ Review the [requirements](/registry/recipes/index.md#requirements), then follow mkdir -p auth data ``` -2. Create the main nginx configuration you will use. Paste this code block into a new file called `auth/nginx.conf`: +2. Create the main nginx configuration. 
Paste this code block into a new file called `auth/nginx.conf`: ```conf events { @@ -91,7 +91,7 @@ Review the [requirements](/registry/recipes/index.md#requirements), then follow ## Set a variable to help us decide if we need to add the ## 'Docker-Distribution-Api-Version' header. ## The registry always sets this header. - ## In the case of nginx performing auth, the header will be unset + ## In the case of nginx performing auth, the header is unset ## since nginx is auth-ing before proxying. map $upstream_http_docker_distribution_api_version $docker_distribution_api_version { '' 'registry/2.0'; @@ -128,7 +128,7 @@ Review the [requirements](/registry/recipes/index.md#requirements), then follow auth_basic "Registry realm"; auth_basic_user_file /etc/nginx/conf.d/nginx.htpasswd; - ## If $docker_distribution_api_version is empty, the header will not be added. + ## If $docker_distribution_api_version is empty, the header is not added. ## See the map directive above where this variable is defined. add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always; @@ -148,7 +148,7 @@ Review the [requirements](/registry/recipes/index.md#requirements), then follow ```bash $ docker run --rm --entrypoint htpasswd registry:2 -Bbn testuser testpassword > auth/nginx.htpasswd ``` - + > **Note**: If you do not want to use `bcrypt`, you can omit the `-B` parameter. 4. Copy your certificate files to the `auth/` directory. 
diff --git a/registry/storage-drivers/azure.md b/registry/storage-drivers/azure.md index 03b55498e9c..d79d3a4d043 100644 --- a/registry/storage-drivers/azure.md +++ b/registry/storage-drivers/azure.md @@ -12,7 +12,7 @@ An implementation of the `storagedriver.StorageDriver` interface which uses [Mic |:--------------|:---------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `accountname` | yes | Name of the Azure Storage Account. | | `accountkey` | yes | Primary or Secondary Key for the Storage Account. | -| `container` | yes | Name of the Azure root storage container in which all registry data will be stored. Must comply the storage container name [requirements](https://docs.microsoft.com/rest/api/storageservices/fileservices/naming-and-referencing-containers--blobs--and-metadata). | +| `container` | yes | Name of the Azure root storage container in which all registry data is stored. Must comply with the storage container name [requirements](https://docs.microsoft.com/rest/api/storageservices/fileservices/naming-and-referencing-containers--blobs--and-metadata). | | `realm` | no | Domain name suffix for the Storage Service API endpoint. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. By default, this is `core.windows.net`. | diff --git a/registry/storage-drivers/gcs.md b/registry/storage-drivers/gcs.md index bc68dab9278..2c74f34c597 100644 --- a/registry/storage-drivers/gcs.md +++ b/registry/storage-drivers/gcs.md @@ -48,7 +48,7 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Goog no - This is a prefix that will be applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary. 
+ This is a prefix that is applied to all Google Cloud Storage keys to allow you to segment data in your bucket if necessary. @@ -72,4 +72,4 @@ An implementation of the `storagedriver.StorageDriver` interface which uses Goog **Note** Instead of a key file you can use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). +`rootdirectory`: (optional) The root directory tree in which all registry files are stored. Defaults to the empty string (bucket root). diff --git a/registry/storage-drivers/index.md b/registry/storage-drivers/index.md index acc0fba61da..e8f612982e0 100644 --- a/registry/storage-drivers/index.md +++ b/registry/storage-drivers/index.md @@ -36,7 +36,7 @@ The preferred method of selecting a storage driver is using the `StorageDriverFa Storage driver factories may be registered by name using the `factory.Register` method, and then later invoked by calling `factory.Create` with a driver name and parameters map. If no such storage driver can be found, -`factory.Create` will return an `InvalidStorageDriverError`. +`factory.Create` returns an `InvalidStorageDriverError`. ## Driver contribution diff --git a/registry/storage-drivers/s3.md b/registry/storage-drivers/s3.md index b6ace9cdae7..7e201079842 100644 --- a/registry/storage-drivers/s3.md +++ b/registry/storage-drivers/s3.md @@ -92,7 +92,7 @@ Amazon S3 or S3 compatible services for object storage. Optional KMS key ID to use for encryption (encrypt must be true, or this - parameter will be ignored). The default is none. + parameter is ignored). The default is none. @@ -139,7 +139,7 @@ Amazon S3 or S3 compatible services for object storage. no - This is a prefix that will be applied to all S3 keys to allow you to segment data in your bucket if necessary. 
+ This is a prefix that is applied to all S3 keys to allow you to segment data in your bucket if necessary. @@ -161,7 +161,7 @@ Amazon S3 or S3 compatible services for object storage. `secretkey`: Your aws secret key. > **Note** You can provide empty strings for your access and secret keys to run the driver -> on an ec2 instance and will handle authentication with the instance's credentials. If you +> on an ec2 instance, which handles authentication with the instance's credentials. If you > use [IAM roles](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html), > omit these keys to fetch temporary credentials from IAM. @@ -173,15 +173,15 @@ Amazon S3 or S3 compatible services for object storage. `encrypt`: (optional) Whether you would like your data encrypted on the server side (defaults to false if not specified). -`keyid`: (optional) Whether you would like your data encrypted with this KMS key ID (defaults to none if not specified, will be ignored if encrypt is not true). +`keyid`: (optional) Whether you would like your data encrypted with this KMS key ID (defaults to none if not specified, is ignored if encrypt is not true). -`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. Note that while setting this to false will improve performance, it is not recommended due to security concerns. +`secure`: (optional) Whether you would like to transfer data to the bucket over ssl or not. Defaults to true (meaning transferring over ssl) if not specified. While setting this to false improves performance, it is not recommended due to security concerns. -`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. 
This defaults to false if not specified (note that the eu-central-1 region does not work with version 2 signatures, so the driver will error out if initialized with this region and v4auth set to false) +`v4auth`: (optional) Whether you would like to use aws signature version 4 with your requests. This defaults to `false` if not specified. The `eu-central-1` region does not work with version 2 signatures, so the driver errors out if initialized with this region and v4auth set to `false`. -`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections will benefit from larger chunk sizes. +`chunksize`: (optional) The default part size for multipart uploads (performed by WriteStream) to S3. The default is 10 MB. Keep in mind that the minimum part size for S3 is 5MB. Depending on the speed of your connection to S3, a larger chunk size may result in better performance; faster connections benefit from larger chunk sizes. -`rootdirectory`: (optional) The root directory tree in which all registry files will be stored. Defaults to the empty string (bucket root). +`rootdirectory`: (optional) The root directory tree in which all registry files are stored. Defaults to the empty string (bucket root). `storageclass`: (optional) The storage class applied to each registry file. Defaults to STANDARD. Valid options are STANDARD and REDUCED_REDUNDANCY. @@ -223,10 +223,10 @@ See [the S3 policy documentation](http://docs.aws.amazon.com/AmazonS3/latest/dev ## Use Case Adding CloudFront as a middleware for your S3 backed registry can dramatically -improve pull times. Your registry will have the ability to retrieve your images +improve pull times. 
Your registry can retrieve your images from edge servers, rather than the geographically limited location of your S3 -bucket. The farther your registry is from your bucket, the more improvements you -will see. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/). +bucket. The farther your registry is from your bucket, the more improvements are +possible. See [Amazon CloudFront](https://aws.amazon.com/cloudfront/details/). ## Configuring CloudFront for Distribution @@ -257,10 +257,10 @@ Defaults can be kept in most areas except: ## Registry configuration Here the `middleware` option is used. It is still important to keep the -`storage` option as CloudFront will only handle `pull` actions; `push` actions +`storage` option, because CloudFront only handles `pull` actions; `push` actions are still directly written to S3. -The following example shows what you will need at minimum: +The following example shows a minimum configuration: ``` ... @@ -281,5 +281,5 @@ middleware: ## CloudFront Key-Pair A CloudFront key-pair is required for all AWS accounts needing access to your -CloudFront distribution. You must have access to your AWS account's root credentials to create the required Cloudfront keypair. For information, please see [Creating CloudFront Key +CloudFront distribution. You must have access to your AWS account's root credentials to create the required Cloudfront keypair. For information, see [Creating CloudFront Key Pairs](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-trusted-signers.html#private-content-creating-cloudfront-key-pairs). diff --git a/registry/storage-drivers/swift.md b/registry/storage-drivers/swift.md index 44a3f4f79a3..30c1458b520 100644 --- a/registry/storage-drivers/swift.md +++ b/registry/storage-drivers/swift.md @@ -157,7 +157,7 @@ storage. no - This is a prefix that will be applied to all Swift keys to allow you to segment data in your container if necessary. 
Defaults to the empty string which is the container's root. + This is a prefix that is applied to all Swift keys to allow you to segment data in your container if necessary. Defaults to the empty string which is the container's root. @@ -190,7 +190,7 @@ storage. no - Specify the OpenStack Auth's version,for example 3. By default the driver will autodetect the auth's version from the AuthURL. + Specify the OpenStack Auth's version, for example 3. By default the driver autodetects the auth's version from the AuthURL. diff --git a/release-notes/docker-ce.md b/release-notes/docker-ce.md index 3c7a9629326..2e9c1641cd8 100644 --- a/release-notes/docker-ce.md +++ b/release-notes/docker-ce.md @@ -544,12 +544,12 @@ Upgrading from Docker 1.13.1 to 17.03.0 is expected to be simple and low-risk. > **Important**: Docker CE 17.11 is the first Docker release based on [containerd 1.0 beta](https://github.com/containerd/containerd/releases/tag/v1.0.0-beta.2). -Docker CE 17.11 and later won't recognize containers started with +Docker CE 17.11 and later don't recognize containers started with previous Docker versions. If using [Live Restore](https://docs.docker.com/engine/admin/live-restore/#enable-the-live-restore-option), you must stop all containers before upgrading to Docker CE 17.11. If you don't, any containers started by Docker versions that predate -17.11 won't be recognized by Docker after the upgrade and will keep +17.11 aren't recognized by Docker after the upgrade and keep running, un-managed, on the system. {:.important} diff --git a/release-notes/docker-compose.md b/release-notes/docker-compose.md index 37b9bfae8e4..800590a31c3 100644 --- a/release-notes/docker-compose.md +++ b/release-notes/docker-compose.md @@ -25,7 +25,7 @@ toc_max: 2 - Added support for `extra_hosts` in build configuration - Added support for the [long syntax](/compose/compose-file.md#long-syntax-3) for volume entries, as previously introduced in the 3.2 format. 
- Note that using this syntax will create [mounts](/engine/admin/volumes/bind-mounts.md) instead of volumes. + Using this syntax creates [mounts](/engine/admin/volumes/bind-mounts.md) instead of volumes. #### Compose file version 2.1 and up @@ -322,7 +322,7 @@ toc_max: 2 - Added support for `scale` in service definitions. The configuration's value can be overridden using the `--scale` flag in `docker-compose up`. - Please note that the `scale` command is disabled for this file format + The `scale` command is disabled for this file format. #### Compose file version 2.x @@ -419,8 +419,8 @@ toc_max: 2 to separate the `COMPOSE_FILE` environment value using the `COMPOSE_PATH_SEPARATOR` environment variable -- Added support for port range to single port in port mappings - (e.g. `8000-8010:80`) +- Added support for port range to single port in port mappings, such as + `8000-8010:80`. ### Bugfixes @@ -586,7 +586,7 @@ toc_max: 2 New Features - Interactive mode for `docker-compose run` and `docker-compose exec` is - now supported on Windows platforms. Please note that the `docker` binary + now supported on Windows platforms. The `docker` binary is required to be present on the system for this feature to work. - Introduced version 2.1 of the `docker-compose.yml` specification. This @@ -1147,7 +1147,7 @@ https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-sub ### New features -- You can now optionally pass a mode to `volumes_from`, e.g. +- You can now optionally pass a mode to `volumes_from`. For example, `volumes_from: ["servicename:ro"]`. - Since Docker now lets you create volumes with names, you can refer to those @@ -1178,10 +1178,10 @@ https://github.com/docker/compose/blob/8cc8e61/docs/compose-file.md#variable-sub - If you install Compose with pip or use it as a library, it now works with Python 3. -- `image` now supports image digests (in addition to ids and tags), e.g. +- `image` now supports image digests (in addition to ids and tags). 
For example, `image: "busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d"` -- `ports` now supports ranges of ports, e.g. +- `ports` now supports ranges of ports. For example, ports: - "3000-3005" @@ -1372,7 +1372,7 @@ Besides that, there’s a lot of new stuff in this release: - We’ve made a few small changes to ensure that Compose will work with Swarm, Docker’s new clustering tool (https://github.com/docker/swarm). Eventually you'll be able to point Compose at a Swarm cluster instead of a standalone Docker host and it’ll run your containers on the cluster with no extra work from you. As Swarm is still developing, integration is rough and lots of Compose features don't work yet. -- `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for e.g. running your webapp with an interactive debugger. +- `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for running your webapp with an interactive debugger, for example. - You can now link to containers outside your app with the `external_links` option in docker-compose.yml. @@ -1418,7 +1418,7 @@ The highlights: - There is a new `fig restart` command which restarts a service's containers. - - Fig creates multiple containers in service by appending a number to the service name (e.g. `db_1`, `db_2`, etc). As a convenience, Fig will now give the first container an alias of the service name (e.g. `db`). + - Fig creates multiple containers in service by appending a number to the service name. For example, `db_1`, `db_2`. As a convenience, Fig will now give the first container an alias of the service name. For example, `db`. This link alias is also a valid hostname and added to `/etc/hosts` so you can connect to linked services using their hostname. 
For example, instead of resolving the environment variables `DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT`, you could just use the hostname `db` and port `5432` directly. diff --git a/release-notes/docker-engine.md b/release-notes/docker-engine.md index ec7f24eb416..3ae971c835d 100644 --- a/release-notes/docker-engine.md +++ b/release-notes/docker-engine.md @@ -779,7 +779,7 @@ systemctl restart docker` to reload changes and (re)start the docker daemon. **IMPORTANT**: With Docker 1.12, a Linux docker installation now has two additional binaries; `dockerd`, and `docker-proxy`. If you have scripts for -installing docker, please make sure to update them accordingly. +installing docker, make sure to update them accordingly. ### Builder @@ -969,7 +969,7 @@ installing docker, please make sure to update them accordingly. ## 1.11.0 (2016-04-13) -**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binaries, please make sure to update them. Interaction with the daemon stay the same otherwise, the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`. +**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binaries, make sure to update them. Interaction with the daemon stay the same otherwise, the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`. 
### Builder @@ -1273,7 +1273,7 @@ Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker ### Distribution * Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) - Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. + A migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. Images no longer depend on the parent chain but contain a list of layer references. `docker load`/`docker save` tarballs now also contain content-addressable image configurations. For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration @@ -1320,7 +1320,7 @@ Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker + Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034) * Add `ls` and `inspect` endpoints to volume plugin API [#16534](https://github.com/docker/docker/pull/16534) Existing plugins need to make use of these new APIs to satisfy users' expectation - For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) + For that, use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) - Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175) - Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500) - Fix `docker volumes ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671) @@ -1470,9 +1470,9 @@ that allows to add build-time environment variables (#15182) + Add `awslogs` logging driver for 
Amazon CloudWatch (#15495) + Add generic `tag` log option to allow customizing container/image -information passed to driver (e.g. show container names) (#15384) +information passed to driver (#15384) - Implement the `docker logs` endpoint for the journald driver (#13707) -- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384) +- DEPRECATE driver-specific log tags (#15384) ### Distribution diff --git a/release-notes/docker-machine.md b/release-notes/docker-machine.md index 1e90176be60..a4b16b0ea7b 100644 --- a/release-notes/docker-machine.md +++ b/release-notes/docker-machine.md @@ -456,15 +456,13 @@ This is a patch release to fix a regression with STDOUT/STDERR behavior (#2587). ## 0.5.3 (2015-12-14) -**Please note**: With this release Machine will be reverting back to distribution in a single binary, which is more efficient on bandwidth and hard disk space. All the core driver plugins are now included in the main binary. You will want to delete the old driver binaries that you might have in your path. - -e.g.: +With this release Machine reverts to distribution in a single binary, which is more efficient on bandwidth and hard disk space. All the core driver plugins are now included in the main binary. Delete the old driver binaries that you might have in your path. ```console $ rm /usr/local/bin/docker-machine-driver-{amazonec2,azure,digitalocean,exoscale,generic,google,hyperv,none,openstack,rackspace,softlayer,virtualbox,vmwarefusion,vmwarevcloudair,vmwarevsphere} ``` -Non-core driver plugins should still work as intended (in externally distributed binaries of the form `docker-machine-driver-name`. Please report any issues you encounter them with externally loaded plugins. +Non-core driver plugins should still work as intended (in externally distributed binaries of the form `docker-machine-driver-name`). Report any issues you encounter with externally loaded plugins. 
### General diff --git a/swarm/configure-tls.md b/swarm/configure-tls.md index d9a35f9acc9..f63a93f1cba 100644 --- a/swarm/configure-tls.md +++ b/swarm/configure-tls.md @@ -6,14 +6,14 @@ keywords: docker, swarm, TLS, discovery, security, certificates title: Configure Docker Swarm for TLS --- -In this procedure you create a two-node Swarm cluster, a Docker Engine CLI, a -Swarm Manager, and a Certificate Authority as shown below. All the Docker Engine +In this procedure you create a two-node swarm cluster, a Docker Engine CLI, a +swarm manager, and a Certificate Authority as shown below. All the Docker Engine hosts (`client`, `swarm`, `node1`, and `node2`) have a copy of the CA's certificate as well as their own key-pair signed by the CA. -![Swarm cluster, Docker client, CA server, Swarm Manager](images/tls-1.jpg) +![Swarm cluster, Docker client, CA server, Swarm manager](images/tls-1.jpg) -You will complete the following steps in this procedure: +This procedure includes the following steps: - [Step 1: Set up the prerequisites](configure-tls.md#step-1-set-up-the-prerequisites) @@ -21,9 +21,9 @@ You will complete the following steps in this procedure: - [Step 3: Create and sign keys](configure-tls.md#step-3-create-and-sign-keys) - [Step 4: Install the keys](configure-tls.md#step-4-install-the-keys) - [Step 5: Configure the Engine daemon for TLS](configure-tls.md#step-5-configure-the-engine-daemon-for-tls) -- [Step 6: Create a Swarm cluster](configure-tls.md#step-6-create-a-swarm-cluster) -- [Step 7: Create the Swarm Manager using TLS](configure-tls.md#step-7-create-the-swarm-manager-using-tls) -- [Step 8: Test the Swarm manager configuration](configure-tls.md#step-8-test-the-swarm-manager-configuration) +- [Step 6: Create a swarm cluster](configure-tls.md#step-6-create-a-swarm-cluster) +- [Step 7: Create the swarm manager using TLS](configure-tls.md#step-7-create-the-swarm-manager-using-tls) +- [Step 8: Test the swarm manager 
configuration](configure-tls.md#step-8-test-the-swarm-manager-configuration) - [Step 9: Configure the Engine CLI to use TLS](configure-tls.md#step-9-configure-the-engine-cli-to-use-tls) ### Before you begin @@ -44,15 +44,15 @@ or in the public cloud. The following table lists each server name and its purpo | Server name | Description | |-------------|------------------------------------------------| | `ca` | Acts as the Certificate Authority (CA) server. | -| `swarm` | Acts as the Swarm Manager. | -| `node1` | Acts as a Swarm node. | -| `node2` | Acts as a Swarm node. | +| `swarm` | Acts as the swarm manager. | +| `node1` | Acts as a swarm node. | +| `node2` | Acts as a swarm node. | | `client` | Acts as a remote Docker Engine client. | Make sure that you have SSH access to all 5 servers and that they can communicate with each other using DNS name resolution. In particular: -- Open TCP port 2376 between the Swarm Manager and Swarm nodes -- Open TCP port 3376 between the Docker Engine client and the Swarm Manager +- Open TCP port 2376 between the swarm manager and swarm nodes +- Open TCP port 3376 between the Docker Engine client and the swarm manager You can choose different ports if these are already in use. This example assumes you use these ports though. @@ -139,14 +139,14 @@ Certificate: ``` -Later, you'll use this certificate to sign keys for other servers in the +Later, you use this certificate to sign keys for other servers in the infrastructure. ## Step 3: Create and sign keys -Now that you have a working CA, you need to create key pairs for the Swarm -Manager, Swarm nodes, and remote Docker Engine client. The commands and process -to create key pairs is identical for all servers. You'll create the following keys: +Now that you have a working CA, you need to create key pairs for the swarm +manager, swarm nodes, and remote Docker Engine client. The commands and process +to create key pairs is identical for all servers. 
You create the following keys: | Key | Description | | --- | ----------- | @@ -164,7 +164,7 @@ The commands below show how to create keys for all of your nodes. You perform th $ sudo su ``` -2. Create a private key `swarm-priv-key.pem` for your Swarm Manager +2. Create a private key `swarm-priv-key.pem` for your swarm manager ``` # openssl genrsa -out swarm-priv-key.pem 2048 @@ -181,7 +181,7 @@ The commands below show how to create keys for all of your nodes. You perform th ``` Remember, this is only for demonstration purposes. The process to create a - CSR will be slightly different in real-world production environments. + CSR is slightly different in real-world production environments. 3. Create the certificate `swarm-cert.pem` based on the CSR created in the previous step. @@ -191,7 +191,7 @@ The commands below show how to create keys for all of your nodes. You perform th # openssl rsa -in swarm-priv-key.pem -out swarm-priv-key.pem ``` - You now have a keypair for the Swarm Manager. + You now have a keypair for the swarm manager. 4. Repeat the steps above for the remaining nodes in your infrastructure (`node1`, `node2`, and `client`). @@ -237,7 +237,7 @@ To inspect a public key (cert): openssl x509 -in -noout -text ``` -The following command shows the partial contents of the Swarm Manager's public +The following command shows the partial contents of the swarm manager's public `swarm-cert.pem` key. ``` @@ -266,7 +266,7 @@ infrastructure. Each server needs three files: - It's own public key (cert) The procedure below shows you how to copy these files from the CA server to each -server using `scp`. As part of the copy procedure, you'll rename each file as +server using `scp`. As part of the copy procedure, rename each file as follows on each node: | Original name | Copied name | @@ -281,13 +281,13 @@ follows on each node: $ sudo su ``` -2. Create a` ~/.certs` directory on the Swarm manager. Here we assume user account is ubuntu. +2. 
Create a` ~/.certs` directory on the swarm manager. Here we assume user account is ubuntu. ``` $ ssh ubuntu@swarm 'mkdir -p /home/ubuntu/.certs' ``` -2. Copy the keys from the CA to the Swarm Manager server. +2. Copy the keys from the CA to the swarm manager server. ``` $ scp ./ca.pem ubuntu@swarm:/home/ubuntu/.certs/ca.pem @@ -323,11 +323,11 @@ follows on each node: ## Step 5: Configure the Engine daemon for TLS In the last step, you created and installed the necessary keys on each of your -Swarm nodes. In this step, you configure them to listen on the network and only -accept connections using TLS. Once you complete this step, your Swarm nodes will +swarm nodes. In this step, you configure them to listen on the network and only +accept connections using TLS. Once you complete this step, your swarm nodes listen on TCP port 2376, and only accept connections using TLS. -On `node1` and `node2` (your Swarm nodes), do the following: +On `node1` and `node2` (your swarm nodes), do the following: 1. Open a terminal on `node1` and elevate to root. @@ -349,20 +349,20 @@ On `node1` and `node2` (your Swarm nodes), do the following: ``` Restart Docker for the changes to take effect. If the file is not valid JSON, - Docker will not start and will emit an error. + Docker fails to start and emits an error. 3. Repeat the procedure on `node2` as well. -## Step 6: Create a Swarm cluster +## Step 6: Create a swarm cluster -Next create a Swarm cluster. In this procedure you create a two-node Swarm +Next create a swarm cluster. In this procedure you create a two-node swarm cluster using the default *hosted discovery* backend. The default hosted discovery backend uses Docker Hub and is not recommended for production use. -1. Logon to the terminal of your Swarm manager node. +1. Logon to the terminal of your swarm manager node. 2. Create the cluster and export it's unique ID to the `TOKEN` environment variable. 
@@ -389,7 +389,7 @@ discovery backend uses Docker Hub and is not recommended for production use. db3f49d397bad957202e91f0679ff84f526e74d6c5bf1b6734d834f5edcbca6c -## Step 7: Start the Swarm Manager using TLS +## Step 7: Start the swarm manager using TLS 1. Launch a new container with TLS enables @@ -399,23 +399,23 @@ discovery backend uses Docker Hub and is not recommended for production use. and it maps port `3376` on the server to port `3376` inside the container. This mapping ensures that Docker Engine commands sent to the host on port `3376` are passed on to port `3376` inside the container. The - container runs the Swarm `manage` process with the `--tlsverify`, + container runs the swarm `manage` process with the `--tlsverify`, `--tlscacert`, `--tlscert` and `--tlskey` options specified. These options - force TLS verification and specify the location of the Swarm manager's TLS + force TLS verification and specify the location of the swarm manager's TLS keys. -2. Run a `docker ps` command to verify that your Swarm manager container is up +2. Run a `docker ps` command to verify that your swarm manager container is up and running. $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 035dbf57b26e swarm "/swarm manage --tlsv" 7 seconds ago Up 7 seconds 2375/tcp, 0.0.0.0:3376->3376/tcp compassionate_lovelace -Your Swarm cluster is now configured to use TLS. +Your swarm cluster is now configured to use TLS. -## Step 8: Test the Swarm manager configuration +## Step 8: Test the swarm manager configuration -Now that you have a Swarm cluster built and configured to use TLS, you'll test that it works with a Docker Engine CLI. +Now that you have a swarm cluster built and configured to use TLS, test that it works with a Docker Engine CLI. 1. Open a terminal onto your `client` server. 
@@ -441,11 +441,11 @@ Now that you have a Swarm cluster built and configured to use TLS, you'll test t OS/Arch: linux/amd64 The output above shows the `Server` version as "swarm/1.0.1". This means - that the command was successfully issued against the Swarm manager. + that the command was successfully issued against the swarm manager. 2. Verify that the same command does not work without TLS. - This time, do not pass your certs to the Swarm manager. + This time, do not pass your certs to the swarm manager. ``` $ sudo docker -H swarm:3376 version @@ -461,18 +461,20 @@ Now that you have a Swarm cluster built and configured to use TLS, you'll test t ``` The output above shows that the command was rejected by the server. This is - because the server (Swarm manager) is configured to only accept connections + because the server (swarm manager) is configured to only accept connections from authenticated clients using TLS. ## Step 9: Configure the Engine CLI to use TLS -You can configure the Engine so that you don't have to pass the TLS options when -you issue a command. To do this, you'll configure the `Docker Engine host` and +You can configure the Engine so that you don't need to pass the TLS options when +you issue a command. To do this, configure the `Docker Engine host` and `TLS` settings as defaults on your Docker Engine client. -To do this, you place the client's keys in your `~/.docker` configuration folder. If you have other users on your system using the Engine command line, you'll need to configure their account's `~/.docker` as well. The procedure below shows how to do this for the `ubuntu` user on -your Docker Engine client. +To do this, you place the client's keys in your `~/.docker` configuration +folder. If you have other users on your system using the Engine command line, +configure their account's `~/.docker` as well. The procedure below shows how to +do this for the `ubuntu` user on your Docker Engine client. 1. 
Open a terminal onto your `client` server. @@ -537,9 +539,9 @@ your Docker Engine client. ``` The server portion of the output above command shows that your Docker - client is issuing commands to the Swarm Manager and using TLS. + client is issuing commands to the swarm manager and using TLS. -Congratulations! You have configured a Docker Swarm cluster to use TLS. +Congratulations! You have configured a Docker swarm cluster to use TLS. ## Related Information diff --git a/swarm/discovery.md b/swarm/discovery.md index 824900fa59e..856d174e2b0 100644 --- a/swarm/discovery.md +++ b/swarm/discovery.md @@ -7,7 +7,7 @@ title: Docker Swarm discovery --- Docker Swarm comes with multiple discovery backends. You use a hosted discovery service with Docker Swarm. The service maintains a list of IPs in your cluster. -This page describes the different types of hosted discovery available to you. These are: +This page describes the different types of hosted discovery. These are: ## Using a distributed key/value store @@ -24,7 +24,7 @@ For details about libkv and a detailed technical overview of the supported backe 1. On each node, start the Swarm agent. - The node IP address doesn't have to be public as long as the Swarm manager can access it. In a large cluster, the nodes joining swarm may trigger request spikes to discovery. For example, a large number of nodes are added by a script, or recovered from a network partition. This may result in discovery failure. You can use `--delay` option to specify a delay limit. Swarm join will add a random delay less than this limit to reduce pressure to discovery. + The node IP address doesn't need to be public as long as the Swarm manager can access it. In a large cluster, the nodes joining swarm may trigger request spikes to discovery. For example, a large number of nodes are added by a script, or recovered from a network partition. This may result in discovery failure. You can use `--delay` option to specify a delay limit. 
The `swarm join` command adds a random delay less than this limit to reduce pressure to discovery. **Etcd**: @@ -38,7 +38,7 @@ For details about libkv and a detailed technical overview of the supported backe swarm join --advertise= zk://,/ -2. Start the Swarm manager on any machine or your laptop. +2. Start the swarm manager on any machine or your laptop. **Etcd**: @@ -92,15 +92,15 @@ swarm join \ consul:/// ``` -This works the same way for the Swarm `manage` and `list` commands. +This works the same way for the swarm `manage` and `list` commands. ## A static file or list of nodes -> **Note**: This discovery method is incompatible with replicating Swarm +> **Note**: This discovery method is incompatible with replicating swarm managers. If you require replication, you should use a hosted discovery key store. -You can use a static file or list of nodes for your discovery backend. The file must be stored on a host that is accessible from the Swarm manager. You can also pass a node list as an option when you start Swarm. +You can use a static file or list of nodes for your discovery backend. The file must be stored on a host that is accessible from the swarm manager. You can also pass a node list as an option when you start Swarm. Both the static file and the `nodes` option support an IP address range. To specify a range supply a pattern, for example, `10.0.0.[10:200]` refers to nodes starting from `10.0.0.10` to `10.0.0.200`. For example for the `file` discovery method. @@ -122,7 +122,7 @@ Or with node discovery: This example creates a file named `/tmp/my_cluster`. You can use any name you like. -2. Start the Swarm manager on any machine. +2. Start the swarm manager on any machine. swarm manage -H tcp:// file:///tmp/my_cluster @@ -183,11 +183,11 @@ swarm is connected to the public internet. To create your cluster: 2. Create each node and join them to the cluster. - On each of your nodes, start the swarm agent. The node IP address doesn't have to be public (eg. 
192.168.0.X) but the Swarm manager must be able to access it. + On each of your nodes, start the swarm agent. The node IP address doesn't need to be public (for example, 192.168.0.X) but the swarm manager must be able to access it. $ swarm join --advertise= token:// -3. Start the Swarm manager. +3. Start the swarm manager. This can be on any machine or even your laptop. diff --git a/swarm/install-manual.md b/swarm/install-manual.md index ef406ec7afd..c34e67b4911 100644 --- a/swarm/install-manual.md +++ b/swarm/install-manual.md @@ -6,21 +6,21 @@ keywords: docker, swarm, clustering, examples, Amazon, AWS EC2 title: Build a Swarm cluster for production --- -This page teaches you to deploy a high-availability Docker Swarm cluster. +This page teaches you to deploy a high-availability swarm cluster. Although the example installation uses the Amazon Web Services (AWS) platform, -you can deploy an equivalent Docker Swarm cluster on many other platforms. In this example, you do the following: +you can deploy an equivalent swarm on many other platforms. 
In this example, you do the following: - [Verify you have the prerequisites](install-manual.md#prerequisites) - [Establish basic network security](install-manual.md#step-1-add-network-security-rules) - [Create your nodes](install-manual.md#step-2-create-your-instances) - [Install Engine on each node](install-manual.md#step-3-install-engine-on-each-node) - [Configure a discovery backend](install-manual.md#step-4-set-up-a-discovery-backend) -- [Create Swarm cluster](install-manual.md#step-5-create-swarm-cluster) -- [Communicate with the Swarm](install-manual.md#step-6-communicate-with-the-swarm) -- [Test the high-availability Swarm managers](install-manual.md#step-7-test-swarm-failover) +- [Create a swarm cluster](install-manual.md#step-5-create-swarm-cluster) +- [Communicate with the swarm](install-manual.md#step-6-communicate-with-the-swarm) +- [Test the high-availability swarm managers](install-manual.md#step-7-test-swarm-failover) - [Additional Resources](install-manual.md#additional-resources) -For a gentler introduction to Swarm, try the [Evaluate Swarm in a sandbox](install-w-machine.md) page. +For a quickstart for Docker Swarm, try the [Evaluate Swarm in a sandbox](install-w-machine.md) page. ## Prerequisites @@ -143,24 +143,23 @@ available on this host, it may be because the user doesn't have root privileges. If so, use `sudo` or give the user root privileges. * For this example, don't create an AMI image from one of your instances running -Docker Engine and then re-use it to create the other instances. Doing so will -produce errors. +Docker Engine and then re-use it to create the other instances. Doing so +produces errors. -* If your host cannot reach Docker Hub, the `docker run` commands that pull -container images may fail. In that case, check that your VPC is associated with -a security group with a rule that allows inbound traffic (e.g., -HTTP/TCP/80/0.0.0.0/0). 
Also Check the [Docker Hub status +* If your host cannot reach Docker Hub, `docker run` commands that pull +images fail. In that case, check that your VPC is associated with +a security group with a rule that allows inbound traffic. Also check the [Docker Hub status page](http://status.docker.com/) for service availability. ## Step 4. Set up a discovery backend -Here, you're going to create a minimalist discovery backend. The Swarm managers +Here, you're going to create a minimalist discovery backend. The swarm managers and nodes use this backend to authenticate themselves as members of the cluster. -The Swarm managers also use this information to identify which nodes are +The swarm managers also use this information to identify which nodes are available to run containers. To keep things simple, you are going to run a single consul daemon on the same -host as one of the Swarm managers. +host as one of the swarm managers. 1. Use SSH to connect to the `consul0` instance. @@ -183,15 +182,15 @@ using a trio of consul nodes using the link mentioned at the end of this page. (Before creating a cluster of consul nodes, update the VPC security group with rules to allow inbound traffic on the required port numbers.) -## Step 5. Create Swarm cluster +## Step 5. Create swarm cluster -After creating the discovery backend, you can create the Swarm managers. In this step, you are going to create two Swarm managers in a high-availability configuration. The first manager you run becomes the Swarm's *primary manager*. Some documentation still refers to a primary manager as a "master", but that term has been superseded. The second manager you run serves as a *replica*. If the primary manager becomes unavailable, the cluster elects the replica as the primary manager. +After creating the discovery backend, you can create the swarm managers. In this step, you are going to create two swarm managers in a high-availability configuration. 
The first manager you run becomes the swarm's *primary manager*. Some documentation still refers to a primary manager as a "master", but that term has been superseded. The second manager you run serves as a *replica*. If the primary manager becomes unavailable, the cluster elects the replica as the primary manager. 1. Use SSH to connect to the `manager0` instance and use `ifconfig` to get its IP address. $ ifconfig -2. To create the primary manager in a high-availability Swarm cluster, use the following syntax: +2. To create the primary manager in a high-availability swarm cluster, use the following syntax: $ docker run -d -p 4000:4000 swarm manage -H :4000 --replication --advertise :4000 consul://:8500 @@ -201,26 +200,26 @@ After creating the discovery backend, you can create the Swarm managers. In this 3. Enter `docker ps`. - From the output, verify that a Swarm cluster container is running. + From the output, verify that a swarm cluster container is running. Then, disconnect from the `manager0` instance. 4. Connect to the `manager1` node and use `ifconfig` to get its IP address. $ ifconfig -5. Start the secondary Swarm manager using following command. +5. Start the secondary swarm manager using the following command. Replacing `` with the IP address from the previous command, for example: $ docker run -d -p 4000:4000 swarm manage -H :4000 --replication --advertise :4000 consul://172.30.0.161:8500 -6. Enter `docker ps`to verify that a Swarm container is running. Then disconnect from the `manager1` instance. +6. Enter `docker ps` to verify that a swarm container is running. Then disconnect from the `manager1` instance. 7. Connect to `node0` and `node1` in turn and join them to the cluster. a. Get the node IP addresses with the `ifconfig` command. - b. Start a Swarm container each using the following syntax: + b. 
Start a swarm container each using the following syntax: docker run -d swarm join --advertise=:2375 consul://:8500 @@ -228,16 +227,16 @@ After creating the discovery backend, you can create the Swarm managers. In this $ docker run -d swarm join --advertise=172.30.0.69:2375 consul://172.30.0.161:8500 - c. Enter `docker ps` to verify that the Swarm cluster container started from the previous command is running. + c. Enter `docker ps` to verify that the swarm cluster container started from the previous command is running. -Your small Swarm cluster is up and running on multiple hosts, providing you with a high-availability virtual Docker Engine. To increase its reliability and capacity, you can add more Swarm managers, nodes, and a high-availability discovery backend. +Your small swarm cluster is up and running on multiple hosts, providing you with a high-availability virtual Docker Engine. To increase its reliability and capacity, you can add more swarm managers, nodes, and a high-availability discovery backend. -## Step 6. Communicate with the Swarm +## Step 6. Communicate with the swarm -You can communicate with the Swarm to get information about the managers and +You can communicate with the swarm to get information about the managers and nodes using the Swarm API, which is nearly the same as the standard Docker API. In this example, you use SSL to connect to `manager0` and `consul0` host again. -Then, you address commands to the Swarm manager. +Then, you address commands to the swarm manager. 1. Get information about the manager and nodes in the cluster: @@ -246,11 +245,11 @@ Then, you address commands to the Swarm manager. The output gives the manager's role as primary (`Role: primary`) and information about each of the nodes. -2. Run an application on the Swarm: +2. Run an application on the swarm: $ docker -H :4000 run hello-world -3. Check which Swarm node ran the application: +3. 
Check which swarm node ran the application: $ docker -H :4000 ps @@ -271,7 +270,7 @@ replica. docker rm -f -4. Start the Swarm manager. For example: +4. Start the swarm manager. For example: $ docker run -d -p 4000:4000 swarm manage -H :4000 --replication --advertise 172.30.0.161:4000 consul://172.30.0.161:8500 @@ -279,7 +278,7 @@ replica. $ sudo docker logs - The output shows will show two entries like these ones: + The output shows two entries like these ones: time="2016-02-02T02:12:32Z" level=info msg="Leader Election: Cluster leadership lost" time="2016-02-02T02:12:32Z" level=info msg="New leader elected: 172.30.0.160:4000" @@ -289,7 +288,7 @@ replica. $ docker -H :4000 info You can connect to the `manager1` node and run the `info` and `logs` commands. -They will display corresponding entries for the change in leadership. +They display corresponding entries for the change in leadership. ## Additional Resources diff --git a/swarm/install-w-machine.md b/swarm/install-w-machine.md index bb0625435f8..b46ef97b139 100644 --- a/swarm/install-w-machine.md +++ b/swarm/install-w-machine.md @@ -95,7 +95,7 @@ Daemon running on each node. Other discovery service backends such as 5. Save the token in a safe place. - You'll use this token in the next step to create a Docker Swarm. + You use this token in the next step to create a Docker Swarm. ## Launch the Swarm manager diff --git a/swarm/multi-manager-setup.md b/swarm/multi-manager-setup.md index aacb648df24..015450d29ad 100644 --- a/swarm/multi-manager-setup.md +++ b/swarm/multi-manager-setup.md @@ -6,11 +6,11 @@ keywords: docker, swarm, clustering title: High availability in Docker Swarm --- -In Docker Swarm, the **Swarm manager** is responsible for the entire cluster and manages the resources of multiple *Docker hosts* at scale. If the Swarm manager dies, you must create a new one and deal with an interruption of service. 
+In Docker Swarm, the **swarm manager** is responsible for the entire cluster and manages the resources of multiple *Docker hosts* at scale. If the swarm manager dies, you must create a new one and deal with an interruption of service. -The *High Availability* feature allows a Docker Swarm to gracefully handle the failover of a manager instance. Using this feature, you can create a single **primary manager** instance and multiple **replica** instances. +The *High Availability* feature allows a swarm to gracefully handle the failover of a manager instance. Using this feature, you can create a single **primary manager** instance and multiple **replica** instances. -A primary manager is the main point of contact with the Docker Swarm cluster. You can also create and talk to replica instances that will act as backups. Requests issued on a replica are automatically proxied to the primary manager. If the primary manager fails, a replica takes away the lead. In this way, you always keep a point of contact with the cluster. +A primary manager is the main point of contact with the swarm cluster. You can also create and talk to replica instances that act as backups. Requests issued on a replica are automatically proxied to the primary manager. If the primary manager fails, a replica takes away the lead. In this way, you always keep a point of contact with the cluster. ## Setup primary and replicas @@ -18,7 +18,7 @@ This section explains how to set up Docker Swarm using multiple **managers**. ### Assumptions -You need either a `Consul`, `etcd`, or `Zookeeper` cluster. This procedure is written assuming a `Consul` server running on address `192.168.42.10:8500`. All hosts will have a Docker Engine configured to listen on port 2375. We will be configuring the Managers to operate on port 4000. The sample Swarm configuration has three machines: +You need either a `Consul`, `etcd`, or `Zookeeper` cluster. 
This procedure is written assuming a `Consul` server running on address `192.168.42.10:8500`. All hosts have a Docker Engine configured to listen on port 2375. The Managers operate on port 4000. The sample swarm configuration has three machines: - `manager-1` on `192.168.42.200` - `manager-2` on `192.168.42.201` @@ -60,7 +60,7 @@ Create an additional, third *manager* instance: INFO[0000] New leader elected: 192.168.42.200:4000 [...] -Once you have established your primary manager and the replicas, create **Swarm agents** as you normally would. +Once you have established your primary manager and the replicas, create **swarm agents** as you normally would. ### List machines in the cluster @@ -155,6 +155,6 @@ To switch the `DOCKER_HOST` to use `manager-2` as the primary, you do the follow Filters: affinity, health, constraint, port, dependency Nodes: 3 -You can use the `docker` command on any Docker Swarm primary manager or any replica. +You can use the `docker` command on any swarm manager or any replica. -If you like, you can use custom mechanisms to always point `DOCKER_HOST` to the current primary manager. Then, you never lose contact with your Docker Swarm in the event of a failover. +If you like, you can use custom mechanisms to always point `DOCKER_HOST` to the current primary manager. Then, you never lose contact with your swarm in the event of a failover. diff --git a/swarm/overview.md b/swarm/overview.md index 3d71db0de04..bc5f21522b7 100644 --- a/swarm/overview.md +++ b/swarm/overview.md @@ -20,15 +20,15 @@ are not limited to, the following: And of course, the Docker client itself is also supported. Like other Docker projects, Docker Swarm follows the "swap, plug, and play" -principle. As initial development settles, an API will develop to enable +principle. As initial development settles, an API develops to enable pluggable backends. This means you can swap out the scheduling backend Docker Swarm uses out-of-the-box with a backend you prefer. 
Swarm's swappable design provides a smooth out-of-box experience for most use cases, and allows large-scale production deployments to swap for more powerful backends, like Mesos. ## Understand Swarm cluster creation -The first step to creating a Swarm cluster on your network is to pull the Docker Swarm image. Then, using Docker, you configure the Swarm manager and all the nodes to run Docker Swarm. This method requires that you: +The first step to creating a swarm cluster on your network is to pull the Docker Swarm image. Then, using Docker, you configure the swarm manager and all the nodes to run Docker Swarm. This method requires that you: -* open a TCP port on each node for communication with the Swarm manager +* open a TCP port on each node for communication with the swarm manager * install Docker on each node * create and manage TLS certificates to secure your cluster @@ -44,7 +44,7 @@ cluster. Using Docker Machine is the best method for users getting started with Swarm for the first time. To try the recommended method of getting started, see [Get Started with Docker Swarm](install-w-machine.md). -If you are interested manually installing or interested in contributing, see [Build a Swarm cluster for production](install-manual.md). +If you are interested in manually installing or interested in contributing, see [Build a swarm cluster for production](install-manual.md). ## Discovery services @@ -68,10 +68,10 @@ Docker Swarm is still in its infancy and under active development. If you need help, would like to contribute, or simply want to talk about the project with like-minded individuals, we have a number of open channels for communication. -* To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/swarm/issues). 
-* To talk about the project with people in real time: please join the `#docker-swarm` channel on IRC. +* To talk about the project with people in real time, join the `#docker-swarm` channel on IRC. -* To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/swarm/pulls). +* To contribute code or documentation changes, submit a [pull request on Github](https://github.com/docker/swarm/pulls). -For more information and resources, please visit the [Getting Help project page](/opensource/get-help/). +For more information and resources, visit the [Getting Help project page](/opensource/get-help/). diff --git a/swarm/plan-for-production.md b/swarm/plan-for-production.md index 494dc2c714a..7d01a519289 100644 --- a/swarm/plan-for-production.md +++ b/swarm/plan-for-production.md @@ -7,7 +7,7 @@ title: Plan for Swarm in production --- This article provides guidance to help you plan, deploy, and manage Docker -Swarm clusters in business critical production environments. The following high +swarm clusters in business critical production environments. The following high level topics are covered: - [Security](plan-for-production.md#security) @@ -28,16 +28,16 @@ control, technologies such as SELinux and AppArmor, strict auditing, and more. ### Configure Swarm for TLS -All nodes in a Swarm cluster must bind their Docker Engine daemons to a network +All nodes in a swarm cluster must bind their Docker Engine daemons to a network port. This brings with it all of the usual network related security implications such as man-in-the-middle attacks. These risks are compounded when the network in question is untrusted such as the internet. To mitigate these risks, Swarm and the Engine support Transport Layer Security(TLS) for authentication. -The Engine daemons, including the Swarm manager, that are configured to use TLS -will only accept commands from Docker Engine clients that sign their -communications. 
The Engine and Swarm support external 3rd party Certificate +The Engine daemons, including the swarm manager, that are configured to use TLS +only accepts commands from Docker Engine clients that sign their +communications. Engine and Swarm support external 3rd party Certificate Authorities (CA) as well as internal corporate CAs. The default Engine and Swarm ports for TLS are: @@ -57,7 +57,7 @@ configure your firewalls and other network access control lists. - **Swarm manager.** - **Inbound 80/tcp (HTTP)**. This allows `docker pull` commands to work. If you plan to pull images from Docker Hub, you must allow Internet connections through port 80. - **Inbound 2375/tcp**. This allows Docker Engine CLI commands direct to the Engine daemon. - - **Inbound 3375/tcp**. This allows Engine CLI commands to the Swarm manager. + - **Inbound 3375/tcp**. This allows Engine CLI commands to the swarm manager. - **Inbound 22/tcp**. This allows remote management via SSH - **Service Discovery**: - **Inbound 80/tcp (HTTP)**. This allows `docker pull` commands to work. If you plan to pull images from Docker Hub, you must allow Internet connections through port 80. @@ -76,25 +76,25 @@ configure your firewalls and other network access control lists. If your firewalls and other network devices are connection state aware, they -will allow responses to established TCP connections. If your devices are not -state aware, you will need to open up ephemeral ports from 32768-65535. For +allow responses to established TCP connections. If your devices are not +state aware, you need to open up ephemeral ports from 32768-65535. For added security you can configure the ephemeral port rules to only allow -connections from interfaces on known Swarm devices. +connections from interfaces on known swarm devices. -If your Swarm cluster is configured for TLS, replace `2375` with `2376`, and +If your swarm cluster is configured for TLS, replace `2375` with `2376`, and `3375` with `3376`. 
-The ports listed above are just for Swarm cluster operations such as; cluster +The ports listed above are just for swarm cluster operations such as; cluster creation, cluster management, and scheduling of containers against the cluster. You may need to open additional network ports for application-related communications. -It is possible for different components of a Swarm cluster to exist on separate +It is possible for different components of a swarm cluster to exist on separate networks. For example, many organizations operate separate management and production networks. Some Docker Engine clients may exist on a management -network, while Swarm managers, discovery service instances, and nodes might +network, while swarm managers, discovery service instances, and nodes might exist on one or more production networks. To offset against network failures, -you can deploy Swarm managers, discovery services, and nodes across multiple +you can deploy swarm managers, discovery services, and nodes across multiple production networks. In all of these cases you can use the list of ports above to assist the work of your network infrastructure teams to efficiently and securely configure your network. @@ -107,51 +107,51 @@ availability, an environment must survive failures of its individual component parts. The following sections discuss some technologies and best practices that can -enable you to build resilient, highly-available Swarm clusters. You can then use +enable you to build resilient, highly-available swarm clusters. You can then use these cluster to run your most demanding production applications and workloads. ### Swarm manager HA -The Swarm manager is responsible for accepting all commands coming in to a Swarm -cluster, and scheduling resources against the cluster. 
If the Swarm manager -becomes unavailable, some cluster operations cannot be performed until the Swarm +The swarm manager is responsible for accepting all commands coming in to a swarm +cluster, and scheduling resources against the cluster. If the swarm manager +becomes unavailable, some cluster operations cannot be performed until the swarm manager becomes available again. This is unacceptable in large-scale business critical scenarios. -Swarm provides HA features to mitigate against possible failures of the Swarm -manager. You can use Swarm's HA feature to configure multiple Swarm managers for -a single cluster. These Swarm managers operate in an active/passive formation -with a single Swarm manager being the *primary*, and all others being +Swarm provides HA features to mitigate against possible failures of the swarm +manager. You can use Swarm's HA feature to configure multiple swarm managers for +a single cluster. These swarm managers operate in an active/passive formation +with a single swarm manager being the *primary*, and all others being *secondaries*. Swarm secondary managers operate as *warm standby's*, meaning they run in the -background of the primary Swarm manager. The secondary Swarm managers are online -and accept commands issued to the cluster, just as the primary Swarm manager. +background of the primary swarm manager. The secondary swarm managers are online +and accept commands issued to the cluster, just as the primary swarm manager. However, any commands received by the secondaries are forwarded to the primary -where they are executed. Should the primary Swarm manager fail, a new primary is +where they are executed. Should the primary swarm manager fail, a new primary is elected from the surviving secondaries. -When creating HA Swarm managers, you should take care to distribute them over as +When creating HA swarm managers, you should take care to distribute them over as many *failure domains* as possible. 
A failure domain is a network section that can be negatively affected if a critical device or service experiences problems. For example, if your cluster is running in the Ireland Region of Amazon Web -Services (eu-west-1) and you configure three Swarm managers (1 x primary, 2 x +Services (eu-west-1) and you configure three swarm managers (1 x primary, 2 x secondary), you should place one in each availability zone as shown below. ![Swarm managers spread across availability zones](http://farm2.staticflickr.com/1657/24581727611_0a076b79de_b.jpg) -In this configuration, the Swarm cluster can survive the loss of any two +In this configuration, the swarm cluster can survive the loss of any two availability zones. For your applications to survive such failures, they must be architected across as many failure domains as well. -For Swarm clusters serving high-demand, line-of-business applications, you -should have 3 or more Swarm managers. This configuration allows you to take one +For swarm clusters serving high-demand, line-of-business applications, you +should have 3 or more swarm managers. This configuration allows you to take one manager down for maintenance, suffer an unexpected failure, and still continue to manage and operate the cluster. ### Discovery service HA -The discovery service is a key component of a Swarm cluster. If the discovery +The discovery service is a key component of a swarm cluster. If the discovery service becomes unavailable, this can prevent certain cluster operations. For example, without a working discovery service, operations such as adding new nodes to the cluster and making queries against the cluster configuration fail. @@ -168,36 +168,36 @@ Consul, etcd, and Zookeeper are all suitable for production, and should be configured for high availability. You should use each service's existing tools and best practices to configure these for HA. 
-For Swarm clusters serving high-demand, line-of-business applications, it is +For swarm clusters serving high-demand, line-of-business applications, it is recommended to have 5 or more discovery service instances. This due to the replication/HA technologies they use (such as Paxos/Raft) requiring a strong quorum. Having 5 instances allows you to take one down for maintenance, suffer -an unexpected failure, and still be able to achieve a strong quorum. +an unexpected failure, and still maintain a strong quorum. -When creating a highly available Swarm discovery service, you should take care +When creating a highly available swarm discovery service, you should take care to distribute each discovery service instance over as many failure domains as possible. For example, if your cluster is running in the Ireland Region of Amazon Web Services (eu-west-1) and you configure three discovery service instances, you should place one in each availability zone. -The diagram below shows a Swarm cluster configured for HA. It has three Swarm +The diagram below shows a swarm cluster configured for HA. It has three swarm managers and three discovery service instances spread over three failure -domains (availability zones). It also has Swarm nodes balanced across all three +domains (availability zones). It also has swarm nodes balanced across all three failure domains. The loss of two availability zones in the configuration shown - below does not cause the Swarm cluster to go down. + below does not cause the swarm cluster to go down. ![Swarm cluster configured for HA](http://farm2.staticflickr.com/1675/24380252320_999687d2bb_b.jpg) It is possible to share the same Consul, etcd, or Zookeeper containers between -the Swarm discovery and Engine container networks. However, for best +the swarm discovery and Engine container networks. 
However, for best performance and availability you should deploy dedicated instances – a discovery instance for Swarm and another for your container networks. ### Multiple clouds -You can architect and build Swarm clusters that stretch across multiple cloud +You can architect and build swarm clusters that stretch across multiple cloud providers, and even across public cloud and on premises infrastructures. The -diagram below shows an example Swarm cluster stretched across AWS and Azure. +diagram below shows an example swarm cluster stretched across AWS and Azure. ![Swarm cluster stretched across AWS and Azure](http://farm2.staticflickr.com/1493/24676269945_d19daf856c_b.jpg) @@ -214,7 +214,7 @@ like this, make sure you have good test coverage over your entire system. ### Isolated production environments It is possible to run multiple environments, such as development, staging, and -production, on a single Swarm cluster. You accomplish this by tagging Swarm +production, on a single swarm cluster. You accomplish this by tagging swarm nodes and using constraints to filter containers onto nodes tagged as `production` or `staging` etc. However, this is not recommended. The recommended approach is to air-gap production environments, especially high performance @@ -225,7 +225,7 @@ for production – such as networks, storage, compute and other systems. They also deploy separate management systems and policies. This results in things like users having separate accounts for logging on to production systems etc. In these types of environments, it is mandatory to deploy dedicated -production Swarm clusters that operate on the production hardware infrastructure +production swarm clusters that operate on the production hardware infrastructure and follow thorough production management, monitoring, audit and other policies. ### Operating system selection @@ -242,7 +242,7 @@ CentOS Linux can be downloaded and used for free, but commercial support options are few and far between. 
Whereas RHEL has an associated support and license cost, but comes with world class commercial support from Red Hat. -When choosing the production operating system to use with your Swarm clusters, +When choosing the production operating system to use with your swarm clusters, choose one that closely matches what you have used in development and staging environments. Although containers abstract much of the underlying OS, some features have configuration requirements. For example, to use Docker container @@ -257,16 +257,16 @@ patching your production operating systems. Performance is critical in environments that support business critical line of business applications. The following sections discuss some technologies and -best practices that can help you build high performance Swarm clusters. +best practices that can help you build high performance swarm clusters. ### Container networks Docker Engine container networks are overlay networks and can be created across multiple Engine hosts. For this reason, a container network requires a key-value (KV) store to maintain network configuration and state. This KV store can be -shared in common with the one used by the Swarm cluster discovery service. +shared in common with the one used by the swarm cluster discovery service. However, for best performance and fault isolation, you should deploy individual -KV store instances for container networks and Swarm discovery. This is +KV store instances for container networks and swarm discovery. This is especially so in demanding business critical production environments. Beginning with Docker Engine 1.9, Docker container networks require specific @@ -300,7 +300,7 @@ high performance clusters, as it spreads container workload across all resources in the cluster. These resources include CPU, RAM, storage, and network bandwidth. 
-If your Swarm nodes are balanced across multiple failure domains, the spread +If your swarm nodes are balanced across multiple failure domains, the spread strategy evenly balance containers across those failure domains. However, spread on its own is not aware of the roles of any of those containers, so has no intelligence to spread multiple instances of the same service across failure @@ -310,7 +310,7 @@ The **binpack** strategy runs as many containers as possible on a node, effectively filling it up, before scheduling containers on the next node. This means that binpack does not use all cluster resources until the cluster -fills up. As a result, applications running on Swarm clusters that operate the +fills up. As a result, applications running on swarm clusters that operate the binpack strategy might not perform as well as those that operate the spread strategy. However, binpack is a good choice for minimizing infrastructure requirements and cost. For example, imagine you have a 10-node cluster where @@ -324,21 +324,21 @@ potentially allowing you turn off the additional nodes and save on cost. The question of ownership is vital in production environments. It is therefore vital that you consider and agree on all of the following when planning, -documenting, and deploying your production Swarm clusters. +documenting, and deploying your production swarm clusters. -- Whose budget does the production Swarm infrastructure come out of? -- Who owns the accounts that can administer and manage the production Swarm +- Whose budget does the production swarm infrastructure come out of? +- Who owns the accounts that can administer and manage the production swarm cluster? -- Who is responsible for monitoring the production Swarm infrastructure? -- Who is responsible for patching and upgrading the production Swarm +- Who is responsible for monitoring the production swarm infrastructure? +- Who is responsible for patching and upgrading the production swarm infrastructure? 
- On-call responsibilities and escalation procedures? -The above is not a complete list, and the answers to the questions will vary +The above is not a complete list, and the answers to the questions vary depending on how your organization's and team's are structured. Some companies are along way down the DevOps route, while others are not. Whatever situation your company is in, it is important that you factor all of the above into the -planning, deployment, and ongoing management of your production Swarm clusters. +planning, deployment, and ongoing management of your production swarm clusters. ## Related information diff --git a/swarm/provision-with-machine.md b/swarm/provision-with-machine.md index 0bb6b348d70..085479069b4 100644 --- a/swarm/provision-with-machine.md +++ b/swarm/provision-with-machine.md @@ -34,8 +34,8 @@ instructions for [macOS](/docker-for-mac/) or Machine supports installing on AWS, Digital Ocean, Google Cloud Platform, IBM Softlayer, Microsoft Azure and Hyper-V, OpenStack, Rackspace, VirtualBox, VMware -Fusion®, vCloud® AirTM and vSphere®. In this example, -you'll use VirtualBox to run several VMs based on the `boot2docker.iso` image. +Fusion®, vCloud® AirTM and vSphere®. This example uses +VirtualBox to run several VMs based on the `boot2docker.iso` image. This image is a small-footprint Linux distribution for running Engine. The Toolbox installation gives you VirtualBox and the `boot2docker.iso` image @@ -86,15 +86,15 @@ Status: Downloaded newer image for swarm ``` The output of the `swarm create` command is a cluster token. Copy the token to a -safe place you will remember. Once you have the token, you can provision the -Swarm nodes and join them to the cluster_id. The rest of this documentation, +safe place. Once you have the token, you can provision the +swarm nodes and join them to the cluster_id. The rest of this documentation, refers to this token as the `SWARM_CLUSTER_TOKEN`. 
-## Provision Swarm nodes +## Provision swarm nodes -All Swarm nodes in a cluster must have Engine installed. With Machine and the +All swarm nodes in a cluster must have Engine installed. With Machine and the `SWARM_CLUSTER_TOKEN` you can provision a host with Engine and configure it as a -Swarm node with one Machine command. To create a Swarm manager node on a new VM +swarm node with one Machine command. To create a swarm manager node on a new VM called `swarm-manager`, you do the following: ``` @@ -136,7 +136,7 @@ eval "$(docker-machine env local)" ``` Docker Machine provides a special `--swarm` flag with its `env` command to -connect to Swarm nodes. +connect to swarm nodes. ```bash docker-machine env --swarm HOST_NODE_NAME @@ -148,7 +148,7 @@ export DOCKER_MACHINE_NAME="swarm-manager" # eval $(docker-machine env --swarm HOST_NODE_NAME) ``` -To set your SHELL connect to a Swarm node called `swarm-manager`, you would do +To set your SHELL connect to a swarm node called `swarm-manager`, you would do this: ``` diff --git a/swarm/reference/create.md b/swarm/reference/create.md index 5c279445a32..7eeac66b6d8 100644 --- a/swarm/reference/create.md +++ b/swarm/reference/create.md @@ -11,7 +11,7 @@ The `create` command uses Docker Hub's hosted discovery backend to create a uniq $ docker run --rm swarm create 86222732d62b6868d441d430aee4f055 -Later, when you use [`manage`](manage.md) or [`join`](join.md) to create Swarm managers and nodes, you use the discovery token in the `` argument (e.g., `token://86222732d62b6868d441d430aee4f055`). The discovery backend registers each new Swarm manager and node that uses the token as a member of your cluster. +Later, when you use [`manage`](manage.md) or [`join`](join.md) to create Swarm managers and nodes, you use the discovery token in the `` argument. For instance, `token://86222732d62b6868d441d430aee4f055`. The discovery backend registers each new Swarm manager and node that uses the token as a member of your cluster. 
Some documentation also refers to the discovery token as a *cluster_id*. diff --git a/swarm/reference/manage.md b/swarm/reference/manage.md index ed53179a859..5f73f86f6a5 100644 --- a/swarm/reference/manage.md +++ b/swarm/reference/manage.md @@ -82,7 +82,7 @@ Use `--filter ` or `-f ` to tell the Docker Swarm scheduler which Where `` is: * `health` — Use nodes that are running and communicating with the discovery backend. - * `port` — For containers that have a static port mapping, use nodes whose corresponding port number is available (i.e., not occupied by another container or process). + * `port` — For containers that have a static port mapping, use nodes whose corresponding port number is available and not occupied by another container or process. * `dependency` — For containers that have a declared dependency, use nodes that already have a container with the same dependency. * `affinity` — For containers that have a declared affinity, use nodes that already have a container with the same affinity. * `constraint` — For containers that have a declared constraint, use nodes that already have a container with the same constraint. diff --git a/swarm/scheduler/filter.md b/swarm/scheduler/filter.md index ecdf7f5e7cb..8d9dda885c9 100644 --- a/swarm/scheduler/filter.md +++ b/swarm/scheduler/filter.md @@ -45,7 +45,7 @@ $ swarm manage --filter=health --filter=dependency When creating a container or building an image, you use a `constraint` or `health` filter to select a subset of nodes to consider for scheduling. If a node in Swarm cluster has a label with key `containerslots` -and a number-value, Swarm will not launch more containers than the given number. +and a number-value, Swarm does not launch more containers than the given number. ### Use a constraint filter @@ -132,7 +132,7 @@ f8b693db9cd6 mysql:latest "mysqld" Up About a minute The scheduler selected `node-2` since it was started with the `storage=disk` label. 
Finally, build args can be used to apply node constraints to a `docker build`. -Again, you'll avoid flash drives. +This example shows how to avoid flash drives. ```bash $ mkdir sinatra @@ -175,10 +175,10 @@ You may give your Docker nodes the containerslots label $ docker daemon --label containerslots=3 ``` -Swarm will run up to 3 containers at this node, if all nodes are "full", +Swarm runs up to 3 containers at this node, if all nodes are "full", an error is thrown indicating no suitable node can be found. -If the value is not castable to an integer number or is not present, -there will be no limit on container number. +If the value cannot be cast to an integer number or is not present, +there is no limit on container number. ## Container filters @@ -329,7 +329,7 @@ Currently, dependencies are declared as follows: Swarm attempts to co-locate the dependent container on the same node. If it cannot be done (because the dependent container doesn't exist, or because the -node doesn't have enough resources), it will prevent the container creation. +node doesn't have enough resources), it prevents the container creation. The combination of multiple dependencies are honored if possible. 
For instance, if you specify `--volumes-from=A --net=container:B`, the scheduler @@ -373,7 +373,7 @@ CONTAINER ID IMAGE COMMAND PORTS 87c4376856a8 nginx:latest "nginx" 192.168.0.42:80->80/tcp node-1/prickly_engelbart ``` -Again, repeating the same command will result in the selection of `node-3`, +Again, repeating the same command results in the selection of `node-3`, since port `80` is neither available on `node-1` nor `node-2`: ```bash @@ -387,7 +387,7 @@ f8b693db9cd6 nginx:latest "nginx" 192.168.0.44:80->80/tcp 87c4376856a8 nginx:latest "nginx" 192.168.0.42:80->80/tcp node-1/prickly_engelbart ``` -Finally, Docker Swarm will refuse to run another container that requires port +Finally, Docker Swarm refuses to run another container that requires port `80`, because it is not available on any node in the cluster: ```bash diff --git a/swarm/scheduler/strategy.md b/swarm/scheduler/strategy.md index 375856fb8ff..0f3cc8ee65c 100644 --- a/swarm/scheduler/strategy.md +++ b/swarm/scheduler/strategy.md @@ -28,7 +28,7 @@ your company's needs. Under the `spread` strategy, Swarm optimizes for the node with the least number of containers. The `binpack` strategy causes Swarm to optimize for the -node which is most packed. Note that a container occupies resource during its life +node which is most packed. A container occupies resource during its life cycle, including `exited` state. Users should be aware of this condition to schedule containers. For example, `spread` strategy only checks number of containers disregarding their states. 
A node with no active containers but high number of diff --git a/swarm/secure-swarm-tls.md b/swarm/secure-swarm-tls.md index c447a125b68..0eeef94ff27 100644 --- a/swarm/secure-swarm-tls.md +++ b/swarm/secure-swarm-tls.md @@ -63,7 +63,7 @@ of the technologies, policies and procedures provided by PKI include: ## How does Docker Engine authenticate using TLS -In this section, you'll learn how Docker Engine and Swarm use PKI and +This section shows how Docker Engine and Swarm use PKI and certificates to increase security. @@ -72,7 +72,7 @@ You can configure both the Docker Engine CLI and the Docker Engine daemon to req TLS for authentication. Configuring TLS means that all communications between the Docker Engine CLI and the Docker Engine daemon must be accompanied with, and signed by a trusted digital certificate. The Docker Engine CLI must provide its digital certificate -before the Docker Engine daemon will accept incoming commands from it. +before the Docker Engine daemon accepts incoming commands from it. The Docker Engine daemon must also trust the certificate that the Docker Engine CLI uses. This trust is usually established by way of a trusted third party. The Docker Engine @@ -116,8 +116,8 @@ These configurations are differentiated by the type of entity acting as the Cert An external CA is a trusted 3rd party company that provides a means of creating, issuing, revoking, and otherwise managing certificates. They are *trusted* in -the sense that they have to fulfill specific conditions and maintain high levels -of security and business practices to win your business. You also have to +the sense that they need to fulfill specific conditions and maintain high levels +of security and business practices to win your business. You also need to install the external CA's root certificates for you computers and services to *trust* them. 
diff --git a/swarm/swarm-api.md b/swarm/swarm-api.md index 9ce3f4a0342..92935b81213 100644 --- a/swarm/swarm-api.md +++ b/swarm/swarm-api.md @@ -16,7 +16,7 @@ Engine API. ## Missing endpoints -Some endpoints have not yet been implemented and will return a 404 error. +Some endpoints have not yet been implemented and return a 404 error. ``` POST "/images/create" : "docker import" flow not implement @@ -97,7 +97,7 @@ POST "/images/create" : "docker import" flow not implement ## Registry Authentication -During container create calls, the Swarm API will optionally accept an `X-Registry-Auth` header. +During container create calls, the Swarm API optionally accepts an `X-Registry-Auth` header. If provided, this header is passed down to the engine if the image must be pulled to complete the create operation. @@ -139,7 +139,7 @@ $ docker run --rm -it yourprivateimage:latest ``` -Be aware that tokens are short-lived and will expire quickly. +Be aware that tokens are short-lived and expire quickly. ### Authenticate using username and password diff --git a/swarm/swarm_at_scale/about.md b/swarm/swarm_at_scale/about.md index a18759d11bb..5377a2a6ee8 100644 --- a/swarm/swarm_at_scale/about.md +++ b/swarm/swarm_at_scale/about.md @@ -91,7 +91,7 @@ As the previous diagram shows, each node in the cluster runs the following conta - Container: Swarm agent -After deploying the application, you'll configure your local system so that you +After deploying the application, configure your local system so that you can test the application from your local browser. In production, of course, this step wouldn't be needed. 
diff --git a/swarm/swarm_at_scale/deploy-app.md b/swarm/swarm_at_scale/deploy-app.md index 188dc9e6675..f2dfb1b6ac5 100644 --- a/swarm/swarm_at_scale/deploy-app.md +++ b/swarm/swarm_at_scale/deploy-app.md @@ -1,7 +1,7 @@ --- advisory: swarm-standalone hide-from-sitemap: true -description: Try Swarm at scale +description: Try swarm at scale keywords: docker, swarm, scale, voting, application, certificates redirect_from: - /swarm/swarm_at_scale/04-deploy-app/ @@ -9,7 +9,7 @@ title: Deploy the application --- You've -[deployed the load balancer, the discovery backend, and a Swarm cluster](deploy-infra.md) +[deployed the load balancer, the discovery backend, and a swarm cluster](deploy-infra.md) so now you can build and deploy the voting application itself. You do this by starting a number of "Dockerized applications" running in containers. @@ -18,7 +18,7 @@ container network, `voteapp`. ![Voteapp deployment overview](/swarm/images/final-result.png) -In this procedure you will connect containers to this network. The `voteapp` +In this procedure you connect containers to this network. The `voteapp` network is available to all Docker hosts using the Consul discovery backend. Notice that the `interlock`, `nginx`, `consul`, and `swarm manager` containers on are not part of the `voteapp` overlay container network. @@ -26,10 +26,10 @@ on are not part of the `voteapp` overlay container network. ## Task 1. Set up volume and network This application relies on both an overlay container network and a container -volume. The Docker Engine provides these two features. You'll create them both -on the Swarm `manager` instance. +volume. The Docker Engine provides these two features. Create them both +on the swarm manager instance. -1. Direct your local environment to the Swarm manager host. +1. Direct your local environment to the swarm manager host. 
```bash $ eval $(docker-machine env manager) @@ -84,7 +84,7 @@ images and which do not: You can launch these containers from any host in the cluster using the commands in this section. Each command includes a `-H `flag so that they execute against -the Swarm manager. +the swarm manager. The commands also all use the `-e` flag which is a Swarm constraint. The constraint tells the manager to look for a node with a matching function label. @@ -163,7 +163,7 @@ command below, look for the value constraint. ## Task 3. Check your work and update /etc/hosts In this step, you check your work to make sure the Nginx configuration recorded -the containers correctly. You'll update your local systems `/etc/hosts` file to +the containers correctly. Update your local system's `/etc/hosts` file to allow you to take advantage of the loadbalancer. 1. Change to the `loadbalancer` node. @@ -245,7 +245,7 @@ Now, you can test your application. 3. Navigate to the `http://results.myenterprise.example.com` site to see the results. 4. Try changing your vote. - You'll see both sides change as you switch your vote. + Both sides change as you switch your vote. ![Voting and results page](/swarm/images/votes.gif) @@ -266,7 +266,7 @@ the containers at once. This extra credit $ DOCKER_HOST=$(docker-machine ip manager):3376 - b. List all the application containers on the Swarm. + b. List all the application containers on the swarm. c. Stop and remove each container. @@ -306,7 +306,7 @@ the containers at once. This extra credit 4. When you are satisfied, save the `docker-compose.yml` file to your system. -5. Set `DOCKER_HOST` to the Swarm manager. +5. Set `DOCKER_HOST` to the swarm manager. ```bash $ DOCKER_HOST=$(docker-machine ip manager):3376 @@ -350,7 +350,7 @@ the containers at once. This extra credit Creating scale_result-app_1 ``` -7. Use the `docker ps` command to see the containers on the Swarm cluster. +7. Use the `docker ps` command to see the containers on the swarm cluster. 
```bash $ docker -H $(docker-machine ip manager):3376 ps @@ -416,7 +416,7 @@ the containers at once. This extra credit ## Next steps Congratulations. You have successfully walked through manually deploying a -microservice-based application to a Swarm cluster. Of course, not every +microservice-based application to a swarm cluster. Of course, not every deployment goes smoothly. Now that you've learned how to successfully deploy an application at scale, you should learn [what to consider when troubleshooting -large applications running on a Swarm cluster](troubleshoot.md). +large applications running on a swarm cluster](troubleshoot.md). diff --git a/swarm/swarm_at_scale/deploy-infra.md b/swarm/swarm_at_scale/deploy-infra.md index 496d568aabc..1b745cfcb89 100644 --- a/swarm/swarm_at_scale/deploy-infra.md +++ b/swarm/swarm_at_scale/deploy-infra.md @@ -17,7 +17,7 @@ architecture](about.md). This example assumes you are running on a Mac or Windows system and enabling Docker Engine `docker` commands by provisioning local VirtualBox virtual -machines thru Docker Machine. For this evaluation installation, you'll need 6 (six) +machines using Docker Machine. For this evaluation installation, you need 6 (six) VirtualBox VMs. While this example uses Docker Machine, this is only one example of an @@ -59,8 +59,8 @@ href="https://www.consul.io/" target="blank">Consul container. --engine-opt="label=com.function=consul" keystore ``` - You can set options for the Engine daemon with the `--engine-opt` flag. You'll - use it to label this Engine instance. + You can set options for the Engine daemon with the `--engine-opt` flag. In + this command, you use it to label this Engine instance. 2. Set your local shell to the `keystore` Docker host. @@ -68,8 +68,7 @@ href="https://www.consul.io/" target="blank">Consul container. $ eval $(docker-machine env keystore) ``` -3. Run the -`consul` container. +3. 
Run [the consul container](https://hub.docker.com/r/progrium/consul/){:target="_blank" class="_"}. ```bash $ docker run --restart=unless-stopped -d -p 8500:8500 -h consul progrium/consul -server -bootstrap @@ -102,9 +101,9 @@ scheduling resources against the cluster. In a real-world production deployment, you should configure additional replica Swarm managers as secondaries for high availability (HA). -You'll use the `--eng-opt` flag to set the `cluster-store` and +Use the `--engine-opt` flag to set the `cluster-store` and `cluster-advertise` options to refer to the `keystore` server. These options -support the container network you'll create later. +support the container network you create later. 1. Create the `manager` host. @@ -167,10 +166,9 @@ support the container network you create later. ## Task 3. Add the load balancer -The application uses Interlock and Nginx as a -loadbalancer. Before you build the load balancer host, you'll create the -configuration you'll use for Nginx. +The application uses [Interlock](https://github.com/ehazlett/interlock) and Nginx as a +loadbalancer. Before you build the load balancer host, create the +configuration for Nginx. 1. On your local host, create a `config` directory. @@ -266,7 +264,7 @@ in Step 4. and get a Conflict error such as: ```bash - docker: Error response from daemon: Conflict. The name "/interlock" is already in use by container d846b801a978c76979d46a839bb05c26d2ab949ff9f4f740b06b5e2564bae958. You have to remove (or rename) that container to be able to reuse that name. + docker: Error response from daemon: Conflict. The name "/interlock" is already in use by container d846b801a978c76979d46a839bb05c26d2ab949ff9f4f740b06b5e2564bae958. You have to remove (or rename) that container to be able to reuse that name. ``` Remove the interlock container with the `docker rm interlock` and try again. 
@@ -304,7 +302,7 @@ for example: --engine-opt="label=com.function=frontend01" ``` -You'll use these labels later when starting application containers. In the +These labels are used later when starting application containers. In the commands below, notice the label you are applying to each node. diff --git a/swarm/swarm_at_scale/index.md b/swarm/swarm_at_scale/index.md index 3a2f482a304..1275aa04b42 100644 --- a/swarm/swarm_at_scale/index.md +++ b/swarm/swarm_at_scale/index.md @@ -11,7 +11,7 @@ example illustrates a typical development process. After you establish an infrastructure, you create a Swarm cluster and deploy the application against the cluster. -After building and manually deploying the voting application, you'll construct a +After building and manually deploying the voting application, you can construct a Docker Compose file. You (or others) can use the file to deploy and scale the application further. The article also provides a troubleshooting section you can use while developing or deploying the voting application. diff --git a/swarm/swarm_at_scale/troubleshoot.md b/swarm/swarm_at_scale/troubleshoot.md index 46c859b2d48..a4c52692e4c 100644 --- a/swarm/swarm_at_scale/troubleshoot.md +++ b/swarm/swarm_at_scale/troubleshoot.md @@ -24,38 +24,38 @@ following sections cover different failure scenarios: ## Swarm manager failures -In it's current configuration, the Swarm cluster only has single manager +In its current configuration, the swarm cluster only has a single manager container running on a single node. If the container exits or the node fails, -you will not be able to administer the cluster until you either fix it, or +you cannot administer the cluster until you either fix it, or replace it. -If the failure is the Swarm manager container unexpectedly exiting, Docker will -automatically attempt to restart it. 
This is because the container was started +If the failure is the swarm manager container unexpectedly exiting, Docker +automatically attempts to restart it. This is because the container was started with the `--restart=unless-stopped` switch. -While the Swarm manager is unavailable, the application will continue to work in -its current configuration. However, you will not be able to provision more nodes -or containers until you have a working Swarm manager. +While the swarm manager is unavailable, the application continues to work in +its current configuration. However, you cannot provision more nodes +or containers until you have a working swarm manager. -Docker Swarm supports high availability for Swarm managers. This allows a single -Swarm cluster to have two or more managers. One manager is elected as the +Docker Swarm supports high availability for swarm managers. This allows a single +swarm cluster to have two or more managers. One manager is elected as the primary manager and all others operate as secondaries. In the event that the primary manager fails, one of the secondaries is elected as the new primary, and -cluster operations continue gracefully. If you are deploying multiple Swarm +cluster operations continue gracefully. If you are deploying multiple swarm managers for high availability, you should consider spreading them across multiple failure domains within your infrastructure. ## Consul (discovery backend) failures -The Swarm cluster that you have deployed has a single Consul container on a +The swarm cluster that you have deployed has a single Consul container on a single node performing the cluster discovery service. In this setup, if the -Consul container exits or the node fails, the application will continue to +Consul container exits or the node fails, the application continues to operate in its current configuration. However, certain cluster management -operations will fail. 
These include registering new containers in the cluster +operations fail. These include registering new containers in the cluster and making lookups against the cluster configuration. -If the failure is the `consul` container unexpectedly exiting, Docker will -automatically attempt to restart it. This is because the container was started +If the failure is the `consul` container unexpectedly exiting, Docker +automatically attempts to restart it. This is because the container was started with the `--restart=unless-stopped` switch. The `Consul`, `etcd`, and `Zookeeper` discovery service backends support various @@ -65,9 +65,9 @@ discover service backend. If you are deploying multiple discovery service instances for high availability, you should consider spreading them across multiple failure domains within your infrastructure. -If you operate your Swarm cluster with a single discovery backend service and +If you operate your swarm cluster with a single discovery backend service and this service fails and is unrecoverable, you can start a new empty instance of -the discovery backend and the Swarm agents on each node in the cluster will +the discovery backend and the swarm agents on each node in the cluster repopulate it. @@ -77,9 +77,9 @@ There are many reasons why containers can fail. However, Swarm does not attempt to restart failed containers. One way to automatically restart failed containers is to explicitly start them -with the `--restart=unless-stopped` flag. This will tell the local Docker daemon -to attempt to restart the container if it unexpectedly exits. This will only -work in situations where the node hosting the container and it's Docker daemon +with the `--restart=unless-stopped` flag. This tells the local Docker daemon +to attempt to restart the container if it unexpectedly exits. This only +works in situations where the node hosting the container and its Docker daemon are still up. 
This cannot restart a container if the node hosting it has failed, or if the Docker daemon itself has failed. @@ -87,11 +87,11 @@ Another way is to have an external tool (external to the cluster) monitor the state of your application, and make sure that certain service levels are maintained. These service levels can include things like "have at least 10 web server containers running". In this scenario, if the number of web containers -drops below 10, the tool will attempt to start more. +drops below 10, the tool attempts to start more. In our simple voting-app example, the front-end is scalable and serviced by a load balancer. In the event that one of the two web containers fails (or the -node that is hosting it fails), the load balancer will stop routing requests to it and +node that is hosting it fails), the load balancer stops routing requests to it and -send all requests to the surviving web container. This solution is highly scalable +sends all requests to the surviving web container. This solution is highly scalable meaning you can have up to *n* web containers behind the load balancer. @@ -100,18 +100,18 @@ meaning you can have up to *n* web containers behind the load balancer. The environment that you have provisioned has a single [interlock](https://github.com/ehazlett/interlock) load balancer container running on a single node. In this setup, if the container exits or node fails, -the application will no longer be able to service incoming requests and the -application will be unavailable. +the application cannot service incoming requests and the application is +unavailable. -If the failure is the `interlock` container unexpectedly exiting, Docker will -automatically attempt to restart it. This is because the container was started +If the failure is the `interlock` container unexpectedly exiting, Docker +automatically attempts to restart it. This is because the container was started with the `--restart=unless-stopped` switch. It is possible to build an HA Interlock load balancer configuration. 
One such way is to have multiple Interlock containers on multiple nodes. You can then use DNS round robin, or other technologies, to load balance across each Interlock container. That way, if one Interlock container or node goes down, the others -will continue to service requests. +continue to service requests. If you deploy multiple interlock load balancers, you should consider spreading them across multiple failure domains within your infrastructure. @@ -123,9 +123,9 @@ on two separate nodes. They operate behind an Interlock load balancer that distributes incoming connections across both. In the event that one of the web containers or nodes fails, the load balancer -will start directing all incoming requests to surviving instance. Once the -failed instance is back up, or a replacement is added, the load balancer will -add it to the configuration and start sending a portion of the incoming requests +starts directing all incoming requests to the surviving instance. Once the +failed instance is back up, or a replacement is added, the load balancer +adds it to the configuration and starts sending a portion of the incoming requests to it. For highest availability you should deploy the two frontend web services @@ -134,7 +134,7 @@ infrastructure. You should also consider deploying more. ## Redis failures -If the `redis` container fails, its partnered `voting-app` container will +If the `redis` container fails, its partnered `voting-app` container does not function correctly. The best solution in this instance might be to configure health monitoring that verifies the ability to write to each Redis instance. If an unhealthy `redis` instance is encountered, remove the `voting-app` and @@ -143,12 +143,12 @@ an unhealthy `redis` instance is encountered, remove the `voting-app` and ## Worker (vote-worker) failures If the worker container exits, or the node that is hosting it fails, the redis -containers will queue votes until the worker container comes back up. 
This +containers queue votes until the worker container comes back up. This situation can prevail indefinitely, though a worker needs to come back at some point and process the votes. -If the failure is the `worker01` container unexpectedly exiting, Docker will -automatically attempt to restart it. This is because the container was started +If the failure is the `worker01` container unexpectedly exiting, Docker +automatically attempts to restart it. This is because the container was started with the `--restart=unless-stopped` switch. ## Postgres failures @@ -160,13 +160,12 @@ form of Postgres HA or replication. ## Results-app failures -If the results-app container exits, you will not be able to browse to the -results of the poll until the container is back up and running. Results will -continue to be collected and counted, you will just not be able to view results -until the container is back up and running. +If the results-app container exits, you cannot browse to the results of the poll +until the container is back up and running. Results continue to be collected and +counted, but you can't view results until the container is back up and running. The results-app container was started with the `--restart=unless-stopped` flag -meaning that the Docker daemon will automatically attempt to restart it unless +meaning that the Docker daemon automatically attempts to restart it unless it was administratively stopped. ## Infrastructure failures @@ -180,9 +179,9 @@ as possible. On a service such as AWS, this often translates into balancing infrastructure and services across multiple AWS Availability Zones (AZ) within a Region. 
-To increase the availability of our Swarm cluster you could: +To increase the availability of our swarm cluster you could: -* Configure the Swarm manager for HA and deploy HA nodes in different AZs +* Configure the swarm manager for HA and deploy HA nodes in different AZs * Configure the Consul discovery service for HA and deploy HA nodes in different AZs * Deploy all scalable components of the application across multiple AZs @@ -190,7 +189,7 @@ This configuration is shown in the diagram below. ![Swarm cluster configured for HA](../images/infrastructure-failures.jpg) -This will allow us to lose an entire AZ and still have our cluster and +This allows us to lose an entire AZ and still have our cluster and application operate. But it doesn't have to stop there. Some applications can be balanced across AWS diff --git a/test.md b/test.md index 22d0eb0bc36..db886af7d41 100644 --- a/test.md +++ b/test.md @@ -37,7 +37,7 @@ topics). By default, this is the highest heading included in the right navigation bar. To include more heading levels, set `toc_min: 1` in the page's front-matter (as is done on this `test.md` page). You can go all the way to 6, but if `toc_min` is -geater than `toc_max` then no headings will show. +greater than `toc_max` then no headings are shown. ### Heading 3 @@ -119,7 +119,7 @@ https://github.com/docker/docker.github.io/tree/master/docker-cloud/images - Go to the file in a web browser, grab everything after the domain name from the URL, and use that as the link in your docs file. - - Keep in mind that this link won't resolve until you merge the PR and + - Keep in mind that this link doesn't resolve until you merge the PR and your docs are published on [docs.docker.com](https://docs.docker.com/). {: id="custom-target-id"} @@ -298,13 +298,13 @@ page or displaying content as "cards". This paragraph is centered and colored green by setting CSS directly on the element. 
**Even though you can do this and it's sometimes the right way to go, remember that if -we re-skin the site, any inline styles will need to be dealt with manually!** +we re-skin the site, any inline styles need to be dealt with manually!** {: style="text-align:center; color: green" } {% assign my-text="foo" %} The Liquid assignment just before this line fills in the following token {{ my-text }}. -This will be effective for the rest of this file unless the token is reset. +This is effective for the rest of this file unless the token is reset. {% capture my-other-text %}foo{% endcapture %} Here is another way: {{ my-other-text }} @@ -348,7 +348,7 @@ The `
    `'s are included simply to add visual separation between tabbed content and the other topics on the page. If you have Markdown inside the content of the `
    `, just add `markdown="1"` -as an attribute in the HTML for the `
    ` and Kramdown will render it. +as an attribute in the HTML for the `
    ` so that Kramdown renders it.