diff --git a/.NOT_EDITED_HERE.txt b/.NOT_EDITED_HERE.txt new file mode 100644 index 00000000000..12164aede57 --- /dev/null +++ b/.NOT_EDITED_HERE.txt @@ -0,0 +1,31 @@ +# Files and directories here are not edited in the docker.github.io repo. +# Instead, they are edited in the appropriate upstream repo and pulled +# into this repo periodically. The intent is that if you submit a PR +# with changes to these files or directories, a CI job will fail in the +# PR, indicating that it should not be merged. + +# If you need to edit these files or directories, submit a PR in one of the +# following repos. The file will probably be located within the docs/ subdirectory. + +# docker-trusted-registry: n/a, file an issue +# engine: https://github.com/docker/docker +# compose: https://github.com/docker/compose +# notary: https://github.com/docker/notary +# registry: https://github.com/docker/distribution +# swarm: https://github.com/docker/swarm +# ucp: n/a, file an issue + +# Make sure directories have the trailing slash, keep the list alphabetical + +apidocs/ +compose/reference/ +docker-trusted-registry/reference/ +engine/deprecated.md +engine/extend/ +engine/reference/ +machine/reference/ +notary/reference/ +registry/configuration.md +registry/spec/ +swarm/reference/ +ucp/reference/ diff --git a/.arcconfig b/.arcconfig deleted file mode 100644 index 238fabe3ff3..00000000000 --- a/.arcconfig +++ /dev/null @@ -1,3 +0,0 @@ -{ - "phabricator.uri" : "https://docker-saas.phacility.com/" -} diff --git a/.babelrc b/.babelrc deleted file mode 100755 index ec32f9bff68..00000000000 --- a/.babelrc +++ /dev/null @@ -1,6 +0,0 @@ -{ - "presets": ["es2015", "react", "stage-0"], - "plugins": [ - ["transform-decorators-legacy"], - ] -} diff --git a/.bumpversion.cfg b/.bumpversion.cfg deleted file mode 100644 index 712b62a821a..00000000000 --- a/.bumpversion.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[bumpversion] -current_version = 1.1.0 -commit = True -tag = True - diff --git a/.dockercfg.template b/.dockercfg.template deleted file mode 100644 index 2ca7abf853c..00000000000 --- a/.dockercfg.template +++ /dev/null @@ -1 +0,0 @@ -{"https://index.docker.io/v1/":{"auth":"","email":""}} \ No newline at end of file diff --git a/.drone.yml b/.drone.yml deleted file mode 100644 index 79858ce04c1..00000000000 --- a/.drone.yml +++ /dev/null @@ -1,21 +0,0 @@ -image: go1.3 -env: - - RETHINKDB_TEST_PORT_28015_TCP_ADDR=127.0.0.1 - - RETHINKDB_TEST_PORT_28015_TCP_PORT=28015 - - RETHINKDB_TEST_DATABASE=shipyard_test - - DOCKER_TEST_ADDR=http://10.0.1.32:2375 -script: - - echo "Using env vars:" - - echo $RETHINKDB_TEST_PORT_28015_TCP_ADDR - - echo $RETHINKDB_TEST_PORT_28015_TCP_PORT - - echo $RETHINKDB_TEST_DATABASE - - echo $DOCKER_TEST_ADDR - - go get github.com/tools/godep - - pushd controller - - make test-drone - - popd - - pushd cli - - make test-drone - - popd -services: - - shipyard/rethinkdb diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index 561df815840..00000000000 --- a/.editorconfig +++ /dev/null @@ -1,29 +0,0 @@ -# EditorConfig helps developers define and maintain consistent -# coding styles between different editors and IDEs -# editorconfig.org - -root = true - - -[*] - -# Change these settings to your own preference -indent_style = space -indent_size = 4 - -# We recommend you to keep these unchanged -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true - -[*.md] -trim_trailing_whitespace = false - -[package.json] -indent_style = space -indent_size = 2 - 
-[bower.json] -indent_style = space -indent_size = 2 diff --git a/.flowconfig b/.flowconfig deleted file mode 100644 index d8c97510c36..00000000000 --- a/.flowconfig +++ /dev/null @@ -1,14 +0,0 @@ -[ignore] -.*/app/scripts/build.* -.*/.tmp/.* -.*/node_modules/webpack.* -.*/node_modules/gulp-sass/.* - -[include] -app/scripts -node_modules/hub-js-sdk - -[libs] -./flow-libs - -[options] diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 00000000000..ff1073d9e95 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,88 @@ +## Contributing + +We value your documentation contributions, and we want to make it as easy +as possible to work in this repository. One of the first things to decide is +which branch to base your work on. If you get confused, just ask and we will +help. If a reviewer realizes you have based your work on the wrong branch, we'll +let you know so that you can rebase it. + +>**Note**: To contribute code to Docker projects, see the +[Contribution guidelines](opensource/project/who-written-for). + +### Quickstart + +If you spot a problem while reading the documentation and want to try to fix it +yourself, click the **Edit this page** link at the bottom of that page. The +page will open in the Github editor, which means you don't need to know a lot +about Git, or even about Markdown. + +When you save, you will be prompted to create a fork if you don't already have +one, and to create a branch in your fork and submit the pull request. We hope +you give it a try! + +### Overall doc improvements + +Most commits will be made against the `master` branch. This include: + +- Conceptual and task-based information not specific to new features +- Restructuring / rewriting +- Doc bug fixing +- Typos and grammar errors + +One quirk of this project is that the `master` branch is where the live docs are +published from, so upcoming features can't be documented there. See +[Specific new features for a project](#specific-new-features-for-a-project) +for how to document upcoming features. These feature branches will be periodically +merged with `master`, so don't worry about fixing typos and documentation bugs +there. + +>Do you enjoy creating graphics? Good graphics are key to great documentation, +and we especially value contributions in this area. + +### Specific new features for a project + +Our docs cover many projects which release at different times. **If, and only if, +your pull request relates to a currently unreleased feature of a project, base +your work on that project's `vnext` branch.** These branches were created by +cloning `master` and then importing a project's `master` branch's docs into it +(at the time of the migration), in a way that preserved the commit history. When +a project has a release, its `vnext` branch will be merged into `master` and your +work will be visible on docs.docker.com. 
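As a rough sketch (the remote name and topic-branch name below are only examples), basing work on a project's `vnext` branch looks like this:

```bash
# Start a topic branch from the project's vnext branch
git fetch origin
git checkout -b my-engine-feature-docs origin/vnext-engine

# If you accidentally started from master, rebase onto the right base
# instead of redoing the work:
git rebase --onto origin/vnext-engine master my-engine-feature-docs
```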
+ +The following `vnext` branches currently exist: + +- **[vnext-engine](https://github.com/docker/docker.github.io/tree/vnext-engine):** + docs for upcoming features in the [docker/docker](https://github.com/docker/docker/) + project + +- **[vnext-compose](https://github.com/docker/docker.github.io/tree/vnext-compose):** + docs for upcoming features in the [docker/compose](https://github.com/docker/compose/) + project + +- **[vnext-distribution](https://github.com/docker/docker.github.io/tree/vnext-distribution):** + docs for upcoming features in the [docker/distribution](https://github.com/docker/distribution/) + project + +- **[vnext-opensource](https://github.com/docker/docker.github.io/tree/vnext-opensource):** + docs for upcoming features in the [docker/opensource](https://github.com/docker/opensource/) + project + +- **[vnext-swarm](https://github.com/docker/docker.github.io/tree/vnext-swarm):** + docs for upcoming features in the [docker/swarm](https://github.com/docker/swarm/) + project + +- **[vnext-toolbox](https://github.com/docker/docker.github.io/tree/vnext-toolbox):** + docs for upcoming features in the [docker/toolbox](https://github.com/docker/toolbox/) + project + +- **[vnext-kitematic](https://github.com/docker/docker.github.io/tree/vnext-kitematic):** + docs for upcoming features in the [docker/kitematic](https://github.com/docker/kitematic/) + project + +## Style guide + +If you have questions about how to write for Docker's documentation, please see +the [style guide](https://docs.docker.com/opensource/doc-style/). The style guide provides +guidance about grammar, syntax, formatting, styling, language, or tone. If +something isn't clear in the guide, please submit an issue to let us know or +submit a pull request to help us improve it. diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000000..e643696f57a --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,26 @@ +### Problem description + + + +### Problem location + + + +- I saw a problem on the following URL: + +- I couldn't find the information I wanted. I expected to find it near the following URL: + +- Other:
+ +### Project version(s) affected + + + +### Suggestions for a fix + + + + + diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000000..403812517b0 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,33 @@ + + + + +### Describe the proposed changes + + + +### Project version + + + +### Related issue + + + +### Related issue or PR in another project + + + +### Please take a look + + + + + diff --git a/.htmllintrc b/.htmllintrc deleted file mode 100644 index b00de14f394..00000000000 --- a/.htmllintrc +++ /dev/null @@ -1,8 +0,0 @@ -{ - // names of npm modules to load into htmllint - "plugins": [], - "indent-style": "spaces", - "indent-width": 2, - "attr-name-style": "dash", - "html-req-lang": true -} diff --git a/.lfsconfig b/.lfsconfig deleted file mode 100644 index b246691ccf8..00000000000 --- a/.lfsconfig +++ /dev/null @@ -1,3 +0,0 @@ -[lfs] - url = git@github.com:docker/pinata - concurrenttransfers = 8 diff --git a/.mailmap b/.mailmap deleted file mode 100644 index 0527b6d84d6..00000000000 --- a/.mailmap +++ /dev/null @@ -1,254 +0,0 @@ -# Generate AUTHORS: hack/generate-authors.sh - -# Tip for finding duplicates (besides scanning the output of AUTHORS for name -# duplicates that aren't also email duplicates): scan the output of: -# git log --format='%aE - %aN' | sort -uf -# -# For explanation on this file format: man git-shortlog - -Patrick Stapleton -Shishir Mahajan -Erwin van der Koogh -Ahmed Kamal -Tejesh Mehta -Cristian Staretu -Cristian Staretu -Cristian Staretu -Marcus Linke -Aleksandrs Fadins -Christopher Latham -Hu Keping -Wayne Chang -Chen Chao -Daehyeok Mun - - - - - - -Guillaume J. Charmes - - - - - -Thatcher Peskens -Thatcher Peskens -Thatcher Peskens dhrp -Jérôme Petazzoni jpetazzo -Jérôme Petazzoni -Joffrey F -Joffrey F -Joffrey F -Tim Terhorst -Andy Smith - - - - - - - - - -Walter Stanish - -Roberto Hashioka -Konstantin Pelykh -David Sissitka -Nolan Darilek - -Benoit Chesneau -Jordan Arentsen -Daniel Garcia -Miguel Angel Fernández -Bhiraj Butala -Faiz Khan -Victor Lyuboslavsky -Jean-Baptiste Barth -Matthew Mueller - -Shih-Yuan Lee -Daniel Mizyrycki root -Jean-Baptiste Dalido - - - - - - - - - - - - - - -Sven Dowideit -Sven Dowideit -Sven Dowideit -Sven Dowideit <¨SvenDowideit@home.org.au¨> -Sven Dowideit -Sven Dowideit -Sven Dowideit - -Alexander Morozov -Alexander Morozov - -O.S. Tezer - -Roberto G. Hashioka - - - - - -Sridhar Ratnakumar -Sridhar Ratnakumar -Liang-Chi Hsieh -Aleksa Sarai -Aleksa Sarai -Aleksa Sarai -Will Weaver -Timothy Hobbs -Nathan LeClaire -Nathan LeClaire - - - - -Matthew Heon - - - - -Francisco Carriedo - - - - -Brian Goff - - - -Hollie Teal - - - -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle -Jessica Frazelle - - - - -Thomas LEVEIL Thomas LÉVEIL - - -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Antonio Murdaca -Darren Shepherd -Deshi Xiao -Deshi Xiao -Doug Davis -Jacob Atzen -Jeff Nickoloff -John Howard (VM) -John Howard (VM) -John Howard (VM) -John Howard (VM) -Madhu Venugopal -Mary Anthony -Mary Anthony moxiegirl -Mary Anthony -mattyw -resouer -AJ Bowen soulshake -AJ Bowen soulshake -Tibor Vass -Tibor Vass -Vincent Bernat -Yestin Sun -bin liu -John Howard (VM) jhowardmsft -Ankush Agarwal -Tangi COLIN tangicolin -Allen Sun -Adrien Gallouët - -Anuj Bahuguna -Anusha Ragunathan -Avi Miller -Brent Salisbury -Chander G -Chun Chen -Ying Li -Daehyeok Mun - -Daniel, Dao Quang Minh -Daniel Nephin -Dave Tucker -Doug Tangren -Frederick F. 
Kautz IV -Ben Golub -Harold Cooper -hsinko <21551195@zju.edu.cn> -Josh Hawn -Justin Cormack - - -Kamil Domański -Lei Jitang - -Linus Heckemann - -Lynda O'Leary - -Marianna Tessel -Michael Huettermann -Moysés Borges - -Nigel Poulton -Qiang Huang - -Boaz Shuster -Shuwei Hao - -Soshi Katsuta - -Stefan Berger - -Stephen Day - -Toli Kuznets -Tristan Carel - -Vincent Demeester - -Vishnu Kannan -xlgao-zju xlgao -yuchangchun y00277921 - - - - -Hao Shu Wei - - - - - - - -Shengbo Song mYmNeo -Shengbo Song - -Sylvain Bellemare - diff --git a/.npmrc b/.npmrc deleted file mode 100644 index dc4c80cdd00..00000000000 --- a/.npmrc +++ /dev/null @@ -1,2 +0,0 @@ -save = true -save-exact = true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml deleted file mode 100644 index 0e7b9d5f3bb..00000000000 --- a/.pre-commit-config.yaml +++ /dev/null @@ -1,25 +0,0 @@ -- repo: git://github.com/pre-commit/pre-commit-hooks - sha: 'v0.4.2' - hooks: - - id: check-added-large-files - - id: check-docstring-first - - id: check-merge-conflict - - id: check-yaml - - id: check-json - - id: debug-statements - - id: end-of-file-fixer - - id: flake8 - - id: name-tests-test - exclude: 'tests/(integration/testcases\.py|helpers\.py)' - - id: requirements-txt-fixer - - id: trailing-whitespace -- repo: git://github.com/asottile/reorder_python_imports - sha: v0.1.0 - hooks: - - id: reorder-python-imports - language_version: 'python2.7' - args: - - --add-import - - from __future__ import absolute_import - - --add-import - - from __future__ import unicode_literals diff --git a/.stylelintrc b/.stylelintrc deleted file mode 100644 index 8f369f1834f..00000000000 --- a/.stylelintrc +++ /dev/null @@ -1,11 +0,0 @@ -{ - "extends": "stylelint-config-standard", - "rules": { - "color-hex-length": "long" - }, - "ignoreFiles": [ - "node_modules/**/**.css", - "coverage/**/**.css", - "dist/**/**.css" - ] -} diff --git a/404.md b/404.md index abe1184d040..53e1ccdfd35 100644 --- a/404.md +++ b/404.md @@ -5,27 +5,53 @@ permalink: /404.html --- diff --git a/Dockerfile b/Dockerfile index ae3276413f2..239d0ed7ab2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,62 @@ -FROM starefossen/github-pages:onbuild +FROM starefossen/github-pages -ONBUILD RUN git clone https://www.github.com/docker/docker.github.io docs +RUN git clone https://www.github.com/docker/docker.github.io allv +RUN jekyll build -s allv -d allvbuild -ONBUILD WORKDIR docs +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.4 +RUN mkdir allvbuild/v1.4 +RUN jekyll build -s allv -d allvbuild/v1.4 +RUN find allvbuild/v1.4 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.4/#g' +RUN find allvbuild/v1.4 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.4/#g' +RUN find allvbuild/v1.4 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.4/#g' -ONBUILD COPY . 
/usr/src/app +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.5 +RUN mkdir allvbuild/v1.5 +RUN jekyll build -s allv -d allvbuild/v1.5 +RUN find allvbuild/v1.5 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.5/#g' +RUN find allvbuild/v1.5 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.5/#g' +RUN find allvbuild/v1.5 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.5/#g' -CMD jekyll serve -d /_site --watch -H 0.0.0.0 -P 4000 +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.6 +RUN mkdir allvbuild/v1.6 +RUN jekyll build -s allv -d allvbuild/v1.6 +RUN find allvbuild/v1.6 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.6/#g' +RUN find allvbuild/v1.6 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.6/#g' +RUN find allvbuild/v1.6 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.6/#g' + +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.7 +RUN mkdir allvbuild/v1.7 +RUN jekyll build -s allv -d allvbuild/v1.7 +RUN find allvbuild/v1.7 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.7/#g' +RUN find allvbuild/v1.7 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.7/#g' +RUN find allvbuild/v1.7 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.7/#g' + +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.8 +RUN mkdir allvbuild/v1.8 +RUN jekyll build -s allv -d allvbuild/v1.8 +RUN find allvbuild/v1.8 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.8/#g' +RUN find allvbuild/v1.8 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.8/#g' +RUN find allvbuild/v1.8 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.8/#g' + +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.9 +RUN mkdir allvbuild/v1.9 +RUN jekyll build -s allv -d allvbuild/v1.9 +RUN find allvbuild/v1.9 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.9/#g' +RUN find allvbuild/v1.9 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.9/#g' +RUN find allvbuild/v1.9 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.9/#g' + +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.10 +RUN mkdir allvbuild/v1.10 +RUN jekyll build -s allv -d allvbuild/v1.10 +RUN find allvbuild/v1.10 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.10/#g' +RUN find allvbuild/v1.10 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.10/#g' +RUN find allvbuild/v1.10 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.10/#g' + +RUN git --git-dir=./allv/.git --work-tree=./allv checkout v1.11 +RUN mkdir allvbuild/v1.11 +RUN jekyll build -s allv -d allvbuild/v1.11 +RUN find allvbuild/v1.11 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="/#href="/v1.11/#g' +RUN find allvbuild/v1.11 -type f -name '*.html' -print0 | xargs -0 sed -i 's#src="/#src="/v1.11/#g' +RUN find allvbuild/v1.11 -type f -name '*.html' -print0 | xargs -0 sed -i 's#href="https://docs.docker.com/#href="/v1.11/#g' + +CMD jekyll serve -s /usr/src/app/allvbuild -d /_site --no-watch -H 0.0.0.0 -P 4000 diff --git a/README.md b/README.md index df1d316d294..4310c6689a2 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,104 @@ # Docs @ Docker Welcome to the repo for 
our documentation. This is the source for the URL -served at docs.docker.com. +served at https://docs.docker.com/. Feel free to send us pull requests and file issues. Our docs are completely open source and we deeply appreciate contributions from our community! +## Providing feedback + +We really want your feedback, and we've made it easy. You can edit, rate, or +file an issue at the bottom of every page on docs.docker.com. + +**Please only file issues about the documentation in this repository.** One way +to think about this is that you should file a bug here if your issue is that you +don't see something that should be in the docs, or you see something incorrect +or confusing in the docs. + +- If your problem is a general question about how to configure or use Docker, + consider asking a question on https://forums.docker.com instead. + +- If you have an idea for a new feature or behavior change in a specific aspect + of Docker, or have found a bug in part of Docker, please file that issue in + the project's code repository. + +## Contributing + +We value your documentation contributions, and we want to make it as easy +as possible to work in this repository. One of the first things to decide is +which branch to base your work on. If you get confused, just ask and we will +help. If a reviewer realizes you have based your work on the wrong branch, we'll +let you know so that you can rebase it. + +>**Note**: To contribute code to Docker projects, see the +[Contribution guidelines](opensource/project/who-written-for). + +### Overall doc improvements + +Most commits will be made against the `master` branch. This include: + +- Conceptual and task-based information not specific to new features +- Restructuring / rewriting +- Doc bug fixing +- Typos and grammar errors + +One quirk of this project is that the `master` branch is where the live docs are +published from, so upcoming features can't be documented there. See +[Specific new features for a project](#specific-new-features-for-a-project) +for how to document upcoming features. These feature branches will be periodically +merged with `master`, so don't worry about fixing typos and documentation bugs +there. + +>Do you enjoy creating graphics? Good graphics are key to great documentation, +and we especially value contributions in this area. + +### Specific new features for a project + +Our docs cover many projects which release at different times. **If, and only if, +your pull request relates to a currently unreleased feature of a project, base +your work on that project's `vnext` branch.** These branches were created by +cloning `master` and then importing a project's `master` branch's docs into it +(at the time of the migration), in a way that preserved the commit history. When +a project has a release, its `vnext` branch will be merged into `master` and your +work will be visible on docs.docker.com. 
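Besides the list below, you can check which `vnext` branches exist at any given moment straight from the command line; for example:

```bash
# List the vnext branches in the canonical repository
git ls-remote https://github.com/docker/docker.github.io.git "refs/heads/vnext-*"
```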
+ +The following `vnext` branches currently exist: + +- **[vnext-engine](https://github.com/docker/docker.github.io/tree/vnext-engine):** + docs for upcoming features in the [docker/docker](https://github.com/docker/docker/) + project + +- **[vnext-compose](https://github.com/docker/docker.github.io/tree/vnext-compose):** + docs for upcoming features in the [docker/compose](https://github.com/docker/compose/) + project + +- **[vnext-distribution](https://github.com/docker/docker.github.io/tree/vnext-distribution):** + docs for upcoming features in the [docker/distribution](https://github.com/docker/distribution/) + project + +- **[vnext-opensource](https://github.com/docker/docker.github.io/tree/vnext-opensource):** + docs for upcoming features in the [docker/opensource](https://github.com/docker/opensource/) + project + +- **[vnext-swarm](https://github.com/docker/docker.github.io/tree/vnext-swarm):** + docs for upcoming features in the [docker/swarm](https://github.com/docker/swarm/) + project + +- **[vnext-toolbox](https://github.com/docker/docker.github.io/tree/vnext-toolbox):** + docs for upcoming features in the [docker/toolbox](https://github.com/docker/toolbox/) + project + +- **[vnext-kitematic](https://github.com/docker/docker.github.io/tree/vnext-kitematic):** + docs for upcoming features in the [docker/kitematic](https://github.com/docker/kitematic/) + project + + ## Staging You have three options: -1. (Most performant, slowest setup) Clone this repo, [install Ruby 2.3 or higher (required)](https://www.ruby-lang.org/en/documentation/installation/), [install the GitHub Pages Ruby gem](https://help.github.com/articles/setting-up-your-github-pages-site-locally-with-jekyll/), then run `jekyll serve` from within the directory. -2. (Slow performance on Mac/Windows, fast setup) Clone this repo and run our - staging container: +1. Clone this repo and run our staging container: ```bash git clone https://github.com/docker/docker.github.io.git @@ -20,12 +106,33 @@ You have three options: docker-compose up ``` - If you haven't got Docker Compose installed, [follow these installation instructions](https://docs.docker.com/compose/install/). -3. (Edit entirely in the browser, no local clone) Fork this repo in GitHub, change your fork's repository name to `YOUR_GITHUB_USERNAME.github.io`, and make any changes. + If you haven't got Docker Compose installed, + [follow these installation instructions](https://docs.docker.com/compose/install/). + + The container runs in the background and incrementally rebuilds the site each + time a file changes. You can keep your browser open to http://localhost:4000/ + and refresh to see your changes. The container runs in the foreground, but + you can use `CTRL+C` to get the command prompt back. To stop the container, + issue the following command: + + ```bash + docker-compose down + ``` + +2. Use Jekyll directly. Clone this repo, [install Ruby 2.3 or higher + (required)](https://www.ruby-lang.org/en/documentation/installation/), + [install the GitHub Pages Ruby gem](https://help.github.com/articles/setting-up-your-github-pages-site-locally-with-jekyll/), + then run `jekyll serve` from within the directory. -In the first two options, the site will be staged at `http://localhost:4000` (unless Jekyll is behaving in some non-default way). + The `jekyll serve` process runs in the foreground, and starts a web server + running on http://localhost:4000/ by default. To stop it, use `CTRL+C`. 
+ You can continue working in a second terminal and Jekyll will rebuild the + website incrementally. Refresh the browser to preview your changes. -In the third option, the site will be viewable at `http://YOUR_GITHUB_USERNAME.github.io`, about a minute after your first change is merged into your fork. +3. Use Github Pages, with or without a local clone. Fork this repo in GitHub, + change your fork's repository name to `YOUR_GITHUB_USERNAME.github.io`, and + make changes to the Markdown files in your `master` branch. Browse to + https://.github.io/ to preview the changes. ## Important files diff --git a/_config.yml b/_config.yml index b924d4a473e..87023235029 100644 --- a/_config.yml +++ b/_config.yml @@ -9,14 +9,13 @@ incremental: true permalink: pretty safe: false lsi: false -exclude: - - vendor - - bin +url: https://docs.docker.com gems: - jekyll-redirect-from - - jekyll-sitemap - jekyll-gist + - jekyll-seo-tag + defaults: - diff --git a/_data/docsarchive/vnext-compose/Dockerfile b/_data/docsarchive/vnext-compose/Dockerfile index cd0bf3eebf9..aff08596f90 100644 --- a/_data/docsarchive/vnext-compose/Dockerfile +++ b/_data/docsarchive/vnext-compose/Dockerfile @@ -9,4 +9,4 @@ ONBUILD RUN git checkout vnext-compose ONBUILD COPY . /usr/src/app -CMD jekyll serve -d /_site --watch -H 0.0.0.0 -P 4000 +CMD jekyll serve -d /_site -H 0.0.0.0 -P 4000 diff --git a/_data/docsarchive/vnext-distribution/Dockerfile b/_data/docsarchive/vnext-distribution/Dockerfile new file mode 100644 index 00000000000..8461f31668b --- /dev/null +++ b/_data/docsarchive/vnext-distribution/Dockerfile @@ -0,0 +1,12 @@ + +FROM starefossen/github-pages:onbuild + +ONBUILD RUN git clone https://www.github.com/docker/docker.github.io docs + +ONBUILD WORKDIR docs + +ONBUILD RUN git checkout vnext-distribution + +ONBUILD COPY . /usr/src/app + +CMD jekyll serve -d /_site -H 0.0.0.0 -P 4000 diff --git a/_data/docsarchive/vnext-engine/Dockerfile b/_data/docsarchive/vnext-engine/Dockerfile new file mode 100644 index 00000000000..659b48723f1 --- /dev/null +++ b/_data/docsarchive/vnext-engine/Dockerfile @@ -0,0 +1,12 @@ + +FROM starefossen/github-pages:onbuild + +ONBUILD RUN git clone https://www.github.com/docker/docker.github.io docs + +ONBUILD WORKDIR docs + +ONBUILD RUN git checkout vnext-engine + +ONBUILD COPY . /usr/src/app + +CMD jekyll serve -d /_site -H 0.0.0.0 -P 4000 diff --git a/_data/docsarchive/vnext-kitematic/Dockerfile b/_data/docsarchive/vnext-kitematic/Dockerfile new file mode 100644 index 00000000000..60b61b64286 --- /dev/null +++ b/_data/docsarchive/vnext-kitematic/Dockerfile @@ -0,0 +1,12 @@ + +FROM starefossen/github-pages:onbuild + +ONBUILD RUN git clone https://www.github.com/docker/docker.github.io docs + +ONBUILD WORKDIR docs + +ONBUILD RUN git checkout vnext-kitematic + +ONBUILD COPY . /usr/src/app + +CMD jekyll serve -d /_site -H 0.0.0.0 -P 4000 diff --git a/_data/docsarchive/vnext-machine/Dockerfile b/_data/docsarchive/vnext-machine/Dockerfile index 75f04b61a75..ffb099e5d45 100644 --- a/_data/docsarchive/vnext-machine/Dockerfile +++ b/_data/docsarchive/vnext-machine/Dockerfile @@ -9,4 +9,4 @@ ONBUILD RUN git checkout vnext-machine ONBUILD COPY . 
/usr/src/app -CMD jekyll serve -d /_site --watch -H 0.0.0.0 -P 4000 +CMD jekyll serve -d /_site -H 0.0.0.0 -P 4000 diff --git a/_data/docsarchive/vnext-toolbox/Dockerfile b/_data/docsarchive/vnext-toolbox/Dockerfile new file mode 100644 index 00000000000..bda048ae6e9 --- /dev/null +++ b/_data/docsarchive/vnext-toolbox/Dockerfile @@ -0,0 +1,12 @@ + +FROM starefossen/github-pages:onbuild + +ONBUILD RUN git clone https://www.github.com/docker/docker.github.io docs + +ONBUILD WORKDIR docs + +ONBUILD RUN git checkout vnext-toolbox + +ONBUILD COPY . /usr/src/app + +CMD jekyll serve -d /_site -H 0.0.0.0 -P 4000 diff --git a/_data/toc.yaml b/_data/toc.yaml index dd3fd854c4a..555ee55dc3f 100644 --- a/_data/toc.yaml +++ b/_data/toc.yaml @@ -6,7 +6,7 @@ toc: - sectiontitle: Install section: - path: /engine/installation/mac/ - title: Installation on Mac OS X + title: Installation on macOS - path: /engine/installation/windows/ title: Installation on Windows - sectiontitle: On Linux distributions @@ -25,8 +25,6 @@ toc: title: Installation on Arch Linux - path: /engine/installation/linux/cruxlinux/ title: Installation on CRUX Linux - - path: /engine/installation/linux/frugalware/ - title: Installation on FrugalWare - path: /engine/installation/linux/gentoolinux/ title: Installation on Gentoo - path: /engine/installation/linux/oracle/ @@ -111,6 +109,8 @@ toc: title: Work with network commands - path: /engine/userguide/networking/get-started-overlay/ title: Get started with multi-host networking + - path: /engine/userguide/networking/get-started-macvlan/ + title: Get started with macvlan network driver - path: /engine/userguide/networking/overlay-security-model/ title: Swarm mode overlay network security model - path: /engine/userguide/networking/configure-dns/ @@ -167,6 +167,8 @@ toc: title: Splunk logging driver - path: /engine/admin/dsc/ title: PowerShell DSC Usage + - path: /engine/admin/ansible/ + title: Using Ansible - path: /engine/admin/chef/ title: Using Chef - path: /engine/admin/puppet/ @@ -253,6 +255,8 @@ toc: title: Seccomp security profiles for Docker - sectiontitle: Extend Engine section: + - path: /engine/extend/ + title: Managed plugin system - path: /engine/extend/plugins_authorization/ title: Access authorization plugin - path: /engine/extend/plugins/ @@ -630,7 +634,9 @@ toc: - path: /ucp/overview/ title: Universal Control Plane overview - path: /ucp/install-sandbox/ - title: Evaluate UCP in a sandbox + title: Install DDC in a sandbox for evaluation + - path: /ucp/install-sandbox-2/ + title: Evaluate DDC in a sandbox deployment - path: /ucp/architecture/ title: Architecture - sectiontitle: Installation @@ -769,6 +775,8 @@ toc: title: backup - path: /docker-trusted-registry/reference/dumpcerts/ title: dumpcerts + - path: /docker-trusted-registry/reference/images/ + title: images - path: /docker-trusted-registry/reference/install/ title: install - path: /docker-trusted-registry/reference/join/ @@ -781,6 +789,8 @@ toc: title: remove - path: /docker-trusted-registry/reference/restore/ title: restore + - path: /docker-trusted-registry/reference/upgrade/ + title: upgrade - sectiontitle: Configuration section: - path: /docker-trusted-registry/configure/configuration/ @@ -1095,6 +1105,8 @@ toc: section: - path: /docker-store/ title: Docker Store Overview + - path: /docker-store/ + title: Submit a product to Docker Store - path: /docker-store/faq/ title: Docker Store FAQs - sectiontitle: Docker Toolbox @@ -1206,7 +1218,7 @@ toc: - path: /registry/recipes/mirror/ title: Mirroring Docker Hub - 
path: /registry/recipes/osx-setup-guide/ - title: Running on OS X + title: Running on macOS - sectiontitle: Reference section: - path: /registry/spec/ diff --git a/_includes/rightnav.md b/_includes/rightnav.md deleted file mode 100644 index c69dcf472c3..00000000000 --- a/_includes/rightnav.md +++ /dev/null @@ -1,2 +0,0 @@ -* TOC -{: toc} diff --git a/_layouts/docs.html b/_layouts/docs.html index dc5ff90711d..da152b93151 100644 --- a/_layouts/docs.html +++ b/_layouts/docs.html @@ -30,23 +30,17 @@ - - - - + - + - - + -Docker Docs{% if page.title %}: {{ page.title }}{% endif %} - - + @@ -81,6 +75,7 @@ +{% seo %}
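The top-level Dockerfile changed earlier in this diff repeats the same checkout, build, and link-rewrite sequence once per archived version. Purely as an illustration of that pattern (a sketch, not a drop-in replacement for the Dockerfile), the per-version steps collapse into a shell loop:

```bash
#!/bin/bash
# For each archived release: check out its tag, build it into its own
# subdirectory, then rewrite absolute links so they stay under /<version>/.
for v in v1.4 v1.5 v1.6 v1.7 v1.8 v1.9 v1.10 v1.11; do
  git --git-dir=./allv/.git --work-tree=./allv checkout "$v"
  mkdir "allvbuild/$v"
  jekyll build -s allv -d "allvbuild/$v"
  find "allvbuild/$v" -type f -name '*.html' -print0 | xargs -0 sed -i "s#href=\"/#href=\"/$v/#g"
  find "allvbuild/$v" -type f -name '*.html' -print0 | xargs -0 sed -i "s#src=\"/#src=\"/$v/#g"
  find "allvbuild/$v" -type f -name '*.html' -print0 | xargs -0 sed -i "s#href=\"https://docs.docker.com/#href=\"/$v/#g"
done
```

Each tagged release ends up in its own subdirectory of `allvbuild`, with its absolute links rewritten to stay under that version's path prefix, which is exactly what the repeated `RUN` lines in the Dockerfile do one version at a time.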
diff --git a/compose/compose-file.md b/compose/compose-file.md index 7b38d8700cd..2b35a6a6082 100644 --- a/compose/compose-file.md +++ b/compose/compose-file.md @@ -442,7 +442,7 @@ Containers for the linked service will be reachable at a hostname identical to the alias, or the service name if no alias was specified. Links also express dependency between services in the same way as -[depends_on](compose-file.md#depends-on), so they determine the order of service startup. +[depends_on](compose-file.md#dependson), so they determine the order of service startup. > **Note:** If you define both links and [networks](compose-file.md#networks), services with > links between them must share at least one network in common in order to diff --git a/compose/django.md b/compose/django.md index 0982521aeeb..2f33a16835d 100644 --- a/compose/django.md +++ b/compose/django.md @@ -146,7 +146,7 @@ In this section, you set up the database connection for Django. DATABASES = { 'default': { - 'ENGINE': 'django.db.backends.postgresql_psycopg2', + 'ENGINE': 'django.db.backends.postgresql', 'NAME': 'postgres', 'USER': 'postgres', 'HOST': 'db', diff --git a/compose/gettingstarted.md b/compose/gettingstarted.md index beda5016c07..d7019d63101 100644 --- a/compose/gettingstarted.md +++ b/compose/gettingstarted.md @@ -68,21 +68,21 @@ dependencies the Python application requires, including Python itself. RUN pip install -r requirements.txt CMD python app.py - This tells Docker to: - - * Build an image starting with the Python 2.7 image. - * Add the current directory `.` into the path `/code` in the image. - * Set the working directory to `/code`. - * Install the Python dependencies. - * Set the default command for the container to `python app.py` - - For more information on how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md). - + This tells Docker to: + + * Build an image starting with the Python 2.7 image. + * Add the current directory `.` into the path `/code` in the image. + * Set the working directory to `/code`. + * Install the Python dependencies. + * Set the default command for the container to `python app.py` +
+ For more information on how to write Dockerfiles, see the [Docker user guide](/engine/tutorials/dockerimages.md#building-an-image-from-a-dockerfile) and the [Dockerfile reference](/engine/reference/builder.md). +
2. Build the image. $ docker build -t web . - This command builds an image named `web` from the contents of the current + This command builds an image named `web` from the contents of the current directory. The command automatically locates the `Dockerfile`, `app.py`, and `requirements.txt` files. @@ -91,7 +91,7 @@ dependencies the Python application requires, including Python itself. Define a set of services using `docker-compose.yml`: -1. Create a file called docker-compose.yml in your project directory and add +Create a file called docker-compose.yml in your project directory and add the following: diff --git a/compose/index.md b/compose/index.md index deb4fbe91b8..6c1fb143d5d 100644 --- a/compose/index.md +++ b/compose/index.md @@ -20,7 +20,7 @@ Compose is a tool for defining and running multi-container Docker applications. - [Get started with Rails](rails.md) - [Get started with WordPress](wordpress.md) - [Frequently asked questions](faq.md) -- [Command line reference](./reference/index.md) +- [Command-line reference](./reference/index.md) - [Compose file reference](compose-file.md) - [Environment file](env-file.md) diff --git a/compose/install.md b/compose/install.md index 990b6b0b64e..36c90413322 100644 --- a/compose/install.md +++ b/compose/install.md @@ -11,13 +11,13 @@ title: Install Compose # Install Docker Compose -You can run Compose on OS X, Windows and 64-bit Linux. To install it, you'll need to install Docker first. +You can run Compose on macOS, Windows and 64-bit Linux. To install it, you'll need to install Docker first. To install Compose, do the following: 1. Install Docker Engine: - * Mac OS X installation + * macOS installation * Windows installation @@ -38,7 +38,7 @@ which the release page specifies, in your terminal. The following is an example command illustrating the format: - curl -L https://github.com/docker/compose/releases/download/1.8.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose + curl -L "https://github.com/docker/compose/releases/download/1.8.1/docker-compose-$(uname -s)-$(uname -m)" > /usr/local/bin/docker-compose If you have problems installing with `curl`, see [Alternative Install Options](install.md#alternative-install-options). @@ -53,7 +53,7 @@ which the release page specifies, in your terminal. 7. Test the installation. $ docker-compose --version - docker-compose version: 1.8.0 + docker-compose version: 1.8.1 ## Alternative install options @@ -76,13 +76,13 @@ to get started. Compose can also be run inside a container, from a small bash script wrapper. To install compose as a container run: - $ curl -L https://github.com/docker/compose/releases/download/1.8.0/run.sh > /usr/local/bin/docker-compose + $ curl -L https://github.com/docker/compose/releases/download/1.8.1/run.sh > /usr/local/bin/docker-compose $ chmod +x /usr/local/bin/docker-compose ## Master builds If you're interested in trying out a pre-release build you can download a -binary from https://dl.bintray.com/docker-compose/master/. Pre-release +binary from [https://dl.bintray.com/docker-compose/master/](https://dl.bintray.com/docker-compose/master/). Pre-release builds allow you to try out new features before they are released, but may be less stable. diff --git a/compose/reference/envvars.md b/compose/reference/envvars.md index d405fdb6bb9..4887e6cdf8a 100644 --- a/compose/reference/envvars.md +++ b/compose/reference/envvars.md @@ -34,7 +34,7 @@ Specify the path to a Compose file. 
If not provided, Compose looks for a file na succession until a file by that name is found. This variable supports multiple compose files separate by a path separator (on -Linux and OSX the path separator is `:`, on Windows it is `;`). For example: +Linux and macOS the path separator is `:`, on Windows it is `;`). For example: `COMPOSE_FILE=docker-compose.yml:docker-compose.prod.yml` See also the `-f` [command-line option](overview.md). diff --git a/compose/reference/overview.md b/compose/reference/overview.md index 0e6a6b08445..b84a6b28d4c 100644 --- a/compose/reference/overview.md +++ b/compose/reference/overview.md @@ -77,7 +77,7 @@ add to their successors. For example, consider this command line: ``` -$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db` +$ docker-compose -f docker-compose.yml -f docker-compose.admin.yml run backup_db ``` The `docker-compose.yml` file might specify a `webapp` service. diff --git a/compose/reference/ps.md b/compose/reference/ps.md index 385e53a4d6e..20aa45f2485 100644 --- a/compose/reference/ps.md +++ b/compose/reference/ps.md @@ -11,7 +11,7 @@ title: ps # ps -``` +```none Usage: ps [options] [SERVICE...] Options: @@ -19,3 +19,11 @@ Options: ``` Lists containers. + +```bash +$ docker-compose ps + Name Command State Ports +-------------------------------------------------------------------------------------------- +mywordpress_db_1 docker-entrypoint.sh mysqld Up 3306/tcp +mywordpress_wordpress_1 /entrypoint.sh apache2-for ... Restarting 0.0.0.0:8000->80/tcp +``` diff --git a/compose/startup-order.md b/compose/startup-order.md index f5a40ae8fa3..634c7bba991 100644 --- a/compose/startup-order.md +++ b/compose/startup-order.md @@ -53,9 +53,11 @@ script: db: image: postgres -- Write your own wrapper script to perform a more application-specific health +- Alternatively, write your own wrapper script to perform a more application-specific health check. For example, you might want to wait until Postgres is definitely ready to accept commands: + + wait-for-postgres.sh #!/bin/bash diff --git a/compose/swarm.md b/compose/swarm.md index 1cc86b53d6a..4b6a46569cb 100644 --- a/compose/swarm.md +++ b/compose/swarm.md @@ -89,15 +89,15 @@ all three services end up on the same node: image: foo volumes_from: ["bar"] network_mode: "service:baz" - environment: + labels: - "constraint:node==node-1" bar: image: bar - environment: + labels: - "constraint:node==node-1" baz: image: baz - environment: + labels: - "constraint:node==node-1" ### Host ports and recreating containers @@ -165,15 +165,15 @@ environment variables, so you can use Compose's `environment` option to set them. # Schedule containers on a specific node - environment: + labels: - "constraint:node==node-1" # Schedule containers on a node that has the 'storage' label set to 'ssd' - environment: + labels: - "constraint:storage==ssd" # Schedule containers where the 'redis' image is already pulled - environment: + labels: - "affinity:image==redis" For the full set of available filters and expressions, see the [Swarm diff --git a/compose/wordpress.md b/compose/wordpress.md index d595a9bb0bb..8bac097e14b 100644 --- a/compose/wordpress.md +++ b/compose/wordpress.md @@ -27,35 +27,40 @@ with Docker containers. This quick-start guide demonstrates how to use Compose t For example, if you named your directory `my_wordpress`: - $ cd my-wordpress/ - -3. 
Create a `docker-compose.yml` file that will start your `Wordpress` blog and a separate `MySQL` instance with a volume mount for data persistence: - - version: '2' - services: - db: - image: mysql:5.7 - volumes: - - "./.data/db:/var/lib/mysql" - restart: always - environment: - MYSQL_ROOT_PASSWORD: wordpress - MYSQL_DATABASE: wordpress - MYSQL_USER: wordpress - MYSQL_PASSWORD: wordpress - - wordpress: - depends_on: - - db - image: wordpress:latest - links: - - db - ports: - - "8000:80" - restart: always - environment: - WORDPRESS_DB_HOST: db:3306 - WORDPRESS_DB_PASSWORD: wordpress + $ cd my_wordpress/ + +3. Create a `docker-compose.yml` file that will start your + `Wordpress` blog and a separate `MySQL` instance with a volume + mount for data persistence: + + ```none + version: '2' + + services: + db: + image: mysql:5.7 + volumes: + - "./.data/db:/var/lib/mysql" + restart: always + environment: + MYSQL_ROOT_PASSWORD: wordpress + MYSQL_DATABASE: wordpress + MYSQL_USER: wordpress + MYSQL_PASSWORD: wordpress + + wordpress: + depends_on: + - db + image: wordpress:latest + links: + - db + ports: + - "8000:80" + restart: always + environment: + WORDPRESS_DB_HOST: db:3306 + WORDPRESS_DB_PASSWORD: wordpress + ``` **NOTE**: The folder `./.data/db` will be automatically created in the project directory alongside the `docker-compose.yml` which will persist any updates made by wordpress to the diff --git a/cs-engine/release-notes/release-notes.md b/cs-engine/release-notes/release-notes.md index ea5b7097e85..1441a70dcd4 100644 --- a/cs-engine/release-notes/release-notes.md +++ b/cs-engine/release-notes/release-notes.md @@ -30,14 +30,28 @@ cannot be adopted as quickly for consistency and compatibility reasons. These notes refer to the current and immediately prior releases of the CS Engine. For notes on older versions, see the [CS Engine prior release notes archive](prior-release-notes.md). +## CS Engine 1.12.3-cs3 +(27 Oct 2016) + +Refer to the [detailed list](https://github.com/docker/docker/releases) of all +changes since the release of CS Engine 1.12.2-cs2. + +## CS Engine 1.12.2-cs2 +(13 Oct 2016) + +Refer to the [detailed list](https://github.com/docker/docker/releases) of all +changes since the release of CS Engine 1.12.1-cs1. 
+ ## CS Engine 1.12.1-cs1 (20 Sep 2016) -Refer to the [detailed list](https://github.com/docker/docker/releases) of all changes since the release of CS Engine 1.11.2-cs5 +Refer to the [detailed list](https://github.com/docker/docker/releases) of all +changes since the release of CS Engine 1.11.2-cs5 This release addresses the following issues: -* [#25962](https://github.com/docker/docker/pull/25962) Allow normal containers to connect to swarm-mode overlay network +* [#25962](https://github.com/docker/docker/pull/25962) Allow normal containers +to connect to swarm-mode overlay network * Various bug fixes in swarm mode networking ## CS Engine 1.11.2-cs5 @@ -99,4 +113,3 @@ https://github.com/docker/docker/issues/22486 (27 April 2016) In this release the CS Engine is supported on RHEL 7.2 OS - diff --git a/css/documentation.css b/css/documentation.css index 2fdab54e804..304b9d18ce8 100644 --- a/css/documentation.css +++ b/css/documentation.css @@ -336,7 +336,7 @@ color: #F04124; border: 0; text-indent: -9999px; background-color: transparent; - background-image: url("https://blog.docker.com/wp-content/themes/whale_roots/assets/img/search-icon.png"); + background-image: url("/images/search-icon.png"); /* background-size: 38px 38px; */ background-repeat: no-repeat; background-position: center center; diff --git a/docker-cloud/apps/menu.md b/docker-cloud/apps/menu.md deleted file mode 100644 index f43277d9a4d..00000000000 --- a/docker-cloud/apps/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Manage Applications in Docker Cloud -keywords: -- applications, images, Cloud -menu: - main: - identifier: apps - parent: docker-cloud - type: menu - weight: -80 -title: Manage Applications ---- - -If you can see this page, please file a bug by emailing docs@docker.com diff --git a/docker-cloud/apps/volumes.md b/docker-cloud/apps/volumes.md index c2123b2329b..72a9ea0688a 100644 --- a/docker-cloud/apps/volumes.md +++ b/docker-cloud/apps/volumes.md @@ -60,16 +60,16 @@ You might find it helpful to download or back up the data from volumes that are 1. Run an SSH service that mounts the volumes of the service you want to back up. - In the example snippet below, replace `mysql` with the actual service name. + In the example snippet below, replace `mysql` with the actual service name. - ``` - $ docker-cloud service run -n downloader -p 22:2222 -e AUTHORIZED_KEYS="$(cat ~/.ssh/id_rsa.pub)" --volumes-from mysql tutum/ubuntu - ``` + ``` + $ docker-cloud service run -n downloader -p 22:2222 -e AUTHORIZED_KEYS="$(cat ~/.ssh/id_rsa.pub)" --volumes-from mysql tutum/ubuntu + ``` 2. Run a `scp` (secure-copy) to download the files to your local machine. - In the example snippet below, replace `downloader-1.uuid.cont.dockerapp.io` with the container's Fully Qualified Domain Name (FQDN), and replace `/var/lib/mysql` with the path within the container from which you want to download the data. The data will be downloaded to the current local folder. + In the example snippet below, replace `downloader-1.uuid.cont.dockerapp.io` with the container's Fully Qualified Domain Name (FQDN), and replace `/var/lib/mysql` with the path within the container from which you want to download the data. The data will be downloaded to the current local folder. - ``` - $ scp -r -P 2222 root@downloader-1.uuid.cont.dockerapp.io:/var/lib/mysql . - ``` + ``` + $ scp -r -P 2222 root@downloader-1.uuid.cont.dockerapp.io:/var/lib/mysql . 
+ ``` diff --git a/docker-cloud/builds/advanced.md b/docker-cloud/builds/advanced.md index f0545fb5af6..a8326410b82 100644 --- a/docker-cloud/builds/advanced.md +++ b/docker-cloud/builds/advanced.md @@ -41,6 +41,22 @@ sut: - SOURCE_BRANCH ``` +## Override build, test or push commands + +Docker Cloud allows you to override and customize the `build`, `test` and `push` +commands during automated build and test processes using hooks. For example, you +might use a build hook to set build arguments used only during the build +process. (You can also set up [custom build phase hooks](#custom-build-phase-hooks) to perform actions in between these commands.) + +**Use these hooks with caution.** The contents of these hook files replace the +basic `docker` commands, so you must include a similar build, test or push +command in the hook or your automated process will not complete. + +To override these phases, create a folder called `hooks` in your source code +repository at the same directory level as your Dockerfile. Create a file called +`hooks/build`, `hooks/test`, or `hooks/push` and include commands that the +builder process can execute, such as `docker` and `bash` commands (prefixed appropriately with `#!/bin/bash`). + ## Custom build phase hooks You can run custom commands between phases of the build process by creating @@ -49,7 +65,7 @@ autotest processes. Create a folder called `hooks` in your source code repository at the same directory level as your Dockerfile. Place files that define the hooks in that -folder. The builder executes them before and after each step. +folder. Hook files can include both `docker` commands, and `bash` commands as long as they are prefixed appropriately with `#!/bin/bash`. The builder executes the commands in the files before and after each step. The following hooks are available: @@ -61,13 +77,44 @@ The following hooks are available: * `hooks/pre_push` (only used when executing a build rule or [automated build](automated-build.md) ) * `hooks/post_push` (only used when executing a build rule or [automated build](automated-build.md) ) +### Build hook examples -## Override build, test or push commands +#### Override the "build" phase to set variables -In addition to the custom build phase hooks above, you can also use -`hooks/build`, `hooks/test`, and `hooks/push` to override and customize the -`build`, `test` and `push` commands during automated build and test processes. +Docker Cloud allows you to define build environment variables in the UI which you can then reference in hooks. -**Use these hooks with caution.** The contents of these hook files replace the -basic `docker` commands, so you must include a similar build, test or push -command in the hook or your automated process will not complete. +In the following example, we define a build hook that uses `docker build` arguments to set the variable `CUSTOM` based on the value of variable we defined using the Docker Cloud build settings. `$IMAGE_NAME` is a variable that we provide with the name of the image being built. + +```none +docker build --build-arg CUSTOM=$VAR -t $IMAGE_NAME +``` + +> **Caution**: A `hooks/build` file overrides the basic `docker build` command +used by the builder, so you must include a similar build command in the hook or +the automated build will fail. + +To learn more about Docker build-time variables, see the [docker build documentation]( https://docs.docker.com/engine/reference/commandline/build/#/set-build-time-variables---build-arg). 
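Putting that together, a complete `hooks/build` file for the override above might look like the following sketch; it assumes the Dockerfile sits at the repository root and that `VAR` is a build environment variable defined in the Docker Cloud build settings:

```bash
#!/bin/bash
# hooks/build replaces the default build step entirely, so it must run
# `docker build` itself or the automated build will fail.
docker build --build-arg CUSTOM=$VAR -t $IMAGE_NAME .
```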
+ +#### Two-phase build + +If your build process requires a component that is not a dependency for your application, you can use a `pre-build` hook to collect and compile required components. In the example below, the hook uses a Docker container to compile a Golang binary required before the build. + +```bash +#!/bin/bash +echo "=> Building the binary" +docker run --privileged \ + -v $(pwd):/src \ + -v /var/run/docker.sock:/var/run/docker.sock \ + centurylink/golang-builder +``` + +#### Push to multiple tags + +By default the build process tags the resulting Docker image with a single tag and pushes the image only to the repository where the build settings are configured. + +If you needed to give the resulting image multiple tags, or push the same image to multiple repositories, you could set up a `post_push` hook to add additional tags and push to more repositories. + +```none +docker tag $IMAGE_NAME $DOCKER_REPO:$SOURCE_COMMIT +docker push $DOCKER_REPO:$SOURCE_COMMIT +``` diff --git a/docker-cloud/builds/automated-build.md b/docker-cloud/builds/automated-build.md index 8f3160fd77c..e323558a15c 100644 --- a/docker-cloud/builds/automated-build.md +++ b/docker-cloud/builds/automated-build.md @@ -47,40 +47,34 @@ Before you set up automated builds you need to [create a repository](repos.md) t 2. Click the **Builds** tab. -3. The first time you configure automated builds for a repository, you'll see -buttons that allow you to link to a hosted source code repository. Select the -repository service where the image's source code is stored. +3. If you are setting up automated builds for the first time, select +the code repository service where the image's source code is stored. - (If you haven't yet linked a source provider, follow the instructions - [here](link-source.md) to link your account.) - - If you are editing an existing the build settings for an existing automated + Otherwise, if you are editing the build settings for an existing automated build, click **Configure automated builds**. -4. If necessary, select the **source repository** to build the repository from. - -5. Select the **source repository** to build the Docker images from. +4. Select the **source repository** to build the Docker images from. You might need to specify an organization or user from the source code provider to find the code repository you want to build. -6. Choose where to run your build processes. +5. Choose where to run your build processes. You can either run the process on your own infrastructure and optionally - [set up specific nodes to build on](automated-build.md#set-up-builder-nodes), or use the + [set up specific nodes to build on](automated-build.md#set-up-builder-nodes), or you can use the hosted build service offered on Docker Cloud's infrastructure. If you use Docker's infrastructure, select a builder size to run the build process on. This hosted build service is free while it is in Beta. ![](images/edit-repository-builds.png) -7. Optionally, enable [autotests](automated-testing.md#enable-automated-tests-on-a-repository). +6. Optionally, enable [autotests](automated-testing.md#enable-automated-tests-on-a-repository). -8. In the **Tag mappings** section, enter one or more tags to build. +8. In the **Build Rules** section, enter one or more sources to build. - For each tag: + For each source: - * Select the **Source type** to build: either a **tag** or a + * Select the **Source type** to build either a **tag** or a **branch**. This tells the build system what to look for in the source code repository. 
@@ -90,13 +84,14 @@ repository service where the image's source code is stored. names to build. To learn more, see [regexes](automated-build.md#regexes-and-automated-builds). - * Specify the **Dockerfile location** as a path relative to the root of the source code repository. (If the Dockerfile is at the repository root, leave this path set to `/`.) - * Enter the tag to apply to Docker images built from this source. + If you configured a regex to select the source, you can reference the capture groups and use its result as part of the tag. To learn more, see [regexes](automated-build.md#regexes-and-automated-builds). + * Specify the **Dockerfile location** as a path relative to the root of the source code repository. (If the Dockerfile is at the repository root, leave this path set to `/`.) + 9. For each branch or tag, enable or disable the **Autobuild** toggle. Only branches or tags with autobuild enabled are built, tested, *and* have @@ -137,10 +132,10 @@ To disable an automated build: 2. Click **Configure automated builds** to edit the repository's build settings. -3. In the **Tag mappings** section, locate the branch or tag you no longer want +3. In the **Build Rules** section, locate the branch or tag you no longer want to automatically build. -4. Click the **autobuild** toggle next to the branch configuration line. +4. Click the **autobuild** toggle next to the configuration line. The toggle turns gray when disabled. diff --git a/docker-cloud/builds/automated-testing.md b/docker-cloud/builds/automated-testing.md index be2fba1562e..dbd6a9f01ad 100644 --- a/docker-cloud/builds/automated-testing.md +++ b/docker-cloud/builds/automated-testing.md @@ -73,15 +73,15 @@ Docker repository, regardless of the Autotest settings. * The source code repository * the build location - * at least one tag mapping + * at least one build rule 8. Choose your **Autotest** option. The following options are available: - * `Off`: no additional tests. Test commits only to branches that are using Autobuild to build and push images. - * `Source Repository`: test commits to all branches of the source code repository, regardless of their Autobuild setting. - * `Source Repository & External Pull Requests`: tests commits to all branches of the source code repository, including any pull requests opened against it. + * `Off`: No additional test builds. Tests only run if they're configured as part of an automated build. + * `Source repository`: Run a test build for any pull requests to branches that match a build rule, but only when the pull request comes from the same source repository. + * `Source repository & external pull requests`: Run a test build for any pull requests to branches that match a build rule, including when the pull request originated in an external source repository. > **Note**: For security purposes, autotest on _external pull requests_ is disabled on public repositories. 
If you select this option on a public diff --git a/docker-cloud/builds/link-source.md b/docker-cloud/builds/link-source.md index 02214545251..bccfad7d1d2 100644 --- a/docker-cloud/builds/link-source.md +++ b/docker-cloud/builds/link-source.md @@ -1,7 +1,6 @@ --- aliases: /docker-cloud/tutorials/link-source/ -description: -- Link to your source code repository +description: Link to your source code repository keywords: - sourcecode, github, bitbucket, Cloud menu: diff --git a/docker-cloud/builds/menu.md b/docker-cloud/builds/menu.md deleted file mode 100644 index 25247d3a903..00000000000 --- a/docker-cloud/builds/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Manage Builds and Images in Docker Cloud -keywords: -- builds, images, Cloud -menu: - main: - identifier: builds - parent: docker-cloud - type: menu - weight: -80 -title: Manage Builds and Images ---- - -If you can see this page, please file a bug by emailing docs@docker.com diff --git a/docker-cloud/dockerid.md b/docker-cloud/dockerid.md index 8dd7bbf29fd..4293c851edb 100644 --- a/docker-cloud/dockerid.md +++ b/docker-cloud/dockerid.md @@ -30,10 +30,22 @@ account. You can also link to source code repositories such as GitHub and Bitbucket from your Docker Cloud account settings. - +## Email addresses + +You can associate multiple email addresses with your Docker ID, and one of these addresses becomes the primary address for the account. The primary address is used by Docker to send password reset notifications and other important information, so be sure to keep it updated. + +To add another email address to your Docker ID: + +1. In Docker Cloud, click the user icon menu at top right, and click **Account Settings**. +2. In the **Emails** section, enter a new email address for the account. +3. Click the **plus sign** icon (**+**) to add the address and send a verification email. + +The new email address is not added to the account until you confirm it by +clicking the link in the verification email. This link is only good for a +limited time. To send a new verification email, click the envelope icon next to +the email address that you want to verify. + +If you have multiple verified email addresses associated with the account, you can click **Set as primary** to change the primary email address. ## Notifications diff --git a/docker-cloud/getting-started/deploy-app/1_introduction.md b/docker-cloud/getting-started/deploy-app/1_introduction.md index c46a14d68e2..ad46e690fb1 100644 --- a/docker-cloud/getting-started/deploy-app/1_introduction.md +++ b/docker-cloud/getting-started/deploy-app/1_introduction.md @@ -14,7 +14,7 @@ title: Introduction to Deploying an app in Docker Cloud # Introduction and tutorial prerequisites -In this tutorial you will an application to Docker Cloud using either Go or +In this tutorial you will bring an application to Docker Cloud using either Go or Python. This tutorial is intended for more advanced beginners who have some experience with web applications, and who want to learn more about multi-container services in Docker Cloud. @@ -23,7 +23,7 @@ This tutorial assumes that you have: - a free Docker ID account. - at least one node running. If you don't have any nodes set up in Docker Cloud yet, [start here](../../getting-started/your_first_node.md) to set these up. -- (optional) Docker Engine installed - see the installation guides for OS X, Windows and Linux. +- (optional) Docker Engine installed - see the installation guides for macOS, Windows and Linux. Let's get started! 
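Before you dive in, you can optionally verify these prerequisites from a terminal. This is a minimal sketch, not part of the original tutorial: it assumes the `docker-cloud` CLI (installed in a later step) is available and that you have already logged in, and it uses the CLI's `node ls` subcommand only as one way to confirm that a node is deployed.

```bash
# Optional check: is the Docker Engine client installed locally?
docker --version

# Optional check: is at least one node deployed in your Docker Cloud account?
# (assumes the docker-cloud CLI is installed and you are logged in)
docker-cloud node ls
```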
diff --git a/docker-cloud/getting-started/deploy-app/2_set_up.md b/docker-cloud/getting-started/deploy-app/2_set_up.md index 53d49010c1a..a21db0865a4 100644 --- a/docker-cloud/getting-started/deploy-app/2_set_up.md +++ b/docker-cloud/getting-started/deploy-app/2_set_up.md @@ -38,9 +38,9 @@ Open your shell or terminal application and execute the following command: $ pip install docker-cloud ``` -#### Install on Mac OS X +#### Install on macOS -We recommend installing Docker CLI for OS X using Homebrew. If you don't have `brew` installed, follow the instructions here: http://brew.sh +We recommend installing Docker CLI for macOS using Homebrew. If you don't have `brew` installed, follow the instructions here: http://brew.sh Once Homebrew is installed, open Terminal and run the following command: @@ -66,7 +66,7 @@ The documentation for the Docker Cloud CLI tool and API [here](/apidocs/docker-c Use the `login` CLI command to log in to Docker Cloud. Use the username and password you used when creating your Docker ID. If you use Docker Hub, you can use the same username and password you use to log in to Docker Hub. ``` -$ docker-cloud login +$ docker login Username: my-username Password: Login succeeded! diff --git a/docker-cloud/getting-started/deploy-app/3_prepare_the_app.md b/docker-cloud/getting-started/deploy-app/3_prepare_the_app.md index 93495972a0d..3de4fcb6bd4 100644 --- a/docker-cloud/getting-started/deploy-app/3_prepare_the_app.md +++ b/docker-cloud/getting-started/deploy-app/3_prepare_the_app.md @@ -23,14 +23,14 @@ install Python or Go to follow the tutorial. ```bash $ git clone https://github.com/docker/dockercloud-quickstart-python.git -$ cd quickstart-python +$ cd dockercloud-quickstart-python ``` **Go quickstart** ```bash $ git clone https://github.com/docker/dockercloud-quickstart-go.git -$ cd quickstart-go +$ cd dockercloud-quickstart-go ``` ## Build the application @@ -42,13 +42,13 @@ Next, we have to build this application to create an image. Run the following co **Python quickstart** ```bash -$ docker build --tag quickstart-python +$ docker build --tag quickstart-python . ``` **Go quickstart** ```bash -$ docker build --tag quickstart-go +$ docker build --tag quickstart-go . ``` Next, we [Push the Docker image to Docker Cloud's Registry](4_push_to_cloud_registry.md). diff --git a/docker-cloud/getting-started/deploy-app/9_load-balance_the_service.md b/docker-cloud/getting-started/deploy-app/9_load-balance_the_service.md index 11775c4d073..13ba6a26cab 100644 --- a/docker-cloud/getting-started/deploy-app/9_load-balance_the_service.md +++ b/docker-cloud/getting-started/deploy-app/9_load-balance_the_service.md @@ -29,7 +29,7 @@ $ docker-cloud service run \ -p 80:80/tcp \ --role global \ --autorestart ALWAYS \ ---link-service quickstart-python:web \ +--link-service web:web \ --name lb \ dockercloud/haproxy ``` @@ -40,7 +40,7 @@ dockercloud/haproxy **--autorestart ALWAYS** tells Docker Cloud to always [restart the containers](../../apps/autorestart.md) if they stop. -**--link-service quickstart-python:web** links your load balancer service *haproxy* with the service *quickstart-python*, and names the link *web*. (Learn more about Service Linking [here](../../apps/service-links.md).) +**--link-service web:web** links your load balancer service *haproxy* with the *web* service, and names the link *web*. (Learn more about Service Linking [here](../../apps/service-links.md).) **--name lb** names the service *lb* (short for *load balancer*). 
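Once the `lb` service is running, a quick verification might look like the following sketch. It assumes the docker-cloud CLI's `service ps` subcommand is available in your version of the CLI, and `NODE_IP` is a placeholder for the public IP address of one of your nodes.

```bash
# List services and confirm that both `web` and `lb` report a running state
docker-cloud service ps

# Request the application through the load balancer on port 80
# (NODE_IP is a placeholder; substitute the public IP of one of your nodes)
curl http://NODE_IP/
```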
diff --git a/docker-cloud/infrastructure/cloud-on-aws-faq.md b/docker-cloud/infrastructure/cloud-on-aws-faq.md index 6864a3899f6..ea365f1b649 100644 --- a/docker-cloud/infrastructure/cloud-on-aws-faq.md +++ b/docker-cloud/infrastructure/cloud-on-aws-faq.md @@ -120,6 +120,7 @@ Following the example in the previous section, you have a node cluster deployed What if you have another VPC for some other purpose, (the components already exist) and you want to deploy a node cluster in that VPC. Docker Cloud: + 1. Looks for the selected VPC. Found! 2. Looks for selected subnets. If you do not select any subnets, Docker Cloud tries to create them using the rules previously described. 3. If you selected more than one subnet, Docker Cloud distributes the nodes in the cluster among those subnets. If not, all nodes are placed in the same subnet. diff --git a/docker-cloud/infrastructure/menu.md b/docker-cloud/infrastructure/menu.md deleted file mode 100644 index 3bf0d98ce1c..00000000000 --- a/docker-cloud/infrastructure/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Manage Infrastructure in Docker Cloud -keywords: -- nodes, hosts, infrastructure, Cloud -menu: - main: - identifier: infrastructure - parent: docker-cloud - type: menu - weight: -80 -title: Manage Infrastructure ---- - -If you can see this page, please file a bug by emailing docs@docker.com diff --git a/docker-cloud/installing-cli.md b/docker-cloud/installing-cli.md index d1b00947b0b..46470062035 100644 --- a/docker-cloud/installing-cli.md +++ b/docker-cloud/installing-cli.md @@ -3,7 +3,7 @@ aliases: - /docker-cloud/getting-started/intermediate/installing-cli/ - /docker-cloud/getting-started/installing-cli/ - /docker-cloud/tutorials/installing-cli/ -description: Using the Docker Cloud CLI on Linux, Windows, and Mac OS X, installing, +description: Using the Docker Cloud CLI on Linux, Windows, and macOS, installing, updating, uninstall keywords: - cloud, command-line, CLI @@ -42,10 +42,12 @@ Open your terminal or command shell and execute the following command: ```bash $ pip install docker-cloud ``` +If you encounter errors on Linux machines, make sure that `python-dev` is installed. +For example, on Ubuntu, run the following command: `apt-get install python-dev` -#### Install on Mac OS X +#### Install on macOS -We recommend installing Docker CLI for OS X using Homebrew. If you don't have `brew` installed, follow the instructions here: http://brew.sh +We recommend installing Docker CLI for macOS using Homebrew. If you don't have `brew` installed, follow the instructions here: http://brew.sh Once Homebrew is installed, open Terminal and run the following command: @@ -90,7 +92,7 @@ Periodically, Docker will add new features and fix bugs in the existing CLI. To $ pip install -U docker-cloud ``` -#### Upgrade the docker-cloud CLI on Mac OS X +#### Upgrade the docker-cloud CLI on macOS ``` $ brew update && brew upgrade docker-cloud @@ -109,7 +111,7 @@ Open your terminal or command shell and execute the following command: $ pip uninstall docker-cloud ``` -#### Uninstall on Mac OS X +#### Uninstall on macOS Open your Terminal application and execute the following command: diff --git a/docker-cloud/orgs.md b/docker-cloud/orgs.md index 42853d87345..6be8acc4675 100644 --- a/docker-cloud/orgs.md +++ b/docker-cloud/orgs.md @@ -171,8 +171,8 @@ Optionally, override the default access level for specific repositories. 
--> | ------------- | ------------- | | **Repositories** | | | Read | Pull | -| Read/Write | Pull, push, update description, delete | -| Admin | All of the above, plus create | +| Read/Write | Pull, push | +| Admin | All of the above, plus update description, create and delete | | **Build** | | | Read | View build and build settings | | Read/write | View, cancel build, retry or trigger build | diff --git a/docker-for-mac/docker-toolbox.md b/docker-for-mac/docker-toolbox.md index f28a4d58b55..43410bf46ad 100644 --- a/docker-for-mac/docker-toolbox.md +++ b/docker-for-mac/docker-toolbox.md @@ -33,7 +33,7 @@ Docker for Mac is a Mac native application, that you install in `/Applications`. Here are some key points to know about Docker for Mac before you get started: -* Docker for Mac does not use VirtualBox, but rather HyperKit, a lightweight OS X virtualization solution built on top of Hypervisor.framework in OS X 10.10 Yosemite and higher. +* Docker for Mac does not use VirtualBox, but rather HyperKit, a lightweight macOS virtualization solution built on top of Hypervisor.framework in macOS 10.10 Yosemite and higher. * Installing Docker for Mac does not affect machines you created with Docker Machine. The install offers to copy containers and images from your local `default` machine (if one exists) to the new Docker for Mac HyperKit VM. If chosen, content from `default` is copied to the new Docker for Mac HyperKit VM, and your original `default` machine is kept as is. diff --git a/docker-for-mac/faqs.md b/docker-for-mac/faqs.md index 348d95cc683..afde0dc7e16 100644 --- a/docker-for-mac/faqs.md +++ b/docker-for-mac/faqs.md @@ -53,8 +53,8 @@ Do the following each time: ### What is Docker.app? `Docker.app` is Docker for Mac, a bundle of Docker client, and Docker -Engine. `Docker.app` uses the OS X -Hypervisor.framework (part of MacOS X 10.10 Yosemite and higher) +Engine. `Docker.app` uses the macOS +Hypervisor.framework (part of macOS 10.10 Yosemite and higher) to run containers, meaning that _**no separate VirtualBox is required**_. ### What kind of feedback are we looking for? @@ -116,13 +116,13 @@ Networking topic. ### How do I add custom CA certificates? -Starting with Docker for Mac 1.12.1, 2016-09-16 (stable) and Beta 27 Release Notes (2016-09-28 1.12.2-rc1-beta27), all trusted certificate authorities (CAs) (root or intermediate) are supported. +Starting with Docker for Mac Beta 27 Release Notes (2016-09-28 1.12.2-rc1-beta27) and follow-on Beta releases, all trusted certificate authorities (CAs) (root or intermediate) are supported. (**Note:** Custom CA certificates are not yet supported on stable releases.) Docker for Mac creates a certificate bundle of all user-trusted CAs based on the Mac Keychain, and appends it to Moby trusted certificates. So if an enterprise SSL certificate is trusted by the user on the host, it will be trusted by Docker for Mac. ### What are system requirements for Docker for Mac? -Note that you need a Mac that supports hardware virtualization, which is most non ancient ones; i.e., use OS X `10.10.3+` or `10.11` (OS X Yosemite or OS X El Capitan). See also "What to know before you install" in [Getting Started](index.md). +Note that you need a Mac that supports hardware virtualization, which is most non ancient ones; i.e., use macOS `10.10.3+` or `10.11` (macOS Yosemite or macOS El Capitan). See also "What to know before you install" in [Getting Started](index.md). ### Do I need to uninstall Docker Toolbox to use Docker for Mac? 
@@ -140,7 +140,7 @@ Toolbox Mac topics. ### What is HyperKit? -HyperKit is a hypervisor built on top of the Hypervisor.framework in OS X 10.10 Yosemite and higher. It runs entirely in userspace and has no other dependencies. +HyperKit is a hypervisor built on top of the Hypervisor.framework in macOS 10.10 Yosemite and higher. It runs entirely in userspace and has no other dependencies. We use HyperKit to eliminate the need for other VM products, such as Oracle Virtualbox or VMWare Fusion. diff --git a/docker-for-mac/index.md b/docker-for-mac/index.md index 73bd8c713c9..b237fa80f98 100644 --- a/docker-for-mac/index.md +++ b/docker-for-mac/index.md @@ -5,6 +5,7 @@ aliases: - /mac/ - /mac/started/ - /docker-for-mac/started/ +- /installation/mac/ description: Getting Started keywords: - mac, beta, alpha, tutorial @@ -54,29 +55,36 @@ For more about stable and beta channels, see the [FAQs](faqs.md#stable-and-beta- >**Important Notes**: > ->* Docker for Mac requires OS X 10.10.3 Yosemite or newer running on a 2010 or newer Mac, with Intel's hardware support for MMU virtualization. Please see [What to know before you install](index.md#what-to-know-before-you-install) for a full list of prerequisites. +>- Docker for Mac requires macOS 10.10.3 Yosemite or newer running on a 2010 or +> newer Mac, with Intel's hardware support for MMU virtualization. Please see +> [What to know before you install](index.md#what-to-know-before-you-install) +> for a full list of prerequisites. > ->* You can switch between beta and stable versions, but _you must have only one app installed at a time_. Also, you will need to save images and export containers you want to keep before uninstalling the current version before installing another. For more about this, see the [FAQs about beta and stable channels](faqs.md#stable-and-beta-channels). +>- You can switch between beta and stable versions, but you must have only one +> app installed at a time. Also, you will need to save images and export +> containers you want to keep before uninstalling the current version before +> installing another. For more about this, see the +> [FAQs about beta and stable channels](faqs.md#stable-and-beta-channels). ## What to know before you install - * **README FIRST for Docker Toolbox and Docker Machine users**: If you are already running Docker on your machine, first read [Docker for Mac vs. Docker Toolbox](docker-toolbox.md) to understand the impact of this installation on your existing setup, how to set your environment for Docker for Mac, and how the two products can coexist. - +

* **Relationship to Docker Machine**: Installing Docker for Mac does not affect machines you created with Docker Machine. You'll get the option to copy containers and images from your local `default` machine (if one exists) to the new Docker for Mac HyperKit VM. When you are running Docker for Mac, you do not need Docker Machine nodes running at all locally (or anywhere else). With Docker for Mac, you have a new, native virtualization system running (HyperKit) which takes the place of the VirtualBox system. To learn more, see [Docker for Mac vs. Docker Toolbox](docker-toolbox.md). - +

* **System Requirements**: Docker for Mac will launch only if all these requirements are met. - Mac must be a 2010 or newer model, with Intel's hardware support for memory management unit (MMU) virtualization; i.e., Extended Page Tables (EPT) - - OS X 10.10.3 Yosemite or newer + - macOS 10.10.3 Yosemite or newer - At least 4GB of RAM - VirtualBox prior to version 4.3.30 must NOT be installed (it is incompatible with Docker for Mac) - > **Note**: If your system does not satisfy these requirements, you can install [Docker Toolbox](/toolbox/overview.md), which uses Oracle Virtual Box instead of HyperKit. + >**Note**: If your system does not satisfy these requirements, you can install [Docker Toolbox](/toolbox/overview.md), which uses Oracle Virtual Box instead of HyperKit. +

* **What the install includes**: The installation provides [Docker Engine](https://docs.docker.com/engine/userguide/intro/), Docker CLI client, [Docker Compose](https://docs.docker.com/compose/overview/), and [Docker Machine](https://docs.docker.com/machine/overview/). ## Step 1. Install and Run Docker for Mac @@ -130,20 +138,19 @@ Run these commands to test if your versions of `docker`, `docker-compose`, and ` Some good commands to try are `docker version` to check that you have the latest release installed, and `docker ps` and `docker run hello-world` to verify that Docker is running. -2. For something more adventurous, start a Dockerized web server. +2. For something more adventurous, start a Dockerized web server. - ```shell - docker run -d -p 80:80 --name webserver nginx - ``` + ``` + docker run -d -p 80:80 --name webserver nginx + ``` - If the image is not found locally, Docker will pull it from Docker Hub. + If the image is not found locally, Docker will pull it from Docker Hub. - In a web browser, go to `http://localhost/` to bring up the home page. (Since you specified the default HTTP port, it isn't necessary to append `:80` at the end of the URL.) + In a web browser, go to `http://localhost/` to bring up the home page. (Since you specified the default HTTP port, it isn't necessary to append `:80` at the end of the URL.) - ![nginx home page](images/hello-world-nginx.png) + ![nginx home page](images/hello-world-nginx.png) - >**Note:** Early beta releases used `docker` as the hostname to build the URL. Now, ports are exposed on the private IP addresses of the VM and forwarded to `localhost` with no other host name set. See also, [Release Notes](release-notes.md) for Beta 9. - > + >**Note:** Early beta releases used `docker` as the hostname to build the URL. Now, ports are exposed on the private IP addresses of the VM and forwarded to `localhost` with no other host name set. See also, [Release Notes](release-notes.md) for Beta 9. 3. Run `docker ps` while your web server is running to see details on the webserver container. @@ -180,7 +187,13 @@ Choose --> **Preferences** from the menu bar. You ![Advanced Preference settings-advanced](images/settings-advanced.png) -* **Adding registries** - As an alternative to using [Docker Hub](https://hub.docker.com/) to store your public or private images or [Docker Trusted Registry](https://docs.docker.com/docker-trusted-registry/overview/), you can use Docker to set up your own insecure [registry](https://docs.docker.com/registry/introduction/). Add URLs for insecure registries and registry mirrors on which to host your images. +* **Adding registries** - As an alternative to using [Docker Hub](https://hub.docker.com/) to store your public or private images or [Docker +Trusted Registry](https://docs.docker.com/docker-trusted-registry/overview/), +you can use Docker to set up your own insecure +[registry](https://docs.docker.com/registry/introduction/). Add URLs for +insecure registries and registry mirrors on which to host your images. (See +also, [How do I add custom CA +certificates?](faqs.md#how-do-i-add-custom-ca-certificates) in the FAQs.) * **HTTP proxy settings** - Docker for Mac will detect HTTP/HTTPS Proxy Settings and automatically propagate these to Docker and to your containers. For example, if you set your proxy settings to `http://proxy.example.com`, Docker will use this proxy when pulling containers. 
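As a rough sanity check that proxy settings are propagated, you can inspect the environment of a throwaway container. This is only a sketch; `proxy.example.com` stands in for whatever proxy you have actually configured, and the exact variables shown depend on your settings.

```bash
# Start a temporary container and print any proxy-related variables
docker run --rm alpine env | grep -i proxy

# Example output (values depend on your macOS proxy settings):
#   http_proxy=http://proxy.example.com
#   https_proxy=http://proxy.example.com
```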
diff --git a/docker-for-mac/menu.md b/docker-for-mac/menu.md deleted file mode 100644 index 522c5f04130..00000000000 --- a/docker-for-mac/menu.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -aliases: [] -description: Docker Mac -keywords: -- docker, mac, desktop, editions -menu: - main: - identifier: pinata_mac_menu - weight: -80 -title: Docker for Mac ---- - -# Docker for Mac diff --git a/docker-for-mac/networking.md b/docker-for-mac/networking.md index d0c68c9ea75..42a69a9de42 100644 --- a/docker-for-mac/networking.md +++ b/docker-for-mac/networking.md @@ -21,7 +21,7 @@ Docker for Mac provides several networking features to make it easier to use. ### VPN Passthrough Docker for Mac's networking can work when attached to a VPN. -To do this, Docker for Mac intercepts traffic from the `HyperKit` and injects it into OSX as if it originated from the Docker application. +To do this, Docker for Mac intercepts traffic from the `HyperKit` and injects it into macOS as if it originated from the Docker application. ### Port Mapping @@ -33,10 +33,10 @@ Docker for Mac will make the container port available at `localhost`. ### HTTP/HTTPS Proxy Support -Docker for Mac will detect HTTP/HTTPS Proxy Settings from OSX and automatically propagate these to Docker and to your containers. -For example, if you set your proxy settings to `http://proxy.example.com` in OSX, Docker will use this proxy when pulling containers. +Docker for Mac will detect HTTP/HTTPS Proxy Settings from macOS and automatically propagate these to Docker and to your containers. +For example, if you set your proxy settings to `http://proxy.example.com` in macOS, Docker will use this proxy when pulling containers. -![OSX Proxy Settings](images/proxy-settings.png) +![macOS Proxy Settings](images/proxy-settings.png) When you start a container, you will see that your proxy settings propagate into the containers. For example: @@ -59,18 +59,18 @@ If you have containers that you wish to keep running across restarts, you should Following is a summary of current limitations on the Docker for Mac networking stack, along with some ideas for workarounds. -### There is no docker0 bridge on OSX +### There is no docker0 bridge on macOS -Because of the way networking is implemented in Docker for Mac, you cannot see a `docker0` interface in OSX. +Because of the way networking is implemented in Docker for Mac, you cannot see a `docker0` interface in macOS. This interface is actually within `HyperKit`. ### I cannot ping my containers -Unfortunately, due to limitations in OSX, we're unable to route traffic to containers, and from containers back to the host. +Unfortunately, due to limitations in macOS, we're unable to route traffic to containers, and from containers back to the host. ### Per-container IP addressing is not possible -The docker (Linux) bridge network is not reachable from the OSX host. +The docker (Linux) bridge network is not reachable from the macOS host. ### Use cases and workarounds @@ -105,6 +105,6 @@ See the [run commmand](/engine/reference/commandline/run.md) for more details on #### A view into implementation -We understand that these workarounds are not ideal, but there are several problems. In particular, there is a bug in OSX that is only fixed in 10.12 and is not being backported as far as we can tell, which means that we could not support this in all supported OSX versions. 
In addition, this network setup would require root access which we are trying to avoid entirely in Docker for Mac (we currently have a very small root helper that we are trying to remove). +We understand that these workarounds are not ideal, but there are several problems. In particular, there is a bug in macOS that is only fixed in 10.12 and is not being backported as far as we can tell, which means that we could not support this in all supported macOS versions. In addition, this network setup would require root access which we are trying to avoid entirely in Docker for Mac (we currently have a very small root helper that we are trying to remove). diff --git a/docker-for-mac/osxfs.md b/docker-for-mac/osxfs.md index d0dc9e5a1b9..f3fb44580f7 100644 --- a/docker-for-mac/osxfs.md +++ b/docker-for-mac/osxfs.md @@ -20,17 +20,6 @@ user experience for bind mounting OS X file system trees into Docker containers. To this end, `osxfs` features a number of unique capabilities as well as differences from a classical Linux file system. -- [Case sensitivity](osxfs.md#case-sensitivity) -- [Access control](osxfs.md#access-control) -- [Namespaces](osxfs.md#namespaces) -- [Ownership](osxfs.md#ownership) -- [File system events](osxfs.md#file-system-events) -- [Mounts](osxfs.md#mounts) -- [Symlinks](osxfs.md#symlinks) -- [File types](osxfs.md#file-types) -- [Extended attributes](osxfs.md#extended-attributes) -- [Technology](osxfs.md#technology) - ### Case sensitivity With Docker for Mac, file systems are shared from OS X into containers @@ -69,7 +58,7 @@ sharing**. (See [Preferences](index.md#preferences).) All other paths used in `-v` bind mounts are sourced from the Moby Linux VM running the Docker containers, so arguments such as `-v /var/run/docker.sock:/var/run/docker.sock` should work as expected. If -an OS X path is not shared and does not exist in th VM, an attempt to +an OS X path is not shared and does not exist in the VM, an attempt to bind mount it will fail rather than create it in the VM. Paths that already exist in the VM and contain files are reserved by Docker and cannot be exported from OS X. @@ -85,9 +74,11 @@ requests for ownership metadata will return the previously set values. Ownership-based permissions are only enforced at the OS X file system level with all accessing processes behaving as the user running Docker. If the user does not have permission to read extended attributes -on an object, e.g. when that object's permissions are `0000`, ownership -will be reported as the accessing process until the extended attribute -is again readable. +on an object (such as when that object's permissions are `0000`), `osxfs` +will attempt to add an access control list (ACL) entry that allows +the user to read and write extended attributes. If this attempt +fails, the object will appear to be owned by the process accessing +it until the extended attribute is readable again. ### File system events @@ -124,11 +115,11 @@ containers, only to those events originating in OS X. ### Mounts The OS X mount structure is not visible in the shared volume, but volume -contents are visible. Volume contents appear in the same file system as the -rest of the shared file system. Mounting/unmounting OS X volumes that -are also bind mounted into containers may result in unexpected behavior -in those containers. Unmount events are not supported. Mount export -support is planned but is still under development. +contents are visible. Volume contents appear in the same file system as the rest +of the shared file system. 
Mounting/unmounting OS X volumes that are also bind +mounted into containers may result in unexpected behavior in those containers. +Unmount events are not supported. Mount export support is planned but is still +under development. ### Symlinks @@ -152,3 +143,200 @@ Extended attributes are not yet supported. `osxfs` does not use OSXFUSE. `osxfs` does not run under, inside, or between OS X userspace processes and the OS X kernel. + +### Performance issues, solutions, and roadmap + +With regard to reported performance issues ([GitHub issue 77: File access in +mounted volumes extremely slow](https://github.com/docker/for-mac/issues/77)), +and a similar thread on [Docker for Mac forums on topic: File access in mounted +volumes extremely +slow](https://forums.docker.com/t/file-access-in-mounted-volumes-extremely-slow-cpu-bound/), +this topic provides an explanation of the issues, what we are doing to +address them, how the community can help us, and what you can expect in the +future. This explanation is a slightly re-worked version of an [understanding +performance +post](https://forums.docker.com/t/file-access-in-mounted-volumes-extremely-slow-cpu-bound/8076/158?u=orangesnap) +from David Sheets (@dsheets) on the [Docker development +team](https://forums.docker.com/groups/Docker) to the forum topic just +mentioned. We want to surface it in the documentation for wider reach. + +#### Understanding performance + +Perhaps the most important thing to understand is that shared file system +performance is multi-dimensional. This means that, depending on your workload, +you may experience exceptional, adequate, or poor performance with `osxfs`, the +file system server in Docker for Mac. File system APIs are very wide (20-40 +message types) with many intricate semantics involving on-disk state, in-memory +cache state, and concurrent access by multiple processes. Additionally, `osxfs` +integrates a mapping between OS X's FSEvents API and Linux's inotify API +which is implemented inside of the file system itself complicating matters +further (cache behavior in particular). + +At the highest level, there are two dimensions to file system performance: +throughput (read/write IO) and latency (roundtrip time). In a traditional file +system on a modern SSD, applications can generally expect throughput of a few +GB/s. With large sequential IO operations, `osxfs` can achieve throughput of +around 250 MB/s which, while not native speed, will not be the bottleneck for +most applications which perform acceptably on HDDs. + +Latency is the time it takes for a file system system call to complete. For +instance, the time between a thread issuing write in a container and resuming +with the number of bytes written. With a classical block-based file system, this +latency is typically under 10μs (microseconds). With `osxfs`, latency is +presently around 200μs for most operations or 20x slower. For workloads which +demand many sequential roundtrips, this results in significant observable +slowdown. To reduce the latency, we need to shorten the data path from a Linux +system call to OS X and back again. This requires tuning each component in the +data path in turn -- some of which require significant engineering effort. Even +if we achieve a huge latency reduction of 100μs/roundtrip, we will still "only" +see a doubling of performance. This is typical of performance engineering, which +requires significant effort to analyze slowdowns and develop optimized +components. 
We know how we can likely halve the roundtrip time but we haven't +implemented those improvements yet (more on this below in [What you can +do](#what-you-can-do)). + +There is hope for significant performance improvement in the near term despite +these fundamental communication channel properties, which are difficult to +overcome (latency in particular). This hope comes in the form of increased +caching (storing "recent" values closer to their use to prevent roundtrips +completely). The Linux kernel's VFS layer contains a number of caches which can +be used to greatly improve performance by reducing the required communication +with the file system. Using this caching comes with a number of trade-offs: + +* It requires understanding the cache behavior in detail in order to write +correct, stateful functionality on top of those caches. + +* It harms the coherence or consistency of the file system as observed +from Linux containers and the OS X file system interfaces. + +#### What we are doing + +We are actively working on both increasing caching while mitigating the +associated issues and on reducing the file system data path latency. This +requires significant analysis of file system traces and speculative development +of system improvements to try to address specific performance issues. Perhaps +surprisingly, application workload can have a huge effect on performance. As an +example, here are two different use cases contributed on the forum topic [File +access in mounted volumes extremely +slow](https://forums.docker.com/t/file-access-in-mounted-volumes-extremely-slow-cpu-bound/) +and how their performance differs and suffers due to latency, caching, and +coherence: + +1. A rake example (see below) appears to attempt to access 37000+ +different files that don't exist on the shared volume. We can work very hard to +speed up all use cases by 2x via latency reduction but this use case will still +seem "slow". The ultimate solution for rake is to use a "negative dcache" that +keeps track of, in the Linux kernel itself, the files that do not exist. +Unfortunately, even this is not sufficient for the first time rake is run on a +shared directory. To handle that case, we actually need to develop a Linux +kernel patch which negatively caches all directory entries not in a +specified set -- and this cache must be kept up-to-date in real-time with the OS +X file system state even in the presence of missing OS X FSEvents messages and +so must be invalidated if OS X ever reports an event delivery failure. + +2. Running ember build in a shared file system results in ember creating many +different temporary directories and performing lots of intermediate activity +within them. An empty ember project is over 300MB. This usage pattern does not +require coherence between Linux and OS X but, because we cannot distinguish this +fact at run-time, we maintain coherence during its hundreds of thousands of file +system accesses to manipulate temporary state. There is no "correct" solution in +this case. Either ember needs to change, the volume mount needs to have +coherence properties specified on it somehow, some heuristic needs to be +introduced to detect this access pattern and compensate, or the behavior needs +to be indicated via, e.g., extended attributes in the OS X file system. + +These two examples come from performance use cases contributed by users and they +are incredibly helpful in prioritizing aspects of file system performance to +improve. 
We are developing statistical file system trace analysis tools +to characterize slow-performing workloads more easily in order to decide what to +work on next. + +Under development, we have: + +1. A Linux kernel patch to reduce data path latency by 2/7 copies and 2/5 +context switches + +2. Increased OS X integration to reduce the latency between the hypervisor and +the file system server + +3. A server-side directory read cache to speed up traversal of large directories + +4. User-facing file system tracing capabilities so that you can send us +recordings of slow workloads for analysis + +5. A growing performance test suite of real world use cases (more on this below +in What you can do) + +6. Experimental support for using Linux's inode, writeback, and page caches + +7. End-user controls to configure the coherence of subsets of cross-OS bind +mounts without exposing all of the underlying complexity + +#### What you can do + +When you report shared file system performance issues, it is most helpful to +include a minimal Real World reproduction test case that demonstrates poor +performance. + +Without a reproduction, it is very difficult for us to analyze your use case and +determine what improvements would speed it up. When you don't provide a +reproduction, one of us has to take the time to figure out the specific software +you are using and guess and hope that we have configured it in a typical way or +a way that has poor performance. That usually takes 1-4 hours depending on your +use case and once it is done, we must then determine what regular performance is +like and what kind of slow-down your use case is experiencing. In some cases, it +is not obvious what operation is even slow in your specific development +workflow. The additional set-up to reproduce the problem means we have less time +to fix bugs, develop analysis tools, or improve performance. So, please include +simple, immediate performance issue reproduction test cases. The [rake +reproduction +case](https://forums.docker.com/t/file-access-in-mounted-volumes-extremely-slow-cpu-bound/8076/103) +by @hirowatari shown in the forums thread is a great example. + +This example originally provided: + +1. A version-controlled repository so any changes/improvements to the test case +can be easily tracked. + +2. A Dockerfile which constructs the exact image to run + +3. A command-line invocation of how to start the container + +4. A straight-forward way to measure the performance of the use case + +5. A clear explanation (README) of how to run the test case + +#### What you can expect + +We will continue to work toward an optimized shared file system implementation +on the Beta channel of Docker for Mac. + +You can expect some of the performance improvement work mentioned above to reach +the Beta channel in the coming release cycles. + +In due course, we will open source all of our shared file system components. At +that time, we would be very happy to collaborate with you on improving the +implementation of osxfs and related software. + +We still have on the slate to write up and publish details of shared file system +performance analysis and improvement on the Docker blog. Look for or nudge +@dsheets about those articles, which should serve as a jumping off point for +understanding the system, measuring it, or contributing to it. + +#### Wrapping Up + +We hope this gives you a rough idea of where `osxfs` performance is and where +it's going. 
We are treating good performance as a top priority feature of the +file system sharing component and we are actively working on improving it +through a number of different avenues. The osxfs project started in December +2015. Since the first integration into Docker for Mac in February 2016, we've +improved performance by 50x or more for many workloads while achieving nearly +complete POSIX compliance and without compromising coherence (it is shared and +not simply synced). Of course, in the beginning there was lots of low-hanging +fruit and now many of the remaining performance improvements require significant +engineering work on custom low-level components. + +We appreciate your understanding as we continue development of the product and +work on all dimensions of performance. We want to continue to work with the +community on this, so please continue to report issues as you find them. We look +forward to collaborating with you on ideas and on the source code itself. diff --git a/docker-for-mac/release-notes.md b/docker-for-mac/release-notes.md index 287a2d863a7..357bfd15293 100644 --- a/docker-for-mac/release-notes.md +++ b/docker-for-mac/release-notes.md @@ -29,7 +29,7 @@ Release notes for _stable_ and _beta_ releases are listed below. You can learn a **New** -* Support for OSX 10.12 Sierra +* Support for macOS 10.12 Sierra **Upgrades** @@ -43,12 +43,12 @@ Release notes for _stable_ and _beta_ releases are listed below. You can learn a **General** * Fixed communications glitch when UI talks to com.docker.vmnetd - Fixes https://github.com/docker/for-mac/issues/90 + Fixes [https://github.com/docker/for-mac/issues/90](https://github.com/docker/for-mac/issues/90) * `docker-diagnose`: display and record the time the diagnosis was captured * Don't compute the container folder in `com.docker.vmnetd` - Fixes https://github.com/docker/for-mac/issues/47 + Fixes [https://github.com/docker/for-mac/issues/47](https://github.com/docker/for-mac/issues/47) * Warn the user if BlueStacks is installed (potential kernel panic) @@ -69,11 +69,11 @@ Release notes for _stable_ and _beta_ releases are listed below. You can learn a * Entries from `/etc/hosts` should now resolve from within containers * Allow ports to be bound on host addresses other than `0.0.0.0` and `127.0.0.1` - Fixes issue reported in https://github.com/docker/for-mac/issues/68 + Fixes issue reported in [https://github.com/docker/for-mac/issues/68](https://github.com/docker/for-mac/issues/68) * Use Mac System Configuration database to detect DNS -**Filesharing (OSXFS)** +**Filesharing (osxfs)** * Fixed thread leak @@ -107,15 +107,15 @@ This bug fix release contains osxfs improvements. The fixed issues may have been seen as failures with apt-get and npm in containers, missed inotify events or unexpected unmounts. 
-* Bug fixes - - osxfs: fixed an issue causing access to children of renamed - directories to fail (symptoms: npm failures, apt-get failures) - - osxfs: fixed an issue causing some ATTRIB and CREATE inotify - events to fail delivery and other inotify events to stop - - osxfs: fixed an issue causing all inotify events to stop when an - ancestor directory of a mounted directory was mounted - - osxfs: fixed an issue causing volumes mounted under other mounts - to spontaneously unmount +**Bug fixes** + +* osxfs: fixed an issue causing access to children of renamed directories to fail (symptoms: npm failures, apt-get failures) + +* osxfs: fixed an issue causing some ATTRIB and CREATE inotify events to fail delivery and other inotify events to stop + +* osxfs: fixed an issue causing all inotify events to stop when an ancestor directory of a mounted directory was mounted + +* osxfs: fixed an issue causing volumes mounted under other mounts to spontaneously unmount ### Docker for Mac 1.12.0-a, 2016-08-03 (stable) @@ -145,6 +145,42 @@ events or unexpected unmounts. ## Beta Release Notes +### Beta 29 Release Notes (2016-10-25 1.12.3-rc1-beta29) + +**New** + +- Overlay2 is now the default storage driver. You must do a factory reset for overlay2 to be automatically used. (#5545) + +**Upgrades** + +- Docker 1.12.3-rc1 +- Linux kernel 4.4.27 + +**Bug fixes and minor changes** + +- Fix an issue where the whale animation during setting change was inconsistent +- Fix an issue where some windows stayed hidden behind another app +- Fix application of system or custom proxy settings over container restart +- Increase default ulimit for memlock (fixes [https://github.com/docker/for-mac/issues/801](https://github.com/docker/for-mac/issues/801) ) +- Fix an issue where the Docker status would continue to be + yellow/animated after the VM had started correctly +- osxfs: fix the prohibition of chown on read-only or mode 0 files (fixes [https://github.com/docker/for-mac/issues/117](https://github.com/docker/for-mac/issues/117), [https://github.com/docker/for-mac/issues/263](https://github.com/docker/for-mac/issues/263), [https://github.com/docker/for-mac/issues/633](https://github.com/docker/for-mac/issues/633) ) + +### Beta 28 Release Notes (2016-10-13 1.12.2-rc3-beta28) + +**Upgrades** + +- Docker 1.12.2 +- Kernel 4.4.24 +- Notary 0.4.2 + +**Bug fixes and minor changes** + +- Fixed an issue where Docker for Mac was incorrectly reported as updated +- osxfs: Fixed race condition causing some reads to run forever +- Channel is now displayed in About box +- Crash reports are sent over Bugsnag rather than HockeyApp + ### Beta 27 Release Notes (2016-09-28 1.12.2-rc1-beta27) **Upgrades** @@ -158,15 +194,15 @@ events or unexpected unmounts. 
**Bug fixes and minor changes** -* Fix an issue where some windows did not claim focus correctly -* Add UI when switching channel to prevent user losing containers and settings +* Fixed an issue where some windows did not claim focus correctly +* Added UI when switching channel to prevent user losing containers and settings * Check disk capacity before Toolbox import * Import certificates in `etc/ssl/certs/ca-certificates.crt` * DNS: reduce the number of UDP sockets consumed on the host * VPNkit: improve the connection-limiting code to avoid running out of sockets on the host * UDP: handle diagrams bigger than 2035, up to the configured macOS kernel limit -* UDP: make the forwarding more robust; drop packets and continue rather than stopping -* disk: make the "flush" behaviour configurable for database-like workloads. This works around a performance regression in `v1.12.1`. +* UDP: made the forwarding more robust; now, drop packets and continue rather than stopping +* disk: made the "flush" behaviour configurable for database-like workloads. This works around a performance regression in `v1.12.1`. ### Beta 26 Release Notes (2016-09-14 1.12.1-beta26) @@ -181,7 +217,7 @@ events or unexpected unmounts. **Bug fixes and minor changes** -* Fixed communications glitch when UI talks to `com.docker.vmnetd`. Fixes https://github.com/docker/for-mac/issues/90 +* Fixed communications glitch when UI talks to `com.docker.vmnetd`. Fixes [https://github.com/docker/for-mac/issues/90](https://github.com/docker/for-mac/issues/90) * UI fix for macOs 10.12 @@ -193,13 +229,13 @@ events or unexpected unmounts. * `docker-diagnose` displays and records the time the diagnosis was captured -* Ports are allowed to bind to host addresses other than `0.0.0.0` and `127.0.0.1`. Fixes issue reported in https://github.com/docker/for-mac/issues/68. +* Ports are allowed to bind to host addresses other than `0.0.0.0` and `127.0.0.1`. Fixes issue reported in [https://github.com/docker/for-mac/issues/68](https://github.com/docker/for-mac/issues/68). -* We no longer compute the container folder in `com.docker.vmnetd`. Fixes https://github.com/docker/for-mac/issues/47. +* We no longer compute the container folder in `com.docker.vmnetd`. Fixes [https://github.com/docker/for-mac/issues/47](https://github.com/docker/for-mac/issues/47). **Known Issues** -* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. The +* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. * There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and @@ -214,7 +250,7 @@ available in [Known Issues](troubleshoot.md#known-issues) in Troubleshooting. **Upgrades** -* Experimental support for OSX 10.12 Sierra (beta) +* Experimental support for macOS 10.12 Sierra (beta) **Bug fixes and minor changes** @@ -228,7 +264,7 @@ available in [Known Issues](troubleshoot.md#known-issues) in Troubleshooting. investigated. This includes failure to launch the app and being unable to upgrade to a new version. -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app * There are a number of issues with the performance of directories bind-mounted @@ -266,7 +302,7 @@ Issues](troubleshoot.md#known-issues) in Troubleshooting. 
* Several problems have been reported on macOS 10.12 Sierra and are being investigated. This includes failure to launch the app and being unable to upgrade to a new version. -* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. +* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. * There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large @@ -293,9 +329,9 @@ trees, may suffer from poor performance. For more information and workarounds, s **Known issues** -* Docker for Mac is not supported on OSX 10.12 Sierra +* Docker for Mac is not supported on macOS 10.12 Sierra -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app * There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see the bullet on [performance of bind-mounted directories](troubleshoot.md#bind-mounted-dirs) in [Known Issues](troubleshoot.md#known-issues) in Troubleshooting. @@ -318,7 +354,7 @@ trees, may suffer from poor performance. For more information and workarounds, s **Known issues** -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app * There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. More information is available in [Known Issues](troubleshoot.md#known-issues) in [Troubleshooting](troubleshoot.md) @@ -365,7 +401,7 @@ events or unexpected unmounts. **Known issues** -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app * There are a number of issues with the performance of directories bind-mounted with `osxfs`. In particular, writes of small blocks, and traversals of large directories are currently slow. Additionally, containers that perform large numbers of directory operations, such as repeated scans of large directory trees, may suffer from poor performance. For more information and workarounds, see [Known Issues](troubleshoot.md#known-issues) in [Logs and Troubleshooting](troubleshoot.md). 
@@ -384,7 +420,7 @@ events or unexpected unmounts. **Known issues** -* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker for Mac (`Docker.app`). +* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker for Mac (`Docker.app`). ### Beta 19 Release Notes (2016-07-14 1.12.0-rc4-beta19) @@ -468,7 +504,7 @@ events or unexpected unmounts. **Bug fixes and minor changes** -* Documentation moved to https://docs.docker.com/docker-for-mac/ +* Documentation moved to [https://docs.docker.com/docker-for-mac/](https://docs.docker.com/docker-for-mac/) * Allow non-admin users to launch the app for the first time (using admin creds) * Prompt non-admin users for admin password when needed in Preferences * Fixed download links, documentation links @@ -533,7 +569,7 @@ events or unexpected unmounts. **Known issues** -* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode with OSX 10.10. The issue is being investigated. The workaround is to restart `Docker.app`. +* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode with macOS 10.10. The issue is being investigated. The workaround is to restart `Docker.app`. **Bug fixes and minor changes** @@ -564,7 +600,7 @@ events or unexpected unmounts. **Known issues** -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. **Bug fixes and minor changes** @@ -583,7 +619,7 @@ events or unexpected unmounts. **Known issues** -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. **Bug fixes and minor changes** @@ -594,7 +630,7 @@ events or unexpected unmounts. **New** -The `osxfs` file system now persists ownership changes in an extended attribute. (See the topic on [ownership](osxfs.md#ownership) in [Sharing the OS X file system with Docker containers](osxfs.md).) +The `osxfs` file system now persists ownership changes in an extended attribute. (See the topic on [ownership](osxfs.md#ownership) in [Sharing the macOS file system with Docker containers](osxfs.md).) **Upgrades** @@ -644,7 +680,7 @@ The `osxfs` file system now persists ownership changes in an extended attribute. **Known issues** -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. **Bug fixes and minor changes** @@ -679,7 +715,7 @@ The `osxfs` file system now persists ownership changes in an extended attribute. **Known issues** -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app` +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. 
The workaround is to restart `Docker.app` **Bug fixes and minor changes** @@ -708,7 +744,7 @@ The `osxfs` file system now persists ownership changes in an extended attribute. **Known issues** -* Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app +* Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app * If VPN mode is enabled and then disabled and then re-enabled again, `docker ps` will block for 90s @@ -732,7 +768,7 @@ The `osxfs` file system now persists ownership changes in an extended attribute. **Known issues** -* `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. +* `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. @@ -768,7 +804,7 @@ lead to `Docker.app` not starting on reboot - There is a race on startup between docker and networking which can lead to Docker.app not starting on reboot. The workaround is to restart the application manually. -- Docker.app sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. +- Docker.app sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart Docker.app. - In VPN mode, the `-p` option needs to be explicitly of the form `-p <host port>:<container port>`. `-p <port>` and `-P` will not work yet. @@ -798,7 +834,7 @@ lead to `Docker.app` not starting on reboot - There is a race on startup between Docker and networking that can lead to `Docker.app` not starting on reboot. The workaround is to restart the application manually. -- `Docker.app` sometimes uses 200% CPU after OS X wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. +- `Docker.app` sometimes uses 200% CPU after macOS wakes up from sleep mode. The issue is being investigated. The workaround is to restart `Docker.app`. - VPN/Hostnet: In VPN mode, the `-p` option needs to be explicitly of the form `-p <host port>:<container port>`. `-p <port>` and `-P` will not work yet. @@ -810,7 +846,7 @@ work yet. - `docker ps` shows IP address rather than `docker.local` -- Re-enabled support for OS X Yosemite version 10.10 +- Re-enabled support for macOS Yosemite version 10.10 - Ensured binaries are built for 10.10 rather than 10.11 @@ -823,7 +859,7 @@ work yet. **New Features and Upgrades** -- Improved file sharing write speed in OSXFS +- Improved file sharing write speed in osxfs - User space networking: Renamed `bridged` mode to `nat` mode @@ -836,8 +872,8 @@ work yet. - GUI: Auto update automatically checks for new versions again - File System - - Fixed OSXFS chmod on sockets - - FixED OSXFS EINVAL from `open` using O_NOFOLLOW + - Fixed osxfs chmod on sockets + - Fixed osxfs EINVAL from `open` using O_NOFOLLOW - Hypervisor stability fixes, resynced with upstream repository diff --git a/docker-for-mac/troubleshoot.md b/docker-for-mac/troubleshoot.md index 6abb18c0abd..de6c49a9678 100644 --- a/docker-for-mac/troubleshoot.md +++ b/docker-for-mac/troubleshoot.md @@ -99,7 +99,7 @@ The diagnostics and usage information to the left of the results provide auto-ge ## Troubleshooting -#### Recreate or update your containers after Beta 18 upgrade +### Recreate or update your containers after Beta 18 upgrade Docker 1.12.0 RC3 release introduces a backward incompatible change from RC2 to RC3. 
(For more information, see https://github.com/docker/docker/issues/24343#issuecomment-230623542.) @@ -111,13 +111,13 @@ You can fix this by either [recreating](troubleshoot.md#recreate-your-containers If you get the error message shown above, we recommend recreating them. -##### Recreate your containers +#### Recreate your containers To recreate your containers, use Docker Compose. docker-compose down && docker-compose up -##### Update your containers +#### Update your containers To fix existing containers, follow these steps. @@ -153,7 +153,7 @@ To fix existing containers, follow these steps. $ docker start old-container old-container -#### Incompatible CPU detected +### Incompatible CPU detected Docker for Mac requires a processor (CPU) that supports virtualization and, more specifically, the [Apple Hypervisor framework](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/). Docker for Mac is only compatible with Macs that have a CPU that supports the Hypervisor framework. Most Macs built in 2010 and later support it, as described in the Apple Hypervisor Framework documentation about supported hardware: @@ -171,7 +171,7 @@ If not, the command will print `kern.hv_support: 0`. See also, [Hypervisor Framework Reference](https://developer.apple.com/library/mac/documentation/DriversKernelHardware/Reference/Hypervisor/) in the Apple documentation, and Docker for Mac system requirements in [What to know before you install](index.md#what-to-know-before-you-install). -#### Workarounds for common problems +### Workarounds for common problems * IPv6 workaround to auto-filter DNS addresses - IPv6 is not yet supported on Docker for Mac, which typically manifests as a network timeout when running `docker` commands that need access to external network servers (e.g., `docker pull busybox`). @@ -220,7 +220,7 @@ ${!DOCKER_*}` will unset existing `DOCKER` environment variables you have set.

-* Note that network connections will fail if the OS X Firewall is set to +* Note that network connections will fail if the macOS Firewall is set to "Block all incoming connections". You can enable the firewall, but `bootpd` must be allowed incoming connections so that the VM can get an IP address.
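If you want to keep the firewall turned on, one possible approach is to allow `bootpd` through the macOS application firewall from the command line. This is only a sketch; the `socketfilterfw` path and flags shown here are assumptions that can vary between macOS versions, so check the tool's usage output on your system first.

```bash
# Assumed workaround: allow the DHCP/BOOTP daemon through the application
# firewall so the VM can obtain an IP address, then retry the connection.
sudo /usr/libexec/ApplicationFirewall/socketfilterfw --add /usr/libexec/bootpd
sudo /usr/libexec/ApplicationFirewall/socketfilterfw --unblockapp /usr/libexec/bootpd
```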

@@ -244,8 +244,6 @@ environments, see [Docker for Mac vs. Docker Toolbox](docker-toolbox.md). See also [Known Issues](troubleshoot.md#known-issues) on this page, and the [FAQs](faqs.md) topic. - - ## Known issues * IPv6 is not yet supported on Docker for Mac. If you are using IPv6, and haven't upgraded to Beta 24 or v1.12.1 stable or newer, you will see a network @@ -268,7 +266,7 @@ Docker for Mac does not yet support IPv6. See "IPv6 workaround to auto-filter DN

-* Docker for Mac uses the `HyperKit` hypervisor (https://github.com/docker/hyperkit) in Mac OS X 10.10 Yosemite and higher. If you are developing with tools that have conflicts with `HyperKit`, such as [Intel Hardware Accelerated Execution Manager (HAXM)](https://software.intel.com/en-us/android/articles/intel-hardware-accelerated-execution-manager/), the current workaround is not to run them at the same time. You can pause `HyperKit` by quitting Docker for Mac temporarily while you work with HAXM. This will allow you to continue work with the other tools and prevent `HyperKit` from interfering. +* Docker for Mac uses the `HyperKit` hypervisor (https://github.com/docker/hyperkit) in macOS 10.10 Yosemite and higher. If you are developing with tools that have conflicts with `HyperKit`, such as [Intel Hardware Accelerated Execution Manager (HAXM)](https://software.intel.com/en-us/android/articles/intel-hardware-accelerated-execution-manager/), the current workaround is not to run them at the same time. You can pause `HyperKit` by quitting Docker for Mac temporarily while you work with HAXM. This will allow you to continue work with the other tools and prevent `HyperKit` from interfering.
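As a quick, illustrative check (not part of the product), you can confirm that no `hyperkit` process is left running after you quit Docker for Mac, before starting HAXM-based tools:

```bash
# No output from pgrep means the hypervisor has fully stopped.
pgrep -fl hyperkit || echo "hyperkit is not running"
```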

@@ -299,18 +297,14 @@ Alternatively you could create a plain-text TCP proxy on localhost:1234 using: - Symfony - Magento - Zend Framework - - PHP applications that use [Composer](https://getcomposer.org) to install dependencies in a ```vendor``` folder - -

+ - PHP applications that use [Composer](https://getcomposer.org) to install dependencies in a `vendor` folder

As a work-around for this behavior, you can put vendor or third-party library directories in Docker volumes, perform temporary file system operations outside of `osxfs` mounts, and use third-party tools like Unison or `rsync` to synchronize between container directories and bind-mounted directories. We are actively working on `osxfs` - performance using a number of different techniques and we look forward - to sharing improvements with you soon. + performance using a number of different techniques. To learn more, please see the topic on [Performance issues, solutions, and roadmap](osxfs.md#performance-issues-solutions-and-roadmap). -
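As a minimal sketch of the named-volume approach for a PHP project (the paths, volume name, and image below are illustrative assumptions, not part of the documented setup):

```bash
# Create a named volume for the Composer vendor directory (Docker 1.12 syntax).
docker volume create --name myapp_vendor

# Bind-mount the project through osxfs, but keep vendor/ in the faster named volume.
docker run -d \
  -v "$(pwd)":/var/www/myapp \
  -v myapp_vendor:/var/www/myapp/vendor \
  php:7.0-apache
```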

* If your system does not have access to an NTP server, then after a hibernate the time seen by Docker for Mac may be considerably out of sync with the host. Furthermore, the time may slowly drift out of sync during use. To manually reset the time after hibernation, run: diff --git a/docker-for-windows/images/drive_sharing_firewall_blocked.png b/docker-for-windows/images/drive_sharing_firewall_blocked.png new file mode 100644 index 00000000000..d3075a7bd2b Binary files /dev/null and b/docker-for-windows/images/drive_sharing_firewall_blocked.png differ diff --git a/docker-for-windows/images/win-virtualization-enabled.png b/docker-for-windows/images/win-virtualization-enabled.png new file mode 100644 index 00000000000..7b9ecd8ed9b Binary files /dev/null and b/docker-for-windows/images/win-virtualization-enabled.png differ diff --git a/docker-for-windows/index.md b/docker-for-windows/index.md index 0151d59401e..568cc03f899 100644 --- a/docker-for-windows/index.md +++ b/docker-for-windows/index.md @@ -5,6 +5,7 @@ aliases: - /windows/ - /windows/started/ - /docker-for-windows/started/ +- /installation/windows/ description: Getting Started keywords: - windows, beta, alpha, tutorial @@ -24,7 +25,6 @@ Please read through these topics on how to get started. To **give us your feedba >**Already have Docker for Windows?** If you already have Docker for Windows installed, and are ready to get started, skip over to the [Getting Started with Docker](/engine/getstarted/index.md) tutorial. - ## Download Docker for Windows If you have not already done so, please install Docker for Windows. You can download installers from the stable or beta channel. For more about stable and beta channels, see the [FAQs](faqs.md#questions-about-stable-and-beta-channels). @@ -51,25 +51,48 @@ If you have not already done so, please install Docker for Windows. You can down ->**Important Notes**: +>**Important Notes:** > ->* Docker for Windows requires 64bit Windows 10 Pro, Enterprise and Education (1511 November update, Build 10586 or later) and Microsoft Hyper-V. Please see [What to know before you install](index.md#what-to-know-before-you-install) for a full list of prerequisites. +>- Docker for Windows requires 64bit Windows 10 Pro, Enterprise and Education +> (1511 November update, Build 10586 or later) and Microsoft Hyper-V. Please +> see [What to know before you install](index.md#what-to-know-before-you-install) +> for a full list of prerequisites. > ->* You can switch between beta and stable versions, but _you must have only one app installed at a time_. Also, you will need to save images and export containers you want to keep before uninstalling the current version before installing another. For more about this, see the [FAQs about beta and stable channels](faqs.md#questions-about-stable-and-beta-channels). +>- You can switch between beta and stable versions, but you must have only one +> app installed at a time. Also, you will need to save images and export +> containers you want to keep before uninstalling the current version before +> installing another. For more about this, see the +> [FAQs about beta and stable channels](faqs.md#questions-about-stable-and-beta-channels). ## What to know before you install * **README FIRST for Docker Toolbox and Docker Machine users**: Docker for Windows requires Microsoft Hyper-V to run. After Hyper-V is enabled, VirtualBox will no longer work, but any VirtualBox VM images will remain. 
VirtualBox VMs created with `docker-machine` (including the `default` one typically created during Toolbox install) will no longer start. These VMs cannot be used side-by-side with Docker for Windows. However, you can still use `docker-machine` to manage remote VMs. - +

* The current version of Docker for Windows runs on 64bit Windows 10 Pro, Enterprise and Education (1511 November update, Build 10586 or later). In the future we will support more versions of Windows 10. - +

* Containers and images created with Docker for Windows are shared between all user accounts on machines where it is installed. This is because all Windows accounts will use the same VM to build and run containers. In the future, Docker for Windows will better isolate user content. - -* The Hyper-V package must be enabled for Docker for Windows to work. The Docker for Windows installer will enable it for you, if needed. (This requires a reboot). - - >**Note**: If your system does not satisfy these requirements, you can install [Docker Toolbox](/toolbox/overview.md), which uses Oracle Virtual Box instead of Hyper-V. - -* **What the install includes**: The installation provides [Docker Engine](https://docs.docker.com/engine/userguide/intro/), Docker CLI client, [Docker Compose](https://docs.docker.com/compose/overview/), and [Docker Machine](https://docs.docker.com/machine/overview/). +

+* The Hyper-V package must be enabled for Docker for Windows to work. The Docker for Windows installer will enable it for you, if needed. (This requires a reboot.) If your system does not satisfy these requirements, you can install [Docker Toolbox](/toolbox/overview.md), which uses Oracle VirtualBox instead of Hyper-V. +

+* Virtualization must be enabled. Typically, virtualization is enabled by default. (Note that this is different from having Hyper-V enabled.) For more detail, see [Virtualization must be enabled](troubleshoot.md#virtualization-must-be-enabled) in Troubleshooting. +

+* **What the Docker for Windows install includes**: The installation provides [Docker Engine](https://docs.docker.com/engine/userguide/intro/), Docker CLI client, [Docker Compose](https://docs.docker.com/compose/overview/), and [Docker Machine](https://docs.docker.com/machine/overview/). +

+* You can run **Docker on Windows Server 16 and Windows 10** in two different ways: + + * You can [install a native Docker binary](https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start_windows_10) + which allows you to develop and run Windows containers natively. However, if you + install Docker this way, you cannot develop or run Linux containers. If you try to run a Linux container on the native Docker daemon, an error occurs: + + ```no-highlight + C:\Program Files\Docker\docker.exe: + image operating system "linux" cannot be used on this platform. + See 'C:\Program Files\Docker\docker.exe run --help'. + ``` + + * You can [install Docker for Windows](#step-1-install-docker-for-windows), + which allows you to develop and run Windows containers natively, or develop + and run Linux containers using Hyper-V. This is the best of both worlds. ## Step 1. Install Docker for Windows @@ -301,6 +324,21 @@ Permissions to access shared drives are tied to the credentials you provide here See also [Verify domain user has permissions for shared drives](troubleshoot.md#verify-domain-user-has-permissions-for-shared-drives-volumes) in Troubleshooting. +#### Firewall rules for shared drives + +Shared drives require port 445 to be open between the host machine and the virtual +machine that runs Linux containers. + +>**Note**: In Docker for Windows Beta 29 and higher, +Docker detects if port 445 is closed and shows the following message when you +try to add a shared drive: ![Port 445 blocked](images/drive_sharing_firewall_blocked.png) + + +To share the drive, allow connections between the Windows host machine and the +virtual machine in Windows Firewall or your third party firewall software. You +do not need to open port 445 on any other network. By default, allow connections +to 10.0.75.1 port 445 (the Windows host) from 10.0.75.2 (the virtual machine). + ### Advanced ![CPU and Memory settings](images/settings-cpu-ram.png) @@ -352,7 +390,7 @@ If you have containers that you wish to keep running across restarts, you should ### Docker daemon You can configure options on the Docker daemon in the given JSON configuration file, and determine how your containers will run. -For a full list of options on the Docker daemon, see daemon in the Docker Engine command line reference. +For a full list of options on the Docker daemon, see daemon in the Docker Engine command line reference. ![Docker Daemon](images/docker-daemon.png) @@ -368,6 +406,16 @@ This feature is not yet available on stable builds. See also [Shared Drives](#shared-drives) +#### Getting started with Windows containers (Beta feature) + +If you are interested in working with Windows containers, here are some guides to help you get started. + +* [Build and Run Your First Windows Server Container (Blog Post)](https://blog.docker.com/2016/09/build-your-first-docker-windows-server-container/) gives a quick tour of how to build and run native Docker Windows containers on Windows 10 and Windows Server 2016 evaluation releases. + +* [Getting Started with Windows Containers (Lab)](https://github.com/docker/labs/tree/master/windows/windows-containers) shows you how to use the [MusicStore](https://github.com/aspnet/MusicStore/blob/dev/README.md) application with Windows containers. The MusicStore is a standard .NET application and, [forked here to use containers](https://github.com/friism/MusicStore), is a good example of a multi-container application. 
+ + >**Disclaimer:** This lab is still in work, and is based off of the blog, but you can test and leverage the example walkthroughs now, if you want to start experimenting. Please checking back as the lab evolves. + ### Diagnose and Feedback If you encounter problems for which you do not find solutions in this documentation, searching [Docker for Windows issues on GitHub](https://github.com/docker/for-win/issues) already filed by other users, or on the [Docker for Windows forum](https://forums.docker.com/c/docker-for-windows), we can help you troubleshoot the log data. diff --git a/docker-for-windows/menu.md b/docker-for-windows/menu.md deleted file mode 100644 index 4e489c29873..00000000000 --- a/docker-for-windows/menu.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -aliases: [] -description: Docker Windows -keywords: -- docker, windows, desktop, editions -menu: - main: - identifier: pinata_win_menu - weight: -78 -title: Docker for Windows ---- - -# Docker for Mac and Docker for Windows diff --git a/docker-for-windows/opensource.md b/docker-for-windows/opensource.md index 36864816d95..293d5b04809 100644 --- a/docker-for-windows/opensource.md +++ b/docker-for-windows/opensource.md @@ -12,7 +12,7 @@ title: Open Source Licensing # Open Source Components and Licensing -Docker Desktop Editions are built using open source software software. For details on the licensing, choose --> **About** from within the application, then click **Acknowledgements**. +Docker Desktop Editions are built using open source software software. For details on the licensing, choose --> **About** from within the application, then click **Acknowledgements**. Docker Desktop Editions distribute some components that are licensed under the GNU General Public License. You can download the source for these components [here](https://download.docker.com/opensource/License.tar.gz). diff --git a/docker-for-windows/release-notes.md b/docker-for-windows/release-notes.md index 51c324a812d..7692aefa96c 100644 --- a/docker-for-windows/release-notes.md +++ b/docker-for-windows/release-notes.md @@ -132,6 +132,81 @@ Release notes for _stable_ and _beta_ releases are listed below. You can learn a ## Beta Release Notes +### Beta 29 Release Notes (2016-10-25 1.12.3-rc1-beta29) + +>**Important Note**: +> +> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: + +> [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) + +> This problem is fixed as of Beta 23 for subsequent auto-updates. +> +Windows Container support relies on the Windows 10 container feature, which is +**experimental** at this point. Windows 10 Pro (1607, build number 14393) +requires update `KB3192366` (soon to be released via Windows Update) to fully +work. Some insider builds may not work. + +**New** + +- Restore the VM's configuration when it was changed by the user +- Overlay2 is now the default storage driver. 
After a factory reset overlay2 will automatically be used +- Detect firewall configuration that might block the file sharing +- Send more GUI usage statistics to help us improve the product + +**Upgrades** + +- Docker 1.12.3-rc1 +- Linux Kernel 4.4.27 + +**Bug fixes and minor changes** + +- Faster mount/unmount of shared drives +- Added a timeout to mounting/unmounting a shared drive +- Add the settings to the diagnostics +- Increase default ulimit for memlock (fixes https://github.com/docker/for-mac/issues/801) +- Make sure we don't use an older Nlog library from the GAC + + +### Beta 28 Release Notes (2016-10-13 1.12.2-rc3-beta28) + +>**Important Note**: +> +> The auto-update function in Beta 21 will not be able to install this update. To install the latest beta manually if you are still on Beta 21, please download the installer here: + +> [https://download.docker.com/win/beta/InstallDocker.msi](https://download.docker.com/win/beta/InstallDocker.msi) + +> This problem is fixed as of Beta 23 for subsequent auto-updates. +> +Windows Container support relies on the Windows 10 container feature, which is +**experimental** at this point. Windows 10 Pro (1607, build number 14393) +requires update `KB3192366` (soon to be released via Windows Update) to fully +work. Some insider builds may not work. + +**New** + +- Path to HyperV disks in no longer hardcoded, making the Toolbox import work with non-standard path +- Verify that ALL HyperV features are enabled +- Make it clear why user cannot switch to Windows Containers with a tooltip in the systray +- Added Moby console to the logs +- Save the current engine with the other settings +- Notary version 0.4.2 installed + + +**Upgrades** + +- Docker 1.12.2 +- Kernel 4.4.24 + +**Bug fixes and minor changes** + +- Fixed a password escaping regression +- Support writing large values to the database, especially for trusted CAs +- VpnKit is now restarted if it dies +- Make sure invalid "DockerNat" switches are not used +- Preserve the Powershell stacktraces +- Write OS and Application versions at the top of each log file + ### Beta 27 Release Notes (2016-09-28 1.12.2-rc1-beta27) >**Important Note**: @@ -814,8 +889,8 @@ are working on a solution. - Rename `console` to `debug console` - Remove `machine` from notification - Open the feedback forum -- Use same MixPanel project for Windows and OSX -- Align MixPanel events with OSX +- Use same MixPanel project for Windows and macOS +- Align MixPanel events with macOS - Added a script to diagnose problems - Submit diagnostic with bugsnag reports - MixPanel heartbeat every hour diff --git a/docker-for-windows/troubleshoot.md b/docker-for-windows/troubleshoot.md index 04424bc259d..ab951efbd8c 100644 --- a/docker-for-windows/troubleshoot.md +++ b/docker-for-windows/troubleshoot.md @@ -22,7 +22,7 @@ Here is information about how to diagnose and troubleshoot problems, send logs a ## Submitting diagnostics, feedback, and GitHub issues -If you encounter problems for which you do not find solutions in this documentation, on [Docker for Windows issues on GitHub](https://github.com/docker/for-win/issues), or the [Docker for Windows forum](https://forums.docker.com/c/docker-for-windows), we can help you troubleshoot the log data. See [Diagnose and Feedback](index.md#diagnose-and-feedback) to learn about diagnostics and how to create new issues on GitHub. 
+If you encounter problems for which you do not find solutions in this documentation, on [Docker for Windows issues on GitHub](https://github.com/docker/for-win/issues), or the [Docker for Windows forum](https://forums.docker.com/c/docker-for-windows), we can help you troubleshoot the log data. See [Diagnose and Feedback](index.md#diagnose-and-feedback) to learn about diagnostics and how to create new issues on GitHub. ## Checking the Logs @@ -97,7 +97,7 @@ Symlinks will work within and across containers. However, symlinks created outsi Any file destined to run inside a container must use Unix style `\n` line endings. This includes files referenced at the command line for builds and in RUN commands in Docker files. -Docker containers and `docker build` run in a Unix environment, so files in containers must use Unix style line endings `\n`, _not_ Windows style: `\r\n`. Keep this in mind when authoring files such as shell scripts using Windows tools, where the default is likely to be Windows style line endings. These commands ultimately get passed to Unix commands inside a Unix based container (for example, a shell script passed to `/bin/sh`). If Windows style line endings are used, `docker run` will fail with syntax errors. +Docker containers and `docker build` run in a Unix environment, so files in containers must use Unix style line endings: `\n`, _not_ Windows style: `\r\n`. Keep this in mind when authoring files such as shell scripts using Windows tools, where the default is likely to be Windows style line endings. These commands ultimately get passed to Unix commands inside a Unix based container (for example, a shell script passed to `/bin/sh`). If Windows style line endings are used, `docker run` will fail with syntax errors. For an example of this issue and the resolution, see this issue on GitHub: Docker RUN fails to execute shell script (https://github.com/docker/docker/issues/24388). @@ -160,7 +160,22 @@ To fix existing containers, follow these steps. ``` ### Hyper-V -Docker for Windows requires a Hyper-V as well as the Hyper-V Module for Windows Powershell to be installed and enabled. See [these instructions](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) to install Hyper-V manually. A reboot is *required*. If you install Hyper-V without the reboot, Docker for Windows will not work correctly. On some systems, Virtualization needs to be enabled in the BIOS. The steps to do so are Vendor specific, but typically the BIOS option is called `Virtualization Technology (VTx)` or similar. + +Docker for Windows requires a Hyper-V as well as the Hyper-V Module for Windows Powershell to be installed and enabled. The Docker for Windows installer will enable it for you. + +See [these instructions](https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/quick_start/walkthrough_install) to install Hyper-V manually. A reboot is *required*. If you install Hyper-V without the reboot, Docker for Windows will not work correctly. On some systems, Virtualization needs to be enabled in the BIOS. The steps to do so are Vendor specific, but typically the BIOS option is called `Virtualization Technology (VTx)` or similar. + +### Virtualization must be enabled + +In addition to [Hyper-V](#hyper-v), virtualization must be enabled. + +If, at some point, if you manually uninstall Hyper-V or disable virtualization, Docker for Windows will not start. + +Verify that virtualization is enabled on Task Manager. 
+ +![Task Manager](images/win-virtualization-enabled.png) + +See also, the user reported issue [Unable to run Docker for Windows on Windows 10 Enterprise](https://github.com/docker/for-win/issues/74) ### Networking issues diff --git a/docker-hub/bitbucket.md b/docker-hub/bitbucket.md index b192c0c4dc2..e83c4d6e6cc 100644 --- a/docker-hub/bitbucket.md +++ b/docker-hub/bitbucket.md @@ -34,12 +34,12 @@ to create a Docker Hub repository from which to create the Automatic Build. ## Creating an Automated Build You can [create an Automated Build]( -https://hub.docker.com/add/automated-build/bitbucket/orgs/) from any of your +https://hub.docker.com/add/automated-build/bitbucket/) from any of your public or private Bitbucket repositories with a `Dockerfile`. To get started, log in to Docker Hub and click the "Create ▼" menu item at the top right of the screen. Then select -[Create Automated Build](https://hub.docker.com/add/automated-build). +[Create Automated Build](https://hub.docker.com/add/automated-build/bitbucket/). Select the the linked Bitbucket account, and then choose a repository to set up an Automated Build for. diff --git a/docker-hub/builds.md b/docker-hub/builds.md index 8e15576bd1e..ebffb1e751b 100644 --- a/docker-hub/builds.md +++ b/docker-hub/builds.md @@ -16,8 +16,7 @@ You can build your images automatically from a build context stored in a reposit Automated Builds have several advantages: * Images built in this way are built exactly as specified. - * The `Dockerfile` is available to anyone with access to -your Docker Hub repository. + * The `Dockerfile` is available to anyone with access to your Docker Hub repository. * Your repository is kept up-to-date with code changes automatically. Automated Builds are supported for both public and private repositories @@ -27,15 +26,22 @@ on both [GitHub](http://github.com) and [Bitbucket](https://bitbucket.org/). Thi To use automated builds you must have an [account on Docker Hub](accounts.md) and on the hosted repository provider (GitHub or Bitbucket). If you have previously linked your Github or Bitbucket account, you must have -chosen the Public and Private connection type. To view your current connection -settings, log in to Docker Hub and choose Profile > Settings > Linked Accounts & Services. +chosen the Public and Private connection type. +To view your current connection +settings, log in to Docker Hub and choose **Profile > Settings > Linked Accounts & Services**. + +## Limitations + +Currently Docker Hub does not support Git LFS (Large File Storage). If you have binaries in your build context that are managed by Git LFS, only the pointer file will be present in the clone made during the automated build, which is not what you want. + +Subscribe to the [GitHub issue](https://github.com/docker/hub-feedback/issues/500) tracking this limitation. ## Link to a hosted repository service 1. Log into Docker Hub. -2. Navigate to Profile > Settings > Linked Accounts & Services. +2. Navigate to **Profile > Settings > Linked Accounts & Services**. 3. Click the service you want to link. diff --git a/docker-hub/menu.md b/docker-hub/menu.md deleted file mode 100644 index 6f5d5e30697..00000000000 --- a/docker-hub/menu.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -description: this is a menu -keywords: none -menu: - main: - identifier: mn_pubhub -title: Docker Hub -type: menu ---- - -# Menu topic - -If you can view this content, please raise a bug report. 
diff --git a/docker-hub/webhooks.md b/docker-hub/webhooks.md index db87448dbd3..cbb653864ad 100644 --- a/docker-hub/webhooks.md +++ b/docker-hub/webhooks.md @@ -31,8 +31,8 @@ With your webhook, you specify a target URL and a JSON payload to deliver. The e "comment_count": "0", "date_created": 1.417494799e+09, "description": "", - "dockerfile": "#\n# BUILD\u0009\u0009docker build -t svendowideit/apt-cacher .\n# RUN\u0009\u0009docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher\n#\n# and then you can run containers with:\n# \u0009\u0009docker run -t -i -rm -e http_proxy http://192.168.1.2:3142/ debian bash\n#\nFROM\u0009\u0009ubuntu\nMAINTAINER\u0009SvenDowideit@home.org.au\n\n\nVOLUME\u0009\u0009[\/var/cache/apt-cacher-ng\]\nRUN\u0009\u0009apt-get update ; apt-get install -yq apt-cacher-ng\n\nEXPOSE \u0009\u00093142\nCMD\u0009\u0009chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*\n, - full_description: Docker Hub based automated build from a GitHub repo", + "dockerfile": "#\n# BUILD\u0009\u0009docker build -t svendowideit/apt-cacher .\n# RUN\u0009\u0009docker run -d -p 3142:3142 -name apt-cacher-run apt-cacher\n#\n# and then you can run containers with:\n# \u0009\u0009docker run -t -i -rm -e http_proxy http://192.168.1.2:3142/ debian bash\n#\nFROM\u0009\u0009ubuntu\nMAINTAINER\u0009SvenDowideit@home.org.au\n\n\nVOLUME\u0009\u0009[\/var/cache/apt-cacher-ng\]\nRUN\u0009\u0009apt-get update ; apt-get install -yq apt-cacher-ng\n\nEXPOSE \u0009\u00093142\nCMD\u0009\u0009chmod 777 /var/cache/apt-cacher-ng ; /etc/init.d/apt-cacher-ng start ; tail -f /var/log/apt-cacher-ng/*\n", + "full_description": "Docker Hub based automated build from a GitHub repo", "is_official": false, "is_private": true, "is_trusted": true, diff --git a/docker-store/menu.md b/docker-store/menu.md deleted file mode 100644 index a7dc01275b1..00000000000 --- a/docker-store/menu.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -description: Docker Store menu -keywords: -- Docker, docker, store, purchase images -menu: - main: - identifier: docker-store -title: Docker Store -type: menu ---- - -If you can see this page, please file a bug by emailing docs@docker.com diff --git a/docker-trusted-registry/high-availability/backups-and-disaster-recovery.md b/docker-trusted-registry/high-availability/backups-and-disaster-recovery.md index c3e9fbb6a7f..ac75a5cf308 100644 --- a/docker-trusted-registry/high-availability/backups-and-disaster-recovery.md +++ b/docker-trusted-registry/high-availability/backups-and-disaster-recovery.md @@ -74,7 +74,7 @@ $ docker run -i --rm docker/dtr backup \ --ucp-ca "$(cat ucp-ca.pem)" \ --existing-replica-id \ --ucp-username \ - --ucp-password > /tmp/backup.tar + --ucp-password > /var/tmp/backup.tar ``` Where: @@ -109,8 +109,8 @@ consideration whether your DTR installation is configured to store images on the filesystem or using a cloud provider. You can check the -[reference documentation](../reference/backup.md), for the -backup command to learn about all the available flags. +[reference documentation](../reference/restore.md), for the +restore command to learn about all the available flags. 
As an example, to install DTR on the host and restore its @@ -127,7 +127,7 @@ $ docker run -i --rm \ --ucp-ca "$(cat ucp-ca.pem)" \ --ucp-username \ --ucp-password \ - --dtr-load-balancer < /tmp/backup.tar + --dtr-external-url < /var/tmp/backup.tar ``` Where: @@ -135,7 +135,7 @@ Where: * `--ucp-url` is the address of UCP, * `--ucp-ca` is the UCP certificate authority, * `--ucp-username`, and `--ucp-password` are the credentials of a UCP administrator, -* `--dtr-load-balancer` is the domain name or ip where DTR can be reached. +* `--dtr-external-url` is the domain name or ip where DTR can be reached. ## Where to go next diff --git a/docker-trusted-registry/high-availability/index.md b/docker-trusted-registry/high-availability/index.md index 214772779b2..b2548796492 100644 --- a/docker-trusted-registry/high-availability/index.md +++ b/docker-trusted-registry/high-availability/index.md @@ -82,7 +82,7 @@ or cloud-based load balancer to balance requests across multiple DTR replicas. Make sure you configure your load balancer to: * Load-balance TCP traffic on ports 80 and 443, -* Not terminate HTTPS connections, +* Use a TCP load balancer that doesn't terminate HTTPS connections, * Use the `/load_balancer_status` endpoint on each DTR replica, to check if the replica is healthy and if it should remain on the load balancing pool or not. diff --git a/docker-trusted-registry/high-availability/menu.md b/docker-trusted-registry/high-availability/menu.md deleted file mode 100644 index ab78408a897..00000000000 --- a/docker-trusted-registry/high-availability/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Learn how to set up Docker Trusted Registry for high availability. -keywords: -- docker, registry, high-availability, backup, recovery -menu: - main: - identifier: dtr_menu_high_availability - parent: workw_dtr - weight: 70 -title: High-availability -type: menu ---- - - diff --git a/docker-trusted-registry/install/install-dtr-offline.md b/docker-trusted-registry/install/install-dtr-offline.md index 42527c0da04..4b2928bb286 100644 --- a/docker-trusted-registry/install/install-dtr-offline.md +++ b/docker-trusted-registry/install/install-dtr-offline.md @@ -26,7 +26,7 @@ copy that package to the nodes where you’ll install DTR. Use a computer with internet access to download a single package with all Docker Datacenter components: - ```bash + ```none $ wget https://packages.docker.com/caas/ucp-1.1.2_dtr-2.0.3.tar.gz -O docker-datacenter.tar.gz ``` @@ -36,7 +36,7 @@ copy that package to the nodes where you’ll install DTR. want to install Docker Trusted Registry. You can use the `scp` command for this. - ```bash + ```none $ scp docker-datacenter.tag.gz $USER@$DTR_HOST:/tmp ``` @@ -47,7 +47,7 @@ copy that package to the nodes where you’ll install DTR. Once the package is on the node where you want to install DTR, you can use the `docker load` command, to load the images from the .tar file. 
- ```bash + ```none $ docker load < /tmp/docker-datacenter.tar.gz ``` diff --git a/docker-trusted-registry/install/menu.md b/docker-trusted-registry/install/menu.md deleted file mode 100644 index 6e2e365492b..00000000000 --- a/docker-trusted-registry/install/menu.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: Trusted Registry Installation Overview -keywords: -- docker, documentation, about, technology, install, enterprise, hub, CS engine, Docker - Trusted Registry -menu: - main: - identifier: dtr-menu-install - parent: workw_dtr - weight: 30 -title: Installation -type: menu ---- - - diff --git a/docker-trusted-registry/install/upgrade/menu.md b/docker-trusted-registry/install/upgrade/menu.md deleted file mode 100644 index 0395a0afdcd..00000000000 --- a/docker-trusted-registry/install/upgrade/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Learn how to upgrade your Docker Trusted Registry. -keywords: -- docker, dtr, upgrade, install -menu: - main: - identifier: menu_dtr_upgrade - parent: dtr-menu-install - weight: 40 -title: Upgrade -type: menu ---- - - diff --git a/docker-trusted-registry/menu.md b/docker-trusted-registry/menu.md deleted file mode 100644 index 31e6e95be94..00000000000 --- a/docker-trusted-registry/menu.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -description: Docker Trusted Registry -keywords: -- docker, documentation, about, technology, understanding, enterprise, hub, registry -menu: - main: - identifier: workw_dtr - weight: -63 -title: Docker Trusted Registry -type: menu ---- - - diff --git a/docker-trusted-registry/monitor-troubleshoot/menu.md b/docker-trusted-registry/monitor-troubleshoot/menu.md deleted file mode 100644 index 7774d8012c2..00000000000 --- a/docker-trusted-registry/monitor-troubleshoot/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Learn how to monitor and troubleshoot your DTR cluster -keywords: -- docker, registry, monitor, troubleshoot -menu: - main: - identifier: dtr_menu_monitor_troubleshoot - parent: workw_dtr - weight: 60 -title: Monitor and troubleshoot -type: menu ---- - - diff --git a/docker-trusted-registry/reference/backup.md b/docker-trusted-registry/reference/backup.md index 11b10715514..503a70dfa3a 100644 --- a/docker-trusted-registry/reference/backup.md +++ b/docker-trusted-registry/reference/backup.md @@ -16,8 +16,8 @@ Backup a DTR cluster to a tar file and stream it to stdout ## Usage ```bash -$ docker run -i --rm docker/dtr \ - backup [command options] > backup.tar +docker run -i --rm docker/dtr \ + backup [command options] > backup.tar ``` ## Description @@ -35,16 +35,17 @@ configured to store images on the filesystem or using a cloud provider. WARNING: This backup contains sensitive information and should be stored securely. 
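As one possible way to follow that advice (the tooling here is a suggestion, not something DTR requires), you can tighten the file permissions on the backup and encrypt it with a passphrase:

```bash
# Restrict access to the backup, then encrypt it symmetrically with GnuPG.
chmod 600 backup.tar
gpg --symmetric backup.tar   # writes backup.tar.gpg; store the passphrase safely
```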
- -| Option | Description | -|:------------------------|:--------------------------------------------------------------------------------------| -| `--ucp-url` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--existing-replica-id` | ID of an existing replica in a cluster [$DTR_EXISTING_REPLICA_ID] | -| `--config-only` | Backup/restore only the configurations of DTR and not the database [$DTR_CONFIG_ONLY] | +## Options + +| Option | Description | +|:------------------------|:-------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--existing-replica-id` | ID of an existing replica in a cluster | +| `--config-only` | Backup/restore only the configurations of DTR and not the database | diff --git a/docker-trusted-registry/reference/dumpcerts.md b/docker-trusted-registry/reference/dumpcerts.md index ab58f6398ad..6842f6c9534 100644 --- a/docker-trusted-registry/reference/dumpcerts.md +++ b/docker-trusted-registry/reference/dumpcerts.md @@ -16,7 +16,7 @@ Dump out the TLS certificates used by this DTR instance ## Usage ```bash -$ docker run -it --rm docker/dtr \ +docker run -i --rm docker/dtr \ dumpcerts [command options] > backup.tar ``` @@ -26,14 +26,16 @@ This command creates a backup of the certificates used by DTR for communicating across replicas with TLS. 
-| Option | Description | -|:------------------------|:-------------------------------------------------------------------| -| `--ucp-url ` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username ` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password ` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--existing-replica-id` | ID of an existing replica in a cluster [$DTR_EXISTING_REPLICA_ID] | +## Options + +| Option | Description | +|:------------------------|:---------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--existing-replica-id` | ID of an existing replica in a cluster | diff --git a/docker-trusted-registry/reference/images.md b/docker-trusted-registry/reference/images.md new file mode 100644 index 00000000000..609649ed4ed --- /dev/null +++ b/docker-trusted-registry/reference/images.md @@ -0,0 +1,26 @@ +--- +menu: + main: + description: Docker Trusted Registry images command reference. + identifier: dtr_reference_images + keywords: + - docker, registry, reference, images + parent: dtr_menu_reference +title: images +--- + +# docker/dtr images + +Lists all the images necessary to install DTR + +## Usage + +```bash +docker run -it --rm docker/dtr \ + images [command options] +``` + +## Description + + +This command lists all the images necessary to install DTR. diff --git a/docker-trusted-registry/reference/index.md b/docker-trusted-registry/reference/index.md index b0acad5f6e4..75b12486955 100644 --- a/docker-trusted-registry/reference/index.md +++ b/docker-trusted-registry/reference/index.md @@ -10,35 +10,23 @@ menu: title: Overview --- -# docker/dtr image overview +# docker/dtr overview This tool has commands to install, configure, and backup Docker Trusted Registry (DTR). It also allows uninstalling DTR. By default the tool runs in interactive mode. It prompts you for the values needed. -For running this tool in non-interactive mode, there are three -ways you can use to pass values: - -```bash -$ docker run -it --rm docker/dtr command --option value -$ docker run -e --rm docker/dtr command ENV_VARIABLE=value -$ docker run -e --rm docker/dtr command ENV_VARIABLE -``` Additional help is available for each command with the '--help' option. 
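For example, to print the help for a single command (shown here with `backup`; any of the commands listed in the table that follows works the same way):

```bash
docker run -it --rm docker/dtr \
  backup --help
```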
+ ## Usage ```bash -$ docker run -it --rm docker/dtr \ +docker run -it --rm docker/dtr \ command [command options] ``` -## Options - -| Option | Description | -|:------------|:------------| -| `--help, h` | Show help | ## Commands @@ -51,4 +39,6 @@ $ docker run -it --rm docker/dtr \ | `restore` | Create a new DTR cluster from an existing backup | | `backup` | Backup a DTR cluster to a tar file and stream it to stdout | | `migrate` | Migrate configurations, accounts, and repository metadata from DTR 1.4.3 to 2.0 | +| `upgrade` | Upgrade a v2.0.0 or later cluster to this version of DTR | | `dumpcerts` | Dump out the TLS certificates used by this DTR instance | +| `images` | Lists all the images necessary to install DTR | diff --git a/docker-trusted-registry/reference/install.md b/docker-trusted-registry/reference/install.md index ecc5adb2fa5..d3b5f09199a 100644 --- a/docker-trusted-registry/reference/install.md +++ b/docker-trusted-registry/reference/install.md @@ -16,7 +16,7 @@ Install Docker Trusted Registry on this Docker Engine ## Usage ```bash -$ docker run -it --rm docker/dtr \ +docker run -it --rm docker/dtr \ install [command options] ``` @@ -28,25 +28,29 @@ the 'join' command. ## Options -| Option | Description | -|:---------------------------|:-------------------------------------------------------------------------------------------------| -| `--ucp-url` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--http-proxy` | Set the HTTP proxy for outgoing requests [$DTR_HTTP_PROXY] | -| `--https-proxy` | Set the HTTPS proxy for outgoing requests [$DTR_HTTPS_PROXY] | -| `--no-proxy` | Set the list of domains to not proxy to [$DTR_NO_PROXY] | -| `--replica-http-port "0"` | Specify the public HTTP port for the DTR replica [$REPLICA_HTTP_PORT] | -| `--replica-https-port "0"` | Specify the public HTTPS port for the DTR replica [$REPLICA_HTTPS_PORT] | -| `--log-protocol` | The protocol for sending container logs: tcp, udp or internal. Default: internal [$LOG_PROTOCOL] | -| `--log-host` | Endpoint to send logs to, required if --log-protocol is tcp or udp [$LOG_HOST] | -| `--log-level` | Log level for container logs. Default: INFO [$LOG_LEVEL] | -| `--dtr-external-url` | Specify the domain name and port for the DTR load balancer [$DTR_LOAD_BALANCER] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--ucp-node` | Specify the host to install Docker Trusted Registry [$UCP_NODE] | -| `--replica-id` | Specify the replica Id. 
Must be unique per replica, leave blank for random [$DTR_REPLICA_ID] | -| `--unsafe` | Enable this flag to skip safety checks when installing or joining [$DTR_UNSAFE] | +| Option | Description | +|:----------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--http-proxy` | Set the HTTP proxy for outgoing requests | +| `--https-proxy` | Set the HTTPS proxy for outgoing requests | +| `--no-proxy` | Set the list of domains to not proxy to | +| `--replica-http-port` | Specify the public HTTP port for the DTR replica; 0 means unchanged/default | +| `--replica-https-port` | Specify the public HTTPS port for the DTR replica; 0 means unchanged/default | +| `--log-protocol` | The protocol for sending container logs: tcp, udp or internal. Default: internal | +| `--log-host` | Endpoint to send logs to, required if --log-protocol is tcp or udp | +| `--log-level` | Log level for container logs. Default: INFO | +| `--dtr-external-url` | Specify the external domain name and port for DTR. If using a load balancer, use its external URL instead. | +| `--etcd-heartbeat-interval` | Set etcd's frequency (ms) that its leader will notify followers that it is still the leader. | +| `--etcd-election-timeout` | Set etcd's timeout (ms) for how long a follower node will go without hearing a heartbeat before attempting to become leader itself. | +| `--etcd-snapshot-count` | Set etcd's number of changes before creating a snapshot. | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--ucp-node` | Specify the host to install Docker Trusted Registry | +| `--replica-id` | Specify the replica ID. Must be unique per replica, leave blank for random | +| `--unsafe` | Enable this flag to skip safety checks when installing or joining | +| `--extra-envs` | List of extra environment variables to use for deploying the DTR containers for the replica. This can be used to specify swarm constraints. Separate the environment variables with ampersands (&). You can escape actual ampersands with backslashes (\). Can't be used in combination with --ucp-node | diff --git a/docker-trusted-registry/reference/join.md b/docker-trusted-registry/reference/join.md index 41463c5c7f2..503e7e168ff 100644 --- a/docker-trusted-registry/reference/join.md +++ b/docker-trusted-registry/reference/join.md @@ -13,13 +13,6 @@ title: join Add a new replica to an existing DTR cluster -## Usage - -```bash -$ docker run -it --rm docker/dtr \ - join [command options] -``` - ## Description This command installs DTR on the Docker Engine that runs the command, @@ -28,21 +21,23 @@ and joins the new installation to an existing cluster. To set up a cluster with high-availability, add 3, 5, or 7 nodes to the cluster. 
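A non-interactive invocation might look like the following sketch. The UCP URL, credentials, node name, and replica ID are placeholders; any options you leave out are prompted for interactively.

```bash
docker run -it --rm docker/dtr \
  join \
  --ucp-url https://ucp.example.org:443 \
  --ucp-username admin \
  --ucp-ca "$(cat ucp-ca.pem)" \
  --ucp-node dtr-node-2 \
  --existing-replica-id 7aa7a1dd6b79
```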
- -| Option | Description | -|:---------------------------|:---------------------------------------------------------------------------------------------| -| `--ucp-url` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--ucp-node` | Specify the host to install Docker Trusted Registry [$UCP_NODE] | -| `--replica-id` | Specify the replica Id. Must be unique per replica, leave blank for random [$DTR_REPLICA_ID] | -| `--unsafe` | Enable this flag to skip safety checks when installing or joining [$DTR_UNSAFE] | -| `--existing-replica-id` | ID of an existing replica in a cluster [$DTR_EXISTING_REPLICA_ID] | -| `--replica-http-port "0"` | Specify the public HTTP port for the DTR replica [$REPLICA_HTTP_PORT] | -| `--replica-https-port "0"` | Specify the public HTTPS port for the DTR replica [$REPLICA_HTTPS_PORT] | -| `--skip-network-test` | Whether to skip the overlay networking test or not [$DTR_SKIP_NETWORK_TEST] | +## Options + +| Option | Description | +|:------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--ucp-node` | Specify the host to install Docker Trusted Registry | +| `--replica-id` | Specify the replica ID. Must be unique per replica, leave blank for random | +| `--unsafe` | Enable this flag to skip safety checks when installing or joining | +| `--existing-replica-id` | ID of an existing replica in a cluster | +| `--replica-http-port` | Specify the public HTTP port for the DTR replica; 0 means unchanged/default | +| `--replica-https-port` | Specify the public HTTPS port for the DTR replica; 0 means unchanged/default | +| `--skip-network-test` | Enable this flag to skip the overlay networking test | +| `--extra-envs` | List of extra environment variables to use for deploying the DTR containers for the replica. This can be used to specify swarm constraints. Separate the environment variables with ampersands (&). You can escape actual ampersands with backslashes (\). 
Can't be used in combination with --ucp-node | diff --git a/docker-trusted-registry/reference/menu.md b/docker-trusted-registry/reference/menu.md deleted file mode 100644 index d02f920b800..00000000000 --- a/docker-trusted-registry/reference/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Learn about the options available on the docker/trusted-registry image. -keywords: -- docker, dtr, install, uninstall, configure -menu: - main: - identifier: dtr_menu_reference - parent: dtr-menu-install - weight: 60 -title: docker/dtr image reference -type: menu ---- - - diff --git a/docker-trusted-registry/reference/migrate.md b/docker-trusted-registry/reference/migrate.md index 60ee05f6fcd..accb4d14376 100644 --- a/docker-trusted-registry/reference/migrate.md +++ b/docker-trusted-registry/reference/migrate.md @@ -16,7 +16,7 @@ Migrate configurations, accounts, and repository metadata from DTR 1.4.3 to 2.0 ## Usage ```bash -$ docker run -it --rm docker/dtr \ +docker run -it --rm -v /var/run/docker.sock:/var/run/docker.sock docker/dtr \ migrate [command options] ``` @@ -31,20 +31,22 @@ migrate the data to the new installation. Finally, you decommission your DTR 1.4.3 by uninstalling it. -| Option | Description | -|:-----------------------|:-------------------------------------------------------------------------------------| -| `--ucp-url ` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--run-full-migration` | Run full migration procedure instead of dumping configurations [$RUN_FULL_MIGRATION] | -| `--dtr-load-balancer` | Specify the domain name and port for the DTR load balancer [$DTR_LOAD_BALANCER] | -| `--dtr-insecure-tls` | Disable TLS verification for DTR [$DTR_INSECURE_TLS] | -| `--dtr-ca` | PEM-encoded TLS CA cert for DTR [$DTR_CA] | -| `--http-proxy` | Set the HTTP proxy for outgoing requests [$DTR_HTTP_PROXY] | -| `--https-proxy` | Set the HTTPS proxy for outgoing requests [$DTR_HTTPS_PROXY] | -| `--no-proxy` | Set the list of domains to not proxy to [$DTR_NO_PROXY] | +## Options + +| Option | Description | +|:-----------------------|:-----------------------------------------------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--run-full-migration` | Run full migration procedure instead of dumping configurations | +| `--dtr-external-url` | Specify the external domain name and port for DTR. If using a load balancer, use its external URL instead. 
| +| `--dtr-insecure-tls` | Disable TLS verification for DTR | +| `--dtr-ca` | PEM-encoded TLS CA cert for DTR | +| `--http-proxy` | Set the HTTP proxy for outgoing requests | +| `--https-proxy` | Set the HTTPS proxy for outgoing requests | +| `--no-proxy` | Set the list of domains to not proxy to | diff --git a/docker-trusted-registry/reference/reconfigure.md b/docker-trusted-registry/reference/reconfigure.md index 7e8c8250a7f..1ee2cb01e64 100644 --- a/docker-trusted-registry/reference/reconfigure.md +++ b/docker-trusted-registry/reference/reconfigure.md @@ -13,11 +13,11 @@ title: reconfigure Change DTR configurations -# Usage +## Usage ```bash -$ docker run -it --rm docker/dtr \ - reconfigure [command options] +docker run -it --rm docker/dtr \ + reconfigure [command options] ``` ## Description @@ -28,24 +28,28 @@ existing configurations in its original state. WARNING: DTR is restarted for the new configurations to take effect. To have no down time, configure your DTR for high-availability. - -| Option | Description | -|:---------------------------|:-------------------------------------------------------------------------------------------------| -| `--ucp-url` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--http-proxy` | Set the HTTP proxy for outgoing requests [$DTR_HTTP_PROXY] | -| `--https-proxy` | Set the HTTPS proxy for outgoing requests [$DTR_HTTPS_PROXY] | -| `--no-proxy` | Set the list of domains to not proxy to [$DTR_NO_PROXY] | -| `--replica-http-port "0"` | Specify the public HTTP port for the DTR replica [$REPLICA_HTTP_PORT] | -| `--replica-https-port "0"` | Specify the public HTTPS port for the DTR replica [$REPLICA_HTTPS_PORT] | -| `--log-protocol` | The protocol for sending container logs: tcp, udp or internal. Default: internal [$LOG_PROTOCOL] | -| `--log-host` | Endpoint to send logs to, required if --log-protocol is tcp or udp [$LOG_HOST] | -| `--log-level` | Log level for container logs. 
Default: INFO [$LOG_LEVEL] | -| `--dtr-load-balancer` | Specify the domain name and port for the DTR load balancer [$DTR_LOAD_BALANCER] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--existing-replica-id` | ID of an existing replica in a cluster [$DTR_EXISTING_REPLICA_ID] | +## Options + +| Option | Description | +|:----------------------------|:------------------------------------------------------------------------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--http-proxy` | Set the HTTP proxy for outgoing requests | +| `--https-proxy` | Set the HTTPS proxy for outgoing requests | +| `--no-proxy` | Set the list of domains to not proxy to | +| `--replica-http-port` | Specify the public HTTP port for the DTR replica; 0 means unchanged/default | +| `--replica-https-port` | Specify the public HTTPS port for the DTR replica; 0 means unchanged/default | +| `--log-protocol` | The protocol for sending container logs: tcp, udp or internal. Default: internal | +| `--log-host` | Endpoint to send logs to, required if --log-protocol is tcp or udp | +| `--log-level` | Log level for container logs. Default: INFO | +| `--dtr-external-url` | Specify the external domain name and port for DTR. If using a load balancer, use its external URL instead. | +| `--etcd-heartbeat-interval` | Set etcd's frequency (ms) that its leader will notify followers that it is still the leader. | +| `--etcd-election-timeout` | Set etcd's timeout (ms) for how long a follower node will go without hearing a heartbeat before attempting to become leader itself. | +| `--etcd-snapshot-count` | Set etcd's number of changes before creating a snapshot. | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--existing-replica-id` | ID of an existing replica in a cluster | diff --git a/docker-trusted-registry/reference/remove.md b/docker-trusted-registry/reference/remove.md index 00347ce175e..8a52e1fca1c 100644 --- a/docker-trusted-registry/reference/remove.md +++ b/docker-trusted-registry/reference/remove.md @@ -16,8 +16,8 @@ Remove a replica from a DTR cluster ## Usage ```bash -$ docker run -it --rm docker/dtr \ - remove [command options] +docker run -it --rm docker/dtr \ + remove [command options] ``` ## Description @@ -25,17 +25,18 @@ $ docker run -it --rm docker/dtr \ This command removes a replica from the cluster, stops and removes all DTR containers, and deletes all DTR volumes. 
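For illustration only, a removal might be invoked along the following lines. The UCP address, credentials, and replica IDs shown are placeholder values rather than values prescribed by this reference; substitute the details of your own cluster. The full set of supported flags is listed in the options table below.

```bash
# Sketch only: the URL, credentials, and replica IDs are placeholders
docker run -it --rm docker/dtr \
  remove \
  --ucp-url https://ucp.example.com:443 \
  --ucp-username admin \
  --ucp-password password \
  --existing-replica-id a1b2c3d4e5f6 \
  --replica-id f6e5d4c3b2a1
```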
-| Option | Description | -|:------------------------|:---------------------------------------------------------------------------------------------| -| `--ucp-url` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--ucp-node` | Specify the host to install Docker Trusted Registry [$UCP_NODE] | -| `--force-remove` | Force removal of replica even if it will break your cluster's state [$DTR_CONFIG_ONLY] | -| `--replica-id` | Specify the replica Id. Must be unique per replica, leave blank for random [$DTR_REPLICA_ID] | -| `--existing-replica-id` | ID of an existing replica in a cluster [$DTR_EXISTING_REPLICA_ID] | +## Options + +| Option | Description | +|:------------------------|:-------------------------------------------------------------------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--force-remove` | Force removal of replica even if it can break your cluster's state. Necessary only when --existing-replica-id == --replica-id. | +| `--replica-id` | Specify the replica ID. Must be unique per replica, leave blank for random | +| `--existing-replica-id` | ID of an existing replica in a cluster | diff --git a/docker-trusted-registry/reference/restore.md b/docker-trusted-registry/reference/restore.md index f663f865b98..1b4921aaee6 100644 --- a/docker-trusted-registry/reference/restore.md +++ b/docker-trusted-registry/reference/restore.md @@ -16,8 +16,8 @@ Create a new DTR cluster from an existing backup ## Usage ```bash -$ docker run -i --rm docker/dtr \ - restore [command options] < backup.tar +docker run -i --rm docker/dtr \ + restore [command options] < backup.tar ``` ## Description @@ -37,25 +37,30 @@ After restoring DTR, you can add more nodes to the DTR cluster with the 'join' command. 
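As a sketch only, restoring from an existing backup file might look like the following. The UCP address, credentials, and node name are illustrative placeholders rather than values prescribed by this reference; the backup file is read from standard input as shown in the usage above. The full set of supported flags follows in the options table.

```bash
# Sketch only: the URL, credentials, and node name are placeholders
docker run -i --rm docker/dtr \
  restore \
  --ucp-url https://ucp.example.com:443 \
  --ucp-username admin \
  --ucp-password password \
  --ucp-node node1.example.com \
  < backup.tar
```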
-| Option | Description | -|:---------------------------|:-------------------------------------------------------------------------------------------------| -| `--ucp-url` | Specify the UCP controller URL [$UCP_URL] | -| `--ucp-username` | Specify the UCP admin username [$UCP_USERNAME] | -| `--ucp-password` | Specify the UCP admin password [$UCP_PASSWORD] | -| `--debug` | Enable debug mode, provides additional logging [$DEBUG] | -| `--hub-username` | Specify the Docker Hub username for pulling images [$HUB_USERNAME] | -| `--hub-password` | Specify the Docker Hub password for pulling images [$HUB_PASSWORD] | -| `--http-proxy` | Set the HTTP proxy for outgoing requests [$DTR_HTTP_PROXY] | -| `--https-proxy` | Set the HTTPS proxy for outgoing requests [$DTR_HTTPS_PROXY] | -| `--no-proxy` | Set the list of domains to not proxy to [$DTR_NO_PROXY] | -| `--replica-http-port "0"` | Specify the public HTTP port for the DTR replica [$REPLICA_HTTP_PORT] | -| `--replica-https-port "0"` | Specify the public HTTPS port for the DTR replica [$REPLICA_HTTPS_PORT] | -| `--log-protocol` | The protocol for sending container logs: tcp, udp or internal. Default: internal [$LOG_PROTOCOL] | -| `--log-host` | Endpoint to send logs to, required if --log-protocol is tcp or udp [$LOG_HOST] | -| `--log-level` | Log level for container logs. Default: INFO [$LOG_LEVEL] | -| `--dtr-load-balancer` | Specify the domain name and port for the DTR load balancer [$DTR_LOAD_BALANCER] | -| `--ucp-insecure-tls` | Disable TLS verification for UCP [$UCP_INSECURE_TLS] | -| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP [$UCP_CA] | -| `--ucp-node` | Specify the host to install Docker Trusted Registry [$UCP_NODE] | -| `--replica-id` | Specify the replica Id. Must be unique per replica, leave blank for random [$DTR_REPLICA_ID] | -| `--config-only` | Backup/restore only the configurations of DTR and not the database [$DTR_CONFIG_ONLY] | +## Options + +| Option | Description | +|:----------------------------|:------------------------------------------------------------------------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--http-proxy` | Set the HTTP proxy for outgoing requests | +| `--https-proxy` | Set the HTTPS proxy for outgoing requests | +| `--no-proxy` | Set the list of domains to not proxy to | +| `--replica-http-port` | Specify the public HTTP port for the DTR replica; 0 means unchanged/default | +| `--replica-https-port` | Specify the public HTTPS port for the DTR replica; 0 means unchanged/default | +| `--log-protocol` | The protocol for sending container logs: tcp, udp or internal. Default: internal | +| `--log-host` | Endpoint to send logs to, required if --log-protocol is tcp or udp | +| `--log-level` | Log level for container logs. Default: INFO | +| `--dtr-external-url` | Specify the external domain name and port for DTR. If using a load balancer, use its external URL instead. | +| `--etcd-heartbeat-interval` | Set etcd's frequency (ms) that its leader will notify followers that it is still the leader. 
| +| `--etcd-election-timeout` | Set etcd's timeout (ms) for how long a follower node will go without hearing a heartbeat before attempting to become leader itself. | +| `--etcd-snapshot-count` | Set etcd's number of changes before creating a snapshot. | +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--ucp-node` | Specify the host to install Docker Trusted Registry | +| `--replica-id` | Specify the replica ID. Must be unique per replica, leave blank for random | +| `--config-only` | Backup/restore only the configurations of DTR and not the database | diff --git a/docker-trusted-registry/reference/upgrade.md b/docker-trusted-registry/reference/upgrade.md new file mode 100644 index 00000000000..5433f6ee724 --- /dev/null +++ b/docker-trusted-registry/reference/upgrade.md @@ -0,0 +1,54 @@ +--- +description: Docker Trusted Registry upgrade command reference. +keywords: +- docker, registry, restore, upgrade +menu: + main: + identifier: dtr_reference_upgrade + parent: dtr_menu_reference +title: upgrade +--- + +# docker/dtr upgrade + +Upgrade a v2.0.0 or later cluster to this version of DTR + +## Usage + +```bash +docker run -it --rm docker/dtr \ + upgrade [command options] +``` + +## Description + + +This command upgrades an existing DTR 2.0.0 or later cluster to the current version of +this bootstrapper. + + +## Options + +| Option | Description | +|:----------------------------|:------------------------------------------------------------------------------------------------------------------------------------| +| `--ucp-url` | Specify the UCP controller URL including domain and port | +| `--ucp-username` | Specify the UCP admin username | +| `--ucp-password` | Specify the UCP admin password | +| `--debug` | Enable debug mode, provides additional logging | +| `--hub-username` | Specify the Docker Hub username for pulling images | +| `--hub-password` | Specify the Docker Hub password for pulling images | +| `--http-proxy` | Set the HTTP proxy for outgoing requests | +| `--https-proxy` | Set the HTTPS proxy for outgoing requests | +| `--no-proxy` | Set the list of domains to not proxy to | +| `--replica-http-port` | Specify the public HTTP port for the DTR replica; 0 means unchanged/default | +| `--replica-https-port` | Specify the public HTTPS port for the DTR replica; 0 means unchanged/default | +| `--log-protocol` | The protocol for sending container logs: tcp, udp or internal. Default: internal | +| `--log-host` | Endpoint to send logs to, required if --log-protocol is tcp or udp | +| `--log-level` | Log level for container logs. Default: INFO | +| `--dtr-external-url` | Specify the external domain name and port for DTR. If using a load balancer, use its external URL instead. | +| `--etcd-heartbeat-interval` | Set etcd's frequency (ms) that its leader will notify followers that it is still the leader. | +| `--etcd-election-timeout` | Set etcd's timeout (ms) for how long a follower node will go without hearing a heartbeat before attempting to become leader itself. | +| `--etcd-snapshot-count` | Set etcd's number of changes before creating a snapshot. 
| +| `--ucp-insecure-tls` | Disable TLS verification for UCP | +| `--ucp-ca` | Use a PEM-encoded TLS CA certificate for UCP | +| `--existing-replica-id` | ID of an existing replica in a cluster | diff --git a/docker-trusted-registry/release-notes/index.md b/docker-trusted-registry/release-notes/index.md index bebef145e08..b1313226a5c 100644 --- a/docker-trusted-registry/release-notes/index.md +++ b/docker-trusted-registry/release-notes/index.md @@ -18,9 +18,28 @@ title: Trusted Registry release notes Here you can learn about new features, bug fixes, breaking changes and known issues for each DTR version. -You can then use [the upgrade instructions](../install/upgrade/upgrade-major.md), +You can then use [the upgrade instructions](../install/upgrade/index.md), to upgrade your installation to the latest release. +## Version 2.0.4 + +(13 Oct 2016) + +**General improvements** + +* Increased limits on pagination for all lists in the UI +* Improved health check endpoints to report DTR status more accurately + +**Bug fixes** + +* You can now edit the permissions for a team member +* Fixed issue that prevented DTR from being installed in Docker Engine 1.12 +* Several improvements to the migrate command +* Improved the reconfigure command to allow tuning the key-value store heartbeat +interval, election timeout, and snapshot count +* Users can now pull from public repositories in an organization namespace +without having to authenticate. Requires UCP 1.1.4 + ## Version 2.0.3 August 11, 2016 diff --git a/docker-trusted-registry/release-notes/menu.md b/docker-trusted-registry/release-notes/menu.md deleted file mode 100644 index 90c49fc24eb..00000000000 --- a/docker-trusted-registry/release-notes/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Learn about the latest versions of Docker Trusted Registry. -keywords: -- docker, dtr, release notes, upgrade -menu: - main: - identifier: dtr_menu_release_notes - parent: workw_dtr - weight: 110 -title: Release notes -type: menu ---- - - diff --git a/docker-trusted-registry/release-notes/prior-release-notes.md b/docker-trusted-registry/release-notes/prior-release-notes.md index 5e9e4d8b101..b421fd7c153 100644 --- a/docker-trusted-registry/release-notes/prior-release-notes.md +++ b/docker-trusted-registry/release-notes/prior-release-notes.md @@ -106,9 +106,9 @@ Settings > Auth to perform the sync. * Fixed an issue where Trusted Registry administrators could not list all repositories in the registries. To list them, you must use the `catalog` API using a `bash` shell. The following example lists repositories in a Trusted Registry located at my.dtr.host where the user `admin` has password `password`. - ``` +```none bash -c 'host=vagrant.host admin=admin password=password token=$(curl -u $admin:$password -k "https://$host/auth/token?service=$host&scope=registry:catalog:*" | python2 -c "import json,sys;obj=json.load(sys.stdin);print obj[\"token\"]") && curl -k -H "Authorization: Bearer $token" "https://$host/v2/_catalog"' - ``` +``` ## Version 1.4.3 diff --git a/docker-trusted-registry/repos-and-images/menu.md b/docker-trusted-registry/repos-and-images/menu.md deleted file mode 100644 index 4c6eb4f7eda..00000000000 --- a/docker-trusted-registry/repos-and-images/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Learn how to manage your repositories and images on Docker Trusted Registry. 
-keywords: -- docker, registry, management, repository, image -menu: - main: - identifier: dtr_menu_repos_and_images - parent: workw_dtr - weight: 90 -title: Repositories and images -type: menu ---- - - diff --git a/docker-trusted-registry/user-management/menu.md b/docker-trusted-registry/user-management/menu.md deleted file mode 100644 index aa33617b82f..00000000000 --- a/docker-trusted-registry/user-management/menu.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -description: Learn how to manage user permissions on Docker Trusted Registry. -keywords: -- docker, registry, management, security, users -menu: - main: - identifier: dtr_menu_user_management - parent: workw_dtr - weight: 80 -title: User management -type: menu ---- - - diff --git a/engine/admin/ansible.md b/engine/admin/ansible.md new file mode 100644 index 00000000000..1fd7fdcba08 --- /dev/null +++ b/engine/admin/ansible.md @@ -0,0 +1,67 @@ +--- +description: Installation and using Docker via Ansible +keywords: +- ansible, installation, usage, docker, documentation +title: Using Ansible +--- + +# Using Ansible + +> **Note**: +> Please note this is a community contributed installation path. + +## Requirements + +To use this guide you'll need a working installation of +[Ansible](https://www.ansible.com/) version 2.1.0 or later. + +Requirements on the host that will execute the module: + +``` +python >= 2.6 +docker-py >= 1.7.0 +Docker API >= 1.20 +``` + +## Installation + +The `docker_container` module is a core module, and will ship with +Ansible by default. + +## Usage + +Task example that pulls the latest version of the `nginx` image and +runs a container. Bind address and ports are in the example defined +as [a variable](https://docs.ansible.com/ansible/playbooks_variables.html). + +``` +--- +- name: nginx container + docker: + name: nginx + image: nginx + state: reloaded + ports: + - "{{ nginx_bind_address }}:{{ nginx_port }}:{{ nginx_port }}" + cap_drop: all + cap_add: + - setgid + - setuid + pull: always + restart_policy: on-failure + restart_policy_retry: 3 + volumes: + - /some/nginx.conf:/etc/nginx/nginx.conf:ro + tags: + - docker_container + - nginx +... +``` + +## Documentation + +The documentation for the `ansible_container` module is present at +[docs.ansible.com](https://docs.ansible.com/ansible/docker_container_module.html). + +Documentation covering Docker images, networks and services is also present +at [docs.ansible.com](https://docs.ansible.com/ansible/list_of_cloud_modules.html#docker). diff --git a/engine/admin/b2d_volume_resize.md b/engine/admin/b2d_volume_resize.md index 347d1d48467..3276e2a4d2a 100644 --- a/engine/admin/b2d_volume_resize.md +++ b/engine/admin/b2d_volume_resize.md @@ -1,13 +1,13 @@ - +--- +description: Resizing a Boot2Docker volume in VirtualBox with GParted +published: false +keywords: +- boot2docker, volume, virtualbox +menu: + main: + parent: smn_win_osx +title: "Resizing a Boot2Docker volume\t" +--- # Getting "no space left on device" errors with Boot2Docker? diff --git a/engine/admin/formatting.md b/engine/admin/formatting.md index 08a70ab32c3..f11679156b9 100644 --- a/engine/admin/formatting.md +++ b/engine/admin/formatting.md @@ -1,13 +1,9 @@ - +--- +description: CLI and log output formatting reference +keywords: +- format, formatting, output, templates, log +title: Format command and log output +--- # Formatting reference @@ -52,7 +48,8 @@ Lower turns a string into its lower case representation. Split slices a string into a list of strings separated by a separator. 
- # docker inspect --format '{{split (join .Names "/") "/"}}' container + $ docker inspect --format '{{split (join .Names "/") "/"}}' container + {% endraw %} ### Title diff --git a/engine/admin/index.md b/engine/admin/index.md index e65807f9cf5..60d6fab819c 100644 --- a/engine/admin/index.md +++ b/engine/admin/index.md @@ -1,17 +1,12 @@ - +--- +aliases: +- /engine/articles/configuring/ +- /engine/admin/configuring/ +description: Configuring and running the Docker daemon on various distributions +keywords: +- docker, daemon, configuration, running, process managers +title: Configuring and running Docker +--- # Configuring and running Docker on various distributions @@ -28,13 +23,15 @@ or `systemd` to manage the `docker` daemon's start and stop. The Docker daemon can be run directly using the `dockerd` command. By default it listens on the Unix socket `unix:///var/run/docker.sock` - $ dockerd +```bash +$ dockerd - INFO[0000] +job init_networkdriver() - INFO[0000] +job serveapi(unix:///var/run/docker.sock) - INFO[0000] Listening for HTTP on unix (/var/run/docker.sock) - ... - ... +INFO[0000] +job init_networkdriver() +INFO[0000] +job serveapi(unix:///var/run/docker.sock) +INFO[0000] Listening for HTTP on unix (/var/run/docker.sock) +... +... +``` ### Configuring the docker daemon directly @@ -46,14 +43,15 @@ Some of the daemon's options are: | Flag | Description | |-----------------------|-----------------------------------------------------------| -| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. | +| `-D`, `--debug=false` | Enable or disable debug mode. By default, this is false. | | `-H`,`--host=[]` | Daemon socket(s) to connect to. | | `--tls=false` | Enable or disable TLS. By default, this is false. | Here is an example of running the Docker daemon with configuration options: - - $ dockerd -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376 +```bash +$ dockerd -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376 +``` These options : @@ -75,8 +73,8 @@ command on Linux systems. For example, `kill -USR1 ` sends the `SIGU signal to the daemon process, causing the stack dump to be added to the daemon log. > **Note:** The log level setting of the daemon must be at least "info" level and above for -> the stack trace to be saved to the logfile. By default the daemon's log level is set to -> "info". +the stack trace to be saved to the logfile. By default the daemon's log level is set to +"info". The daemon will continue operating after handling the `SIGUSR1` signal and dumping the stack traces to the log. The stack traces can be used to determine the state of all goroutines and @@ -89,21 +87,21 @@ are located in `/etc/init` and the `docker` Upstart job can be found at `/etc/i After successfully [installing Docker for Ubuntu](../installation/linux/ubuntulinux.md), you can check the running status using Upstart in this way: +```bash +$ sudo status docker - $ sudo status docker - - docker start/running, process 989 - +docker start/running, process 989 +``` ### Running Docker You can start/stop/restart the `docker` daemon using +```bash +$ sudo start docker - $ sudo start docker - - $ sudo stop docker - - $ sudo restart docker +$ sudo stop docker +$ sudo restart docker +``` ### Configuring Docker @@ -121,40 +119,40 @@ To configure Docker options: 2. If you don't have one, create the `/etc/default/docker` file on your host. 
Depending on how you installed Docker, you may already have this file. -3. Open the file with your favorite editor. +3. Open the file with your favorite editor. - ``` + ```bash $ sudo vi /etc/default/docker ``` -4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the +4. Add a `DOCKER_OPTS` variable with the following options. These options are appended to the `docker` daemon's run command. -``` + ```bash DOCKER_OPTS="-D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376" -``` + ``` -These options : + These options : -- Enable `-D` (debug) mode -- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively -- Listen for connections on `tcp://192.168.59.3:2376` + - Enable `-D` (debug) mode + - Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively + - Listen for connections on `tcp://192.168.59.3:2376` -The command line reference has the [complete list of daemon flags](../reference/commandline/dockerd.md) -with explanations. + The command line reference has the [complete list of daemon flags](../reference/commandline/dockerd.md) + with explanations. -5. Save and close the file. +5. Save and close the file. -6. Restart the `docker` daemon. +6. Restart the `docker` daemon. - ``` + ```bash $ sudo restart docker ``` 7. Verify that the `docker` daemon is running as specified with the `ps` command. - ``` + ```bash $ ps aux | grep docker | grep -v grep ``` @@ -163,13 +161,15 @@ with explanations. By default logs for Upstart jobs are located in `/var/log/upstart` and the logs for `docker` daemon can be located at `/var/log/upstart/docker.log` - $ tail -f /var/log/upstart/docker.log - INFO[0000] Loading containers: done. - INFO[0000] Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev - INFO[0000] +job acceptconnections() - INFO[0000] -job acceptconnections() = OK (0) - INFO[0000] Daemon has completed initialization +```bash +$ tail -f /var/log/upstart/docker.log +INFO[0000] Loading containers: done. +INFO[0000] Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev +INFO[0000] +job acceptconnections() +INFO[0000] -job acceptconnections() = OK (0) +INFO[0000] Daemon has completed initialization +``` ## CentOS / Red Hat Enterprise Linux / Fedora @@ -177,23 +177,23 @@ As of `7.x`, CentOS and RHEL use `systemd` as the process manager. As of `21`, F `systemd` as its process manager. After successfully installing Docker for [CentOS](../installation/linux/centos.md)/[Red Hat Enterprise Linux](../installation/linux/rhel.md)/[Fedora](../installation/linux/fedora.md), you can check the running status in this way: - - $ sudo systemctl status docker - +```bash +$ sudo systemctl status docker +``` ### Running Docker You can start/stop/restart the `docker` daemon using +```bash +$ sudo systemctl start docker - $ sudo systemctl start docker - - $ sudo systemctl stop docker - - $ sudo systemctl restart docker +$ sudo systemctl stop docker +$ sudo systemctl restart docker +``` If you want Docker to start at boot, you should also: - - $ sudo systemctl enable docker - +```bash +$ sudo systemctl enable docker +``` ### Configuring Docker For CentOS 7.x and RHEL 7.x you can [control and configure Docker with systemd](systemd.md). @@ -208,58 +208,58 @@ For this section, we will use CentOS 7.x as an example to configure the `docker` To configure Docker options: -1. 
Log into your host as a user with `sudo` or `root` privileges. +1. Log into your host as a user with `sudo` or `root` privileges. -2. Create the `/etc/systemd/system/docker.service.d` directory. +2. Create the `/etc/systemd/system/docker.service.d` directory. - ``` + ```bash $ sudo mkdir /etc/systemd/system/docker.service.d ``` -3. Create a `/etc/systemd/system/docker.service.d/docker.conf` file. +3. Create a `/etc/systemd/system/docker.service.d/docker.conf` file. -4. Open the file with your favorite editor. +4. Open the file with your favorite editor. - ``` + ```bash $ sudo vi /etc/systemd/system/docker.service.d/docker.conf ``` -5. Override the `ExecStart` configuration from your `docker.service` file to customize -the `docker` daemon. To modify the `ExecStart` configuration you have to specify -an empty configuration followed by a new one as follows: +5. Override the `ExecStart` configuration from your `docker.service` file to customize + the `docker` daemon. To modify the `ExecStart` configuration you have to specify + an empty configuration followed by a new one as follows: -``` -[Service] -ExecStart= -ExecStart=/usr/bin/dockerd -H fd:// -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376 -``` + ```bash + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// -D --tls=true --tlscert=/var/docker/server.pem --tlskey=/var/docker/serverkey.pem -H tcp://192.168.59.3:2376 + ``` -These options : + These options : -- Enable `-D` (debug) mode -- Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively -- Listen for connections on `tcp://192.168.59.3:2376` + - Enable `-D` (debug) mode + - Set `tls` to true with the server certificate and key specified using `--tlscert` and `--tlskey` respectively + - Listen for connections on `tcp://192.168.59.3:2376` -The command line reference has the [complete list of daemon flags](../reference/commandline/dockerd.md) -with explanations. + The command line reference has the [complete list of daemon flags](../reference/commandline/dockerd.md) + with explanations. -6. Save and close the file. +6. Save and close the file. -7. Flush changes. +7. Flush changes. - ``` + ```bash $ sudo systemctl daemon-reload ``` -8. Restart the `docker` daemon. +8. Restart the `docker` daemon. - ``` + ```bash $ sudo systemctl restart docker ``` -9. Verify that the `docker` daemon is running as specified with the `ps` command. +9. Verify that the `docker` daemon is running as specified with the `ps` command. - ``` + ```bash $ ps aux | grep docker | grep -v grep ``` @@ -267,17 +267,18 @@ with explanations. systemd has its own logging system called the journal. The logs for the `docker` daemon can be viewed using `journalctl -u docker` - - $ sudo journalctl -u docker - May 06 00:22:05 localhost.localdomain systemd[1]: Starting Docker Application Container Engine... 
- May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="+job serveapi(unix:///var/run/docker.sock)" - May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="Listening for HTTP on unix (/var/run/docker.sock)" - May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job init_networkdriver()" - May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)" - May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start." - May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done." - May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev" - May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()" - May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)" +```no-highlight +$ sudo journalctl -u docker +May 06 00:22:05 localhost.localdomain systemd[1]: Starting Docker Application Container Engine... +May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="+job serveapi(unix:///var/run/docker.sock)" +May 06 00:22:05 localhost.localdomain docker[2495]: time="2015-05-06T00:22:05Z" level="info" msg="Listening for HTTP on unix (/var/run/docker.sock)" +May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job init_networkdriver()" +May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job init_networkdriver() = OK (0)" +May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: start." +May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Loading containers: done." +May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="Docker daemon commit=1b09a95-unsupported graphdriver=aufs version=1.11.0-dev" +May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="+job acceptconnections()" +May 06 00:22:06 localhost.localdomain docker[2495]: time="2015-05-06T00:22:06Z" level="info" msg="-job acceptconnections() = OK (0)" +``` _Note: Using and configuring journal is an advanced topic and is beyond the scope of this article._ diff --git a/engine/admin/logging/fluentd.md b/engine/admin/logging/fluentd.md index 575fb0efe4e..cb56dd783e9 100644 --- a/engine/admin/logging/fluentd.md +++ b/engine/admin/logging/fluentd.md @@ -1,13 +1,15 @@ - +--- +aliases: +- /engine/reference/logging/fluentd/ +- /reference/logging/fluentd/ +description: Describes how to use the fluentd logging driver. 
+keywords: +- Fluentd, docker, logging, driver +menu: + main: + parent: smn_logging +title: Fluentd logging driver +--- # Fluentd logging driver diff --git a/engine/admin/logging/overview.md b/engine/admin/logging/overview.md index c90d881850a..6e9c317da99 100644 --- a/engine/admin/logging/overview.md +++ b/engine/admin/logging/overview.md @@ -1,15 +1,11 @@ - - +--- +aliases: +- /engine/reference/logging/overview/ +description: Configure logging driver. +keywords: +- docker, logging, driver, Fluentd +title: Configuring Logging Drivers +--- # Configure logging drivers @@ -46,9 +42,9 @@ attributes in the output, run the following command: ```bash $ dockerd \ - --log-driver=json-file \ - --log-opt labels=foo \ - --log-opt env=foo,fizz + --log-driver=json-file \ + --log-opt labels=foo \ + --log-opt env=foo,fizz ``` Then, run a container and specify values for the `labels` or `env`. For @@ -70,7 +66,7 @@ This adds additional fields to the log depending on the driver, e.g. for The following logging options are supported for the `json-file` logging driver: -```bash +```no-highlight --log-opt max-size=[0-9]+[kmg] --log-opt max-file=[0-9]+ --log-opt labels=label1,label2 @@ -93,7 +89,7 @@ from the newest log file. The following logging options are supported for the `syslog` logging driver: -```bash +```no-highlight --log-opt syslog-address=[tcp|udp|tcp+tls]://host:port --log-opt syslog-address=unix://path --log-opt syslog-address=unixgram://path @@ -186,7 +182,7 @@ driver, see [the journald logging driver](journald.md) reference documentation. The GELF logging driver supports the following options: -```bash +```no-highlight --log-opt gelf-address=udp://host:port --log-opt tag="database" --log-opt labels=label1,label2 @@ -202,9 +198,9 @@ must specify a `port` value. The following example shows how to connect the ```bash $ docker run -dit \ - --log-driver=gelf \ - --log-opt gelf-address=udp://192.168.0.42:12201 \ - alpine sh + --log-driver=gelf \ + --log-opt gelf-address=udp://192.168.0.42:12201 \ + alpine sh ``` By default, Docker uses the first 12 characters of the container ID to tag log @@ -214,12 +210,12 @@ customizing the log tag format. The `labels` and `env` options are supported by the gelf logging driver. It adds additional key on the `extra` fields, prefixed by an underscore (`_`). - - // […] - "_foo": "bar", - "_fizz": "buzz", - // […] - +```json +// […] +"_foo": "bar", +"_fizz": "buzz", +// […] +``` The `gelf-compression-type` option can be used to change how the GELF driver compresses each log message. The accepted values are `gzip`, `zlib` and `none`. `gzip` is chosen by default. @@ -279,7 +275,7 @@ logging driver](awslogs.md) reference documentation. The Splunk logging driver requires the following options: -```bash +```no-highlight --log-opt splunk-token= --log-opt splunk-url=https://your_splunk_instance:8088 ``` @@ -301,7 +297,7 @@ reference documentation. The Google Cloud Logging driver supports the following options: -```bash +```no-highlight --log-opt gcp-project= --log-opt labels=, --log-opt env=, diff --git a/engine/admin/logging/splunk.md b/engine/admin/logging/splunk.md index e081512d2c2..4635c7f9692 100644 --- a/engine/admin/logging/splunk.md +++ b/engine/admin/logging/splunk.md @@ -1,13 +1,11 @@ - +--- +aliases: +- /engine/reference/logging/splunk/ +description: Describes how to use the Splunk logging driver. +keywords: +- splunk, docker, logging, driver +title: Splunk logging driver +--- # Splunk logging driver @@ -60,16 +58,16 @@ specified using an HTTPS scheme. 
This is used for verification. The `SplunkServerDefaultCert` is automatically generated by Splunk certificates. docker run --log-driver=splunk \ - --log-opt splunk-token=176FCEBF-4CF5-4EDF-91BC-703796522D20 \ - --log-opt splunk-url=https://splunkhost:8088 \ - --log-opt splunk-capath=/path/to/cert/cacert.pem \ - --log-opt splunk-caname=SplunkServerDefaultCert - --log-opt tag="{{.Name}}/{{.FullID}}" - --log-opt labels=location - --log-opt env=TEST - --env "TEST=false" - --label location=west - your/application + --log-opt splunk-token=176FCEBF-4CF5-4EDF-91BC-703796522D20 \ + --log-opt splunk-url=https://splunkhost:8088 \ + --log-opt splunk-capath=/path/to/cert/cacert.pem \ + --log-opt splunk-caname=SplunkServerDefaultCert + --log-opt tag="{{.Name}}/{{.FullID}}" + --log-opt labels=location + --log-opt env=TEST + --env "TEST=false" + --label location=west + your/application ### Message formats diff --git a/engine/admin/menu.md b/engine/admin/menu.md deleted file mode 100644 index 103b67b7e76..00000000000 --- a/engine/admin/menu.md +++ /dev/null @@ -1,27 +0,0 @@ - - -# Admin Topics - -* [Configuring and running Docker](index.md) -* [Automatically start containers](host_integration.md) -* [Keep containers alive during daemon downtime](live-restore.md) -* [Control and configure Docker with systemd](systemd.md) -* [Format command and log output](formatting.md) -* [Run a local registry mirror](registry_mirror.md) -* [PowerShell DSC Usage](dsc.md) -* [Using Chef](chef.md) -* [Using Puppet](puppet.md) -* [Using Supervisor with Docker](using_supervisord.md) -* [Runtime metrics](runmetrics.md) -* [Link via an ambassador container](ambassador_pattern_linking.md) diff --git a/engine/admin/systemd.md b/engine/admin/systemd.md index 5dd9ec48220..4a8ea003b27 100644 --- a/engine/admin/systemd.md +++ b/engine/admin/systemd.md @@ -1,14 +1,11 @@ - +--- +aliases: +- /engine/articles/systemd/ +description: Controlling and configuring Docker using systemd +keywords: +- docker, daemon, systemd, configuration +title: Control and configure Docker with systemd +--- # Control and configure Docker with systemd @@ -18,17 +15,17 @@ shows a few examples of how to customize Docker's settings. ## Starting the Docker daemon Once Docker is installed, you will need to start the Docker daemon. 
- - $ sudo systemctl start docker - # or on older distributions, you may need to use - $ sudo service docker start - +```bash +$ sudo systemctl start docker +# or on older distributions, you may need to use +$ sudo service docker start +``` If you want Docker to start at boot, you should also: - - $ sudo systemctl enable docker - # or on older distributions, you may need to use - $ sudo chkconfig docker on - +```bash +$ sudo systemctl enable docker +# or on older distributions, you may need to use +$ sudo chkconfig docker on +``` ## Custom Docker daemon options There are a number of ways to configure the daemon flags and environment variables @@ -48,28 +45,38 @@ backwards compatibility, you drop a file with a `.conf` extension into the `/etc/systemd/system/docker.service.d` directory including the following: - [Service] - EnvironmentFile=-/etc/sysconfig/docker - EnvironmentFile=-/etc/sysconfig/docker-storage - EnvironmentFile=-/etc/sysconfig/docker-network - ExecStart= - ExecStart=/usr/bin/dockerd $OPTIONS \ - $DOCKER_STORAGE_OPTIONS \ - $DOCKER_NETWORK_OPTIONS \ - $BLOCK_REGISTRY \ - $INSECURE_REGISTRY +```conf +[Service] +EnvironmentFile=-/etc/sysconfig/docker +EnvironmentFile=-/etc/sysconfig/docker-storage +EnvironmentFile=-/etc/sysconfig/docker-network +ExecStart= +ExecStart=/usr/bin/dockerd $OPTIONS \ + $DOCKER_STORAGE_OPTIONS \ + $DOCKER_NETWORK_OPTIONS \ + $BLOCK_REGISTRY \ + $INSECURE_REGISTRY +``` To check if the `docker.service` uses an `EnvironmentFile`: - $ systemctl show docker | grep EnvironmentFile - EnvironmentFile=-/etc/sysconfig/docker (ignore_errors=yes) +```bash +$ systemctl show docker | grep EnvironmentFile + +EnvironmentFile=-/etc/sysconfig/docker (ignore_errors=yes) +``` Alternatively, find out where the service file is located: - $ systemctl show --property=FragmentPath docker - FragmentPath=/usr/lib/systemd/system/docker.service - $ grep EnvironmentFile /usr/lib/systemd/system/docker.service - EnvironmentFile=-/etc/sysconfig/docker +```bash +$ systemctl show --property=FragmentPath docker + +FragmentPath=/usr/lib/systemd/system/docker.service + +$ grep EnvironmentFile /usr/lib/systemd/system/docker.service + +EnvironmentFile=-/etc/sysconfig/docker +``` You can customize the Docker daemon options using override files as explained in the [HTTP Proxy example](#http-proxy) below. The files located in `/usr/lib/systemd/system` @@ -82,42 +89,46 @@ and volumes by moving it to a separate partition. In this example, we'll assume that your `docker.service` file looks something like: - [Unit] - Description=Docker Application Container Engine - Documentation=https://docs.docker.com - After=network.target - - [Service] - Type=notify - # the default is not to use systemd for cgroups because the delegate issues still - # exists and systemd currently does not support the cgroup feature set required - # for containers run by docker - ExecStart=/usr/bin/dockerd - ExecReload=/bin/kill -s HUP $MAINPID - # Having non-zero Limit*s causes performance problems due to accounting overhead - # in the kernel. We recommend using cgroups to do container-local accounting. - LimitNOFILE=infinity - LimitNPROC=infinity - LimitCORE=infinity - # Uncomment TasksMax if your systemd version supports it. - # Only systemd 226 and above support this version. 
- #TasksMax=infinity - TimeoutStartSec=0 - # set delegate yes so that systemd does not reset the cgroups of docker containers - Delegate=yes - # kill only the docker process, not all processes in the cgroup - KillMode=process - - [Install] - WantedBy=multi-user.target +```conf +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target + +[Service] +Type=notify +# the default is not to use systemd for cgroups because the delegate issues still +# exists and systemd currently does not support the cgroup feature set required +# for containers run by docker +ExecStart=/usr/bin/dockerd +ExecReload=/bin/kill -s HUP $MAINPID +# Having non-zero Limit*s causes performance problems due to accounting overhead +# in the kernel. We recommend using cgroups to do container-local accounting. +LimitNOFILE=infinity +LimitNPROC=infinity +LimitCORE=infinity +# Uncomment TasksMax if your systemd version supports it. +# Only systemd 226 and above support this version. +#TasksMax=infinity +TimeoutStartSec=0 +# set delegate yes so that systemd does not reset the cgroups of docker containers +Delegate=yes +# kill only the docker process, not all processes in the cgroup +KillMode=process + +[Install] +WantedBy=multi-user.target +``` This will allow us to add extra flags via a drop-in file (mentioned above) by placing a file containing the following in the `/etc/systemd/system/docker.service.d` directory: - [Service] - ExecStart= - ExecStart=/usr/bin/dockerd --graph="/mnt/docker-data" --storage-driver=overlay +```conf +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd --graph="/mnt/docker-data" --storage-driver=overlay +``` You can also set other environment variables in this file, for example, the `HTTP_PROXY` environment variables described below. @@ -125,13 +136,17 @@ You can also set other environment variables in this file, for example, the To modify the ExecStart configuration, specify an empty configuration followed by a new configuration as follows: - [Service] - ExecStart= - ExecStart=/usr/bin/dockerd --bip=172.17.42.1/16 +```conf +[Service] +ExecStart= +ExecStart=/usr/bin/dockerd --bip=172.17.42.1/16 +``` If you fail to specify an empty configuration, Docker reports an error such as: - docker.service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. Refusing. +```conf +docker.service has more than one ExecStart= setting, which is only allowed for Type=oneshot services. Refusing. +``` ### HTTP proxy @@ -140,33 +155,44 @@ This example overrides the default `docker.service` file. If you are behind an HTTP proxy server, for example in corporate settings, you will need to add this configuration in the Docker systemd service file. -First, create a systemd drop-in directory for the docker service: +1. Create a systemd drop-in directory for the docker service: - mkdir /etc/systemd/system/docker.service.d + ```bash + $ mkdir /etc/systemd/system/docker.service.d + ``` -Now create a file called `/etc/systemd/system/docker.service.d/http-proxy.conf` -that adds the `HTTP_PROXY` environment variable: +2. Create a file called `/etc/systemd/system/docker.service.d/http-proxy.conf` + that adds the `HTTP_PROXY` environment variable: + ```conf [Service] Environment="HTTP_PROXY=http://proxy.example.com:80/" + ``` -If you have internal Docker registries that you need to contact without -proxying you can specify them via the `NO_PROXY` environment variable: +3. 
If you have internal Docker registries that you need to contact without + proxying you can specify them via the `NO_PROXY` environment variable: + ```conf Environment="HTTP_PROXY=http://proxy.example.com:80/" "NO_PROXY=localhost,127.0.0.1,docker-registry.somecorporation.com" + ``` -Flush changes: +4. Flush changes: + ```bash $ sudo systemctl daemon-reload + ``` -Verify that the configuration has been loaded: +5. Verify that the configuration has been loaded: + ```bash $ systemctl show --property=Environment docker Environment=HTTP_PROXY=http://proxy.example.com:80/ + ``` +6. Restart Docker: -Restart Docker: - + ```bash $ sudo systemctl restart docker + ``` ## Manually creating the systemd unit files diff --git a/engine/breaking_changes.md b/engine/breaking_changes.md index ac3a8630571..bc79fa4d8ca 100644 --- a/engine/breaking_changes.md +++ b/engine/breaking_changes.md @@ -1,15 +1,12 @@ - +--- +aliases: +- /engine/misc/breaking/ +description: Breaking changes +keywords: +- docker, documentation, about, technology, breaking +- incompatibilities +title: Breaking changes +--- # Breaking changes and incompatibilities @@ -40,7 +37,7 @@ breaking change. Images pushed by Engine 1.10 to a Registry 2.3 cannot be pulled by digest by older Engine versions. A `docker pull` that encounters this situation returns the following error: -``` +```none Error response from daemon: unsupported schema version 2 for tag TAGNAME ``` diff --git a/engine/deprecated.md b/engine/deprecated.md index a591880ef06..dae2d9a2f4f 100644 --- a/engine/deprecated.md +++ b/engine/deprecated.md @@ -1,14 +1,11 @@ - +--- +aliases: +- /engine/misc/deprecated/ +description: Deprecated Features. +keywords: +- docker, documentation, about, technology, deprecate +title: Deprecated Engine Features +--- # Deprecated Engine Features @@ -36,7 +33,7 @@ The daemon is moved to a separate binary (`dockerd`), and should be used instead **Removed In Release: [v1.12.0](https://github.com/docker/docker/releases/tag/v1.12.0)** -The `docker import` command format 'file|URL|- [REPOSITORY [TAG]]' is deprecated since November 2013. It's no more supported. +The `docker import` command format `file|URL|- [REPOSITORY [TAG]]` is deprecated since November 2013. It's no more supported. ### `-h` shorthand for `--help` @@ -119,7 +116,11 @@ Log tags are now generated in a standard way across different logging drivers. Because of which, the driver specific log tag options `syslog-tag`, `gelf-tag` and `fluentd-tag` have been deprecated in favor of the generic `tag` option. - docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" +```bash +{% raw %} +$ docker --log-driver=syslog --log-opt tag="{{.ImageName}}/{{.Name}}/{{.ID}}" +{% endraw %} +``` ### LXC built-in exec driver **Deprecated In Release: [v1.8.0](https://github.com/docker/docker/releases/tag/v1.8.0)** @@ -135,47 +136,53 @@ The built-in LXC execution driver, the lxc-conf flag, and API fields have been r The flags `-d` and `--daemon` are deprecated in favor of the `daemon` subcommand: - docker daemon -H ... +```bash +$ docker daemon -H ... 
+``` The following single-dash (`-opt`) variant of certain command line options are deprecated and replaced with double-dash options (`--opt`): - docker attach -nostdin - docker attach -sig-proxy - docker build -no-cache - docker build -rm - docker commit -author - docker commit -run - docker events -since - docker history -notrunc - docker images -notrunc - docker inspect -format - docker ps -beforeId - docker ps -notrunc - docker ps -sinceId - docker rm -link - docker run -cidfile - docker run -dns - docker run -entrypoint - docker run -expose - docker run -link - docker run -lxc-conf - docker run -n - docker run -privileged - docker run -volumes-from - docker search -notrunc - docker search -stars - docker search -t - docker search -trusted - docker tag -force +```none +docker attach -nostdin +docker attach -sig-proxy +docker build -no-cache +docker build -rm +docker commit -author +docker commit -run +docker events -since +docker history -notrunc +docker images -notrunc +docker inspect -format +docker ps -beforeId +docker ps -notrunc +docker ps -sinceId +docker rm -link +docker run -cidfile +docker run -dns +docker run -entrypoint +docker run -expose +docker run -link +docker run -lxc-conf +docker run -n +docker run -privileged +docker run -volumes-from +docker search -notrunc +docker search -stars +docker search -t +docker search -trusted +docker tag -force +``` The following double-dash options are deprecated and have no replacement: - docker run --cpuset - docker run --networking - docker ps --since-id - docker ps --before-id - docker search --trusted +```none +docker run --cpuset +docker run --networking +docker ps --since-id +docker ps --before-id +docker search --trusted +``` **Deprecated In Release: [v1.5.0](https://github.com/docker/docker/releases/tag/v1.5.0)** @@ -183,17 +190,11 @@ The following double-dash options are deprecated and have no replacement: The single-dash (`-help`) was removed, in favor of the double-dash `--help` - docker -help - docker [COMMAND] -help - -### `--run` flag on docker commit - -**Deprecated In Release: [v0.10.0](https://github.com/docker/docker/releases/tag/v0.10.0)** - -**Removed In Release: [v1.13.0](https://github.com/docker/docker/releases/)** - -The flag `--run` of the docker commit (and its short version `-run`) were deprecated in favor -of the `--changes` flag that allows to pass `Dockerfile` commands. +```bash +$ docker -help + + docker [COMMAND] -help +``` ### Interacting with V1 registries diff --git a/engine/examples/apt-cacher-ng.md b/engine/examples/apt-cacher-ng.md index 7213e6d2d93..9e113bff571 100644 --- a/engine/examples/apt-cacher-ng.md +++ b/engine/examples/apt-cacher-ng.md @@ -13,7 +13,7 @@ parent = "engine_dockerize" > **Note**: > - **If you don't like sudo** then see [*Giving non-root > access*](../installation/binaries.md#giving-non-root-access). -> - **If you're using OS X or docker via TCP** then you shouldn't use +> - **If you're using macOS or docker via TCP** then you shouldn't use > sudo. 
When you have multiple Docker servers, or build unrelated Docker diff --git a/engine/examples/couchbase.md b/engine/examples/couchbase.md index 27607cb85dd..0acd080840e 100644 --- a/engine/examples/couchbase.md +++ b/engine/examples/couchbase.md @@ -1,12 +1,9 @@ - +--- +description: Dockerizing a Couchbase service +keywords: +- docker, example, package installation, networking, couchbase +title: Dockerizing a Couchbase service +--- # Dockerizing a Couchbase service @@ -18,18 +15,19 @@ Couchbase is an open source, document-oriented NoSQL database for modern web, mo Couchbase Docker images are published at [Docker Hub](https://hub.docker.com/_/couchbase/). -Start Couchbase server as: +Start Couchbase server: -``` -docker run -d --name db -p 8091-8093:8091-8093 -p 11210:11210 couchbase +```bash +$ docker run -d --name db -p 8091-8093:8091-8093 -p 11210:11210 couchbase ``` The purpose of each port exposed is explained at [Couchbase Developer Portal - Network Configuration](http://developer.couchbase.com/documentation/server/4.1/install/install-ports.html). -Logs can be seen as: +Logs can be seen using the `docker logs` command: + +```bash +$ docker logs db -``` -docker logs db Starting Couchbase Server -- Web UI available at http://:8091 ``` @@ -51,11 +49,13 @@ Data, Query and Index are three different services that can be configured on a C Memory needs to be configured for Data and Index service only. -``` -curl -v -X POST http://192.168.99.100:8091/pools/default -d memoryQuota=300 -d indexMemoryQuota=300 +```bash +$ curl -v -X POST http://192.168.99.100:8091/pools/default -d memoryQuota=300 -d indexMemoryQuota=300 + * Hostname was NOT found in DNS cache * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) + > POST /pools/default HTTP/1.1 > User-Agent: curl/7.37.1 > Host: 192.168.99.100:8091 @@ -82,11 +82,13 @@ The command shows an HTTP POST request to the REST endpoint `/pools/default`. Th All three services, or only one of them, can be configured on each instance. This allows different Couchbase instances to use affinities and setup services accordingly. For example, if Docker host is running a machine with solid-state drive then only Data service can be started. -``` -curl -v http://192.168.99.100:8091/node/controller/setupServices -d 'services=kv%2Cn1ql%2Cindex' +```bash +$ curl -v http://192.168.99.100:8091/node/controller/setupServices -d 'services=kv%2Cn1ql%2Cindex' + * Hostname was NOT found in DNS cache * Trying 192.168.99.100... * Connected to 192.168.99.100 (192.168.99.100) port 8091 (#0) + > POST /node/controller/setupServices HTTP/1.1 > User-Agent: curl/7.37.1 > Host: 192.168.99.100:8091 diff --git a/engine/examples/mongodb.md b/engine/examples/mongodb.md index 3173aa1b7ee..3d51f38ad88 100644 --- a/engine/examples/mongodb.md +++ b/engine/examples/mongodb.md @@ -1,12 +1,12 @@ - +--- +description: Creating a Docker image with MongoDB pre-installed using a Dockerfile + and sharing the image on Docker Hub +keywords: +- docker, dockerize, dockerizing, article, example, docker.io, platform, package, + installation, networking, mongodb, containers, images, image, sharing, dockerfile, + build, auto-building, framework +title: Dockerizing MongoDB +--- # Dockerizing MongoDB @@ -17,7 +17,7 @@ MongoDB pre-installed. We'll also see how to `push` that image to the [Docker Hub registry](https://hub.docker.com) and share it with others! 
> **Note:** This guide will show the mechanics of building a MongoDB container, but -> you will probably want to use the official image on [Docker Hub]( https://hub.docker.com/_/mongo/) +you will probably want to use the official image on [Docker Hub]( https://hub.docker.com/_/mongo/) Using Docker and containers for deploying [MongoDB](https://www.mongodb.org/) instances will bring several benefits, such as: @@ -26,38 +26,44 @@ instances will bring several benefits, such as: - Ready to run and start working within milliseconds; - Based on globally accessible and shareable images. -> **Note:** -> -> If you do **_not_** like `sudo`, you might want to check out: -> [*Giving non-root access*](../installation/binaries.md#giving-non-root-access). +> **Note:** If you do **_not_** like `sudo`, you might want to check out +[*Giving non-root access*](../installation/binaries.md#giving-non-root-access). ## Creating a Dockerfile for MongoDB Let's create our `Dockerfile` and start building it: - $ nano Dockerfile +```bash +$ nano Dockerfile +``` Although optional, it is handy to have comments at the beginning of a `Dockerfile` explaining its purpose: +```dockerfile # Dockerizing MongoDB: Dockerfile for building MongoDB images # Based on ubuntu:latest, installs MongoDB following the instructions from: # http://docs.mongodb.org/manual/tutorial/install-mongodb-on-ubuntu/ +``` > **Tip:** `Dockerfile`s are flexible. However, they need to follow a certain -> format. The first item to be defined is the name of an image, which becomes -> the *parent* of your *Dockerized MongoDB* image. +format. The first item to be defined is the name of an image, which becomes +the *parent* of your *Dockerized MongoDB* image. We will build our image using the latest version of Ubuntu from the [Docker Hub Ubuntu](https://hub.docker.com/_/ubuntu/) repository. - # Format: FROM repository[:version] - FROM ubuntu:latest +```dockerfile +# Format: FROM repository[:version] +FROM ubuntu:latest +``` Continuing, we will declare the `MAINTAINER` of the `Dockerfile`: - # Format: MAINTAINER Name - MAINTAINER M.Y. Name +```dockerfile +# Format: MAINTAINER Name +MAINTAINER M.Y. Name +``` > **Note:** Although Ubuntu systems have MongoDB packages, they are likely to > be outdated. Therefore in this example, we will use the official MongoDB @@ -66,42 +72,48 @@ Continuing, we will declare the `MAINTAINER` of the `Dockerfile`: We will begin with importing the MongoDB public GPG key. We will also create a MongoDB repository file for the package manager. - # Installation: - # Import MongoDB public GPG key AND create a MongoDB list file - RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 - RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list +```dockerfile +# Installation: +# Import MongoDB public GPG key AND create a MongoDB list file +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 +RUN echo "deb http://repo.mongodb.org/apt/ubuntu "$(lsb_release -sc)"/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list +``` After this initial preparation we can update our packages and install MongoDB. 
- # Update apt-get sources AND install MongoDB - RUN apt-get update && apt-get install -y mongodb-org +```dockerfile +# Update apt-get sources AND install MongoDB +RUN apt-get update && apt-get install -y mongodb-org +``` -> **Tip:** You can install a specific version of MongoDB by using a list -> of required packages with versions, e.g.: -> -> RUN apt-get update && apt-get install -y mongodb-org=3.0.1 mongodb-org-server=3.0.1 mongodb-org-shell=3.0.1 mongodb-org-mongos=3.0.1 mongodb-org-tools=3.0.1 +> **Tip:** You can install a specific version of MongoDB by using a list of required packages with versions, e.g.: +```dockerfile +RUN apt-get update && apt-get install -y mongodb-org=3.0.1 mongodb-org-server=3.0.1 mongodb-org-shell=3.0.1 mongodb-org-mongos=3.0.1 mongodb-org-tools=3.0.1 +``` MongoDB requires a data directory. Let's create it as the final step of our installation instructions. - # Create the MongoDB data directory - RUN mkdir -p /data/db +```dockerfile +# Create the MongoDB data directory +RUN mkdir -p /data/db +``` Lastly we set the `ENTRYPOINT` which will tell Docker to run `mongod` inside the containers launched from our MongoDB image. And for ports, we will use the `EXPOSE` instruction. - # Expose port 27017 from the container to the host - EXPOSE 27017 +```dockerfile +# Expose port 27017 from the container to the host +EXPOSE 27017 - # Set usr/bin/mongod as the dockerized entry-point application - ENTRYPOINT ["/usr/bin/mongod"] +# Set usr/bin/mongod as the dockerized entry-point application +ENTRYPOINT ["/usr/bin/mongod"] +``` Now save the file and let's build our image. -> **Note:** -> -> The full version of this `Dockerfile` can be found [here](https://github.com/docker/docker/blob/master/docs/examples/mongodb/Dockerfile). +> **Note:** The full version of this `Dockerfile` can be found [here](https://github.com/docker/docker/blob/master/docs/examples/mongodb/Dockerfile). ## Building the MongoDB Docker image @@ -109,10 +121,11 @@ With our `Dockerfile`, we can now build the MongoDB image using Docker. Unless experimenting, it is always a good practice to tag Docker images by passing the `--tag` option to `docker build` command. - # Format: docker build --tag/-t / . - # Example: - $ docker build --tag my/repo . - +```dockerfile +# Format: docker build --tag/-t / . +# Example: +$ docker build --tag my/repo . +``` Once this command is issued, Docker will go through the `Dockerfile` and build the image. The final image will be tagged `my/repo`. @@ -122,55 +135,66 @@ All Docker image repositories can be hosted and shared on [Docker Hub](https://hub.docker.com) with the `docker push` command. For this, you need to be logged-in. - # Log-in - $ docker login - Username: - .. +```bash +# Log-in +$ docker login + +Username: +.. +``` + +```bash +# Push the image +# Format: docker push / +$ docker push my/repo - # Push the image - # Format: docker push / - $ docker push my/repo - The push refers to a repository [my/repo] (len: 1) - Sending image list - Pushing repository my/repo (1 tags) - .. +The push refers to a repository [my/repo] (len: 1) +Sending image list +Pushing repository my/repo (1 tags) +.. +``` ## Using the MongoDB image Using the MongoDB image we created, we can run one or more MongoDB instances as daemon process(es). 
- # Basic way - # Usage: docker run --name -d / - $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo +```bash +# Basic way +# Usage: docker run --name -d / +$ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo - # Dockerized MongoDB, lean and mean! - # Usage: docker run --name -d / --noprealloc --smallfiles - $ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo --smallfiles +# Dockerized MongoDB, lean and mean! +# Usage: docker run --name -d / --noprealloc --smallfiles +$ docker run -p 27017:27017 --name mongo_instance_001 -d my/repo --smallfiles - # Checking out the logs of a MongoDB container - # Usage: docker logs - $ docker logs mongo_instance_001 +# Checking out the logs of a MongoDB container +# Usage: docker logs +$ docker logs mongo_instance_001 - # Playing with MongoDB - # Usage: mongo --port - $ mongo --port 27017 +# Playing with MongoDB +# Usage: mongo --port +$ mongo --port 27017 - # If using docker-machine - # Usage: mongo --port --host - $ mongo --port 27017 --host 192.168.59.103 +# If using docker-machine +# Usage: mongo --port --host +$ mongo --port 27017 --host 192.168.59.103 +``` -> **Tip:** -If you want to run two containers on the same engine, then you will need to map +> **Tip:** If you want to run two containers on the same engine, then you will need to map the exposed port to two different ports on the host - # Start two containers and map the ports - $ docker run -p 28001:27017 --name mongo_instance_001 -d my/repo - $ docker run -p 28002:27017 --name mongo_instance_002 -d my/repo +```bash +# Start two containers and map the ports +$ docker run -p 28001:27017 --name mongo_instance_001 -d my/repo + +$ docker run -p 28002:27017 --name mongo_instance_002 -d my/repo + +# Now you can connect to each MongoDB instance on the two ports +$ mongo --port 28001 - # Now you can connect to each MongoDB instance on the two ports - $ mongo --port 28001 - $ mongo --port 28002 +$ mongo --port 28002 +``` - [Linking containers](../userguide/networking/default_network/dockerlinks.md) - [Cross-host linking containers](../admin/ambassador_pattern_linking.md) diff --git a/engine/examples/postgresql_service.md b/engine/examples/postgresql_service.md index 8d5f675260d..5f49ba10c7a 100644 --- a/engine/examples/postgresql_service.md +++ b/engine/examples/postgresql_service.md @@ -1,12 +1,9 @@ - +--- +description: Running and installing a PostgreSQL service +keywords: +- docker, example, package installation, postgresql +title: Dockerizing PostgreSQL +--- # Dockerizing PostgreSQL @@ -22,75 +19,80 @@ Hub](http://hub.docker.com), you can create one yourself. Start by creating a new `Dockerfile`: > **Note**: -> This PostgreSQL setup is for development-only purposes. Refer to the -> PostgreSQL documentation to fine-tune these settings so that it is -> suitably secure. +This PostgreSQL setup is for development-only purposes. Refer to the +PostgreSQL documentation to fine-tune these settings so that it is +suitably secure. - # - # example Dockerfile for https://docs.docker.com/examples/postgresql_service/ - # +```dockerfile +# +# example Dockerfile for https://docs.docker.com/examples/postgresql_service/ +# - FROM ubuntu - MAINTAINER SvenDowideit@docker.com +FROM ubuntu +MAINTAINER SvenDowideit@docker.com - # Add the PostgreSQL PGP key to verify their Debian packages. 
- # It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc - RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 +# Add the PostgreSQL PGP key to verify their Debian packages. +# It should be the same key as https://www.postgresql.org/media/keys/ACCC4CF8.asc +RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys B97B0AFCAA1A47F044F244A07FCC7D46ACCC4CF8 - # Add PostgreSQL's repository. It contains the most recent stable release - # of PostgreSQL, ``9.3``. - RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list +# Add PostgreSQL's repository. It contains the most recent stable release +# of PostgreSQL, ``9.3``. +RUN echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main" > /etc/apt/sources.list.d/pgdg.list - # Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 - # There are some warnings (in red) that show up during the build. You can hide - # them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive - RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 +# Install ``python-software-properties``, ``software-properties-common`` and PostgreSQL 9.3 +# There are some warnings (in red) that show up during the build. You can hide +# them by prefixing each apt-get statement with DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y python-software-properties software-properties-common postgresql-9.3 postgresql-client-9.3 postgresql-contrib-9.3 - # Note: The official Debian and Ubuntu images automatically ``apt-get clean`` - # after each ``apt-get`` +# Note: The official Debian and Ubuntu images automatically ``apt-get clean`` +# after each ``apt-get`` - # Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` - USER postgres +# Run the rest of the commands as the ``postgres`` user created by the ``postgres-9.3`` package when it was ``apt-get installed`` +USER postgres - # Create a PostgreSQL role named ``docker`` with ``docker`` as the password and - # then create a database `docker` owned by the ``docker`` role. - # Note: here we use ``&&\`` to run commands one after the other - the ``\`` - # allows the RUN command to span multiple lines. - RUN /etc/init.d/postgresql start &&\ - psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ - createdb -O docker docker +# Create a PostgreSQL role named ``docker`` with ``docker`` as the password and +# then create a database `docker` owned by the ``docker`` role. +# Note: here we use ``&&\`` to run commands one after the other - the ``\`` +# allows the RUN command to span multiple lines. +RUN /etc/init.d/postgresql start &&\ + psql --command "CREATE USER docker WITH SUPERUSER PASSWORD 'docker';" &&\ + createdb -O docker docker - # Adjust PostgreSQL configuration so that remote connections to the - # database are possible. - RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf +# Adjust PostgreSQL configuration so that remote connections to the +# database are possible. 
+RUN echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/9.3/main/pg_hba.conf - # And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` - RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf +# And add ``listen_addresses`` to ``/etc/postgresql/9.3/main/postgresql.conf`` +RUN echo "listen_addresses='*'" >> /etc/postgresql/9.3/main/postgresql.conf - # Expose the PostgreSQL port - EXPOSE 5432 +# Expose the PostgreSQL port +EXPOSE 5432 - # Add VOLUMEs to allow backup of config, logs and databases - VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] +# Add VOLUMEs to allow backup of config, logs and databases +VOLUME ["/etc/postgresql", "/var/log/postgresql", "/var/lib/postgresql"] - # Set the default command to run when starting the container - CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] +# Set the default command to run when starting the container +CMD ["/usr/lib/postgresql/9.3/bin/postgres", "-D", "/var/lib/postgresql/9.3/main", "-c", "config_file=/etc/postgresql/9.3/main/postgresql.conf"] +``` Build an image from the Dockerfile assign it a name. - $ docker build -t eg_postgresql . +```bash +$ docker build -t eg_postgresql . +``` -And run the PostgreSQL server container (in the foreground): +Run the PostgreSQL server container (in the foreground): - $ docker run --rm -P --name pg_test eg_postgresql +```bash +$ docker run --rm -P --name pg_test eg_postgresql +``` There are 2 ways to connect to the PostgreSQL server. We can use [*Link Containers*](../userguide/networking/default_network/dockerlinks.md), or we can access it from our host (or the network). -> **Note**: -> The `--rm` removes the container and its image when -> the container exits successfully. +> **Note**: The `--rm` removes the container and its image when +the container exits successfully. ### Using container linking @@ -99,9 +101,11 @@ Containers can be linked to another container's ports directly using `docker run`. This will set a number of environment variables that can then be used to connect: - $ docker run --rm -t -i --link pg_test:pg eg_postgresql bash +```bash +$ docker run --rm -t -i --link pg_test:pg eg_postgresql bash - postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password +postgres@7ef98b1b7243:/$ psql -h $PG_PORT_5432_TCP_ADDR -p $PG_PORT_5432_TCP_PORT -d docker -U docker --password +``` ### Connecting from your host system @@ -110,44 +114,52 @@ host-mapped port to test as well. You need to use `docker ps` to find out what local host port the container is mapped to first: - $ docker ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test - $ psql -h localhost -p 49153 -d docker -U docker --password +```bash +$ docker ps + +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +5e24362f27f6 eg_postgresql:latest /usr/lib/postgresql/ About an hour ago Up About an hour 0.0.0.0:49153->5432/tcp pg_test + +$ psql -h localhost -p 49153 -d docker -U docker --password +``` ### Testing the database Once you have authenticated and have a `docker =#` prompt, you can create a table and populate it. - psql (9.3.1) - Type "help" for help. 
- - $ docker=# CREATE TABLE cities ( - docker(# name varchar(80), - docker(# location point - docker(# ); - CREATE TABLE - $ docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); - INSERT 0 1 - $ docker=# select * from cities; - name | location - ---------------+----------- - San Francisco | (-194,53) - (1 row) +```sql +psql (9.3.1) +Type "help" for help. + +$ docker=# CREATE TABLE cities ( +docker(# name varchar(80), +docker(# location point +docker(# ); +CREATE TABLE +$ docker=# INSERT INTO cities VALUES ('San Francisco', '(-194.0, 53.0)'); +INSERT 0 1 +$ docker=# select * from cities; + name | location +---------------+----------- + San Francisco | (-194,53) +(1 row) +``` ### Using the container volumes You can use the defined volumes to inspect the PostgreSQL log files and to backup your configuration and data: - $ docker run --rm --volumes-from pg_test -t -i busybox sh - - / # ls - bin etc lib linuxrc mnt proc run sys usr - dev home lib64 media opt root sbin tmp var - / # ls /etc/postgresql/9.3/main/ - environment pg_hba.conf postgresql.conf - pg_ctl.conf pg_ident.conf start.conf - /tmp # ls /var/log - ldconfig postgresql +```bash +$ docker run --rm --volumes-from pg_test -t -i busybox sh + +/ # ls +bin etc lib linuxrc mnt proc run sys usr +dev home lib64 media opt root sbin tmp var +/ # ls /etc/postgresql/9.3/main/ +environment pg_hba.conf postgresql.conf +pg_ctl.conf pg_ident.conf start.conf +/tmp # ls /var/log +ldconfig postgresql +``` diff --git a/engine/examples/running_redis_service.md b/engine/examples/running_redis_service.md index 66d852206e8..e5ad0209cd3 100644 --- a/engine/examples/running_redis_service.md +++ b/engine/examples/running_redis_service.md @@ -1,12 +1,9 @@ - +--- +description: Installing and running a redis service +keywords: +- docker, example, package installation, networking, redis +title: Dockerizing a Redis service +--- # Dockerizing a Redis service @@ -18,15 +15,19 @@ using a link. Firstly, we create a `Dockerfile` for our new Redis image. - FROM ubuntu:14.04 - RUN apt-get update && apt-get install -y redis-server - EXPOSE 6379 - ENTRYPOINT ["/usr/bin/redis-server"] +```dockerfile +FROM ubuntu:14.04 +RUN apt-get update && apt-get install -y redis-server +EXPOSE 6379 +ENTRYPOINT ["/usr/bin/redis-server"] +``` Next we build an image from our `Dockerfile`. Replace `` with your own user name. - $ docker build -t /redis . +```bash +$ docker build -t /redis . +``` ## Run the service @@ -39,7 +40,9 @@ Importantly, we're not exposing any ports on our container. Instead we're going to use a container link to provide access to our Redis database. - $ docker run --name redis -d /redis +```bash +$ docker run --name redis -d /redis +``` ## Create your web application container @@ -49,41 +52,53 @@ created with an alias of `db`. This will create a secure tunnel to the `redis` container and expose the Redis instance running inside that container to only this container. - $ docker run --link redis:db -i -t ubuntu:14.04 /bin/bash +```bash +$ docker run --link redis:db -i -t ubuntu:14.04 /bin/bash +``` Once inside our freshly created container we need to install Redis to get the `redis-cli` binary to test our connection. - $ sudo apt-get update - $ sudo apt-get install redis-server - $ sudo service redis-server stop +```bash +$ sudo apt-get update +$ sudo apt-get install redis-server +$ sudo service redis-server stop +``` As we've used the `--link redis:db` option, Docker has created some environment variables in our web application container. 
- $ env | grep DB_ +```bash +$ env | grep DB_ - # Should return something similar to this with your values - DB_NAME=/violet_wolf/db - DB_PORT_6379_TCP_PORT=6379 - DB_PORT=tcp://172.17.0.33:6379 - DB_PORT_6379_TCP=tcp://172.17.0.33:6379 - DB_PORT_6379_TCP_ADDR=172.17.0.33 - DB_PORT_6379_TCP_PROTO=tcp +# Should return something similar to this with your values +DB_NAME=/violet_wolf/db +DB_PORT_6379_TCP_PORT=6379 +DB_PORT=tcp://172.17.0.33:6379 +DB_PORT_6379_TCP=tcp://172.17.0.33:6379 +DB_PORT_6379_TCP_ADDR=172.17.0.33 +DB_PORT_6379_TCP_PROTO=tcp +``` We can see that we've got a small list of environment variables prefixed with `DB`. The `DB` comes from the link alias specified when we launched the container. Let's use the `DB_PORT_6379_TCP_ADDR` variable to connect to our Redis container. - $ redis-cli -h $DB_PORT_6379_TCP_ADDR - $ redis 172.17.0.33:6379> - $ redis 172.17.0.33:6379> set docker awesome - OK - $ redis 172.17.0.33:6379> get docker - "awesome" - $ redis 172.17.0.33:6379> exit +```bash +$ redis-cli -h $DB_PORT_6379_TCP_ADDR +$ redis 172.17.0.33:6379> +$ redis 172.17.0.33:6379> set docker awesome + +OK + +$ redis 172.17.0.33:6379> get docker + +"awesome" + +$ redis 172.17.0.33:6379> exit +``` We could easily use this or other environment variables in our web application to make a connection to our `redis` container. diff --git a/engine/extend/index.md b/engine/extend/index.md index 4ce1eac1c54..77552aa7b6c 100644 --- a/engine/extend/index.md +++ b/engine/extend/index.md @@ -68,7 +68,7 @@ to create a volume. The plugin requests 2 privileges, the `CAP_SYS_ADMIN` capability to be able to do mount inside the plugin and `host networking`. -2. Check for a value of `true` the `ENABLED` column to verify the plugin +2. Check for a value of `true` the `ENABLED` column to verify the plugin started without error. ```bash @@ -78,7 +78,7 @@ started without error. vieux/sshfs latest true ``` -3. Create a volume using the plugin. +3. Create a volume using the plugin. ```bash $ docker volume create \ @@ -97,7 +97,7 @@ started without error. ``` -5. Verify the plugin successfully created the volume. +5. Verify the plugin successfully created the volume. ```bash $ docker volume ls diff --git a/engine/extend/menu.md b/engine/extend/menu.md deleted file mode 100644 index 4b9955038a8..00000000000 --- a/engine/extend/menu.md +++ /dev/null @@ -1,15 +0,0 @@ - - - - diff --git a/engine/extend/plugins_volume.md b/engine/extend/plugins_volume.md index f9c93a3de11..077f983ba94 100644 --- a/engine/extend/plugins_volume.md +++ b/engine/extend/plugins_volume.md @@ -34,14 +34,14 @@ beyond the lifetime of a single Engine host. See the ## Command-line changes -A volume plugin makes use of the `-v`and `--volume-driver` flag on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example: +A volume plugin makes use of the `-v` and `--volume-driver` flag on the `docker run` command. The `-v` flag accepts a volume name and the `--volume-driver` flag a driver type, for example: $ docker run -ti -v volumename:/data --volume-driver=flocker busybox sh This command passes the `volumename` through to the volume plugin as a user-given name for the volume. The `volumename` must not begin with a `/`. -By having the user specify a `volumename`, a plugin can associate the volume +By having the user specify a `volumename`, a plugin can associate the volume with an external volume beyond the lifetime of a single container or container host. 
This can be used, for example, to move a stateful container from one server to another. @@ -79,7 +79,7 @@ containers. ``` Instruct the plugin that the user wants to create a volume, given a user -specified volume name. The plugin does not need to actually manifest the +specified volume name. The plugin does not need to actually manifest the volume on the filesystem yet (until Mount is called). Opts is a map of driver specific options passed through from the user request. @@ -174,7 +174,7 @@ however the plugin may be queried again later if one is not provided. ``` Indication that Docker no longer is using the named volume. This is called once -per container stop. Plugin may deduce that it is safe to deprovision it at +per container stop. Plugin may deduce that it is safe to deprovision it at this point. `ID` is a unique ID for the caller that is requesting the mount. diff --git a/engine/faq.md b/engine/faq.md index b017557cb5d..8f97aabe3d3 100644 --- a/engine/faq.md +++ b/engine/faq.md @@ -29,18 +29,22 @@ We are using the Apache License Version 2.0, see it here: [https://github.com/docker/docker/blob/master/LICENSE]( https://github.com/docker/docker/blob/master/LICENSE) -### Does Docker run on Mac OS X or Windows? +### Does Docker run on macOS or Windows? -Docker Engine currently runs only on Linux, but you can use VirtualBox to run -Engine in a virtual machine on your box, and get the best of both worlds. Check -out the [*Mac OS X*](installation/mac.md) and [*Microsoft -Windows*](installation/windows.md) installation guides. The small Linux -distribution boot2docker can be set up using the Docker Machine tool to be run -inside virtual machines on these two operating systems. +The Docker Engine client runs natively on Linux, macOS, and Windows. By default, these +clients connect to a local Docker daemon running in a virtual environment managed +by Docker, which provides the required features to run Linux-based containers within +OS X or Windows, or Windows-based containers on Windows. ->**Note:** if you are using a remote Docker Engine daemon on a VM through Docker ->Machine, then _do not_ type the `sudo` before the `docker` commands shown in ->the documentation's examples. +If your version of macOS or Windows does not include the required virtualization +technology, you can use Docker Machine to work around these limitations. + +You can run Windows-based **containers** on Windows Server 2016 and +Windows 10. Windows-based containers require a Windows kernel to run, in the same +way that Linux-based containers require a Linux kernel to run. You can even run +Windows-based containers on a Windows virtual machine running on an macOS or Linux +host. Docker Machine is not necessary if you run macOS 10.10.3 Yosemite, Windows +Server 2016, or Windows 10. ### How do containers compare to virtual machines? @@ -155,14 +159,14 @@ the container will continue to as well. You can see a more substantial example Linux: - - Ubuntu 12.04, 13.04 et al - - Fedora 19/20+ - - RHEL 6.5+ - - CentOS 6+ - - Gentoo - - ArchLinux - - openSUSE 12.3+ - - CRUX 3.0+ + - Any distribution running version 3.10+ of the Linux kernel + - Specific instructions are available for most Linux distributions, including + [RHEL](installation/linux/rhel.md), [Ubuntu](installation/linux/ubuntulinux.md), + [SuSE](installation/linux/suse.md), and many others. 
+ +Microsoft Windows: + - Windows Server 2016 + - Windows 10 Cloud: diff --git a/engine/getstarted/menu.md b/engine/getstarted/menu.md deleted file mode 100644 index f447c667e5e..00000000000 --- a/engine/getstarted/menu.md +++ /dev/null @@ -1,16 +0,0 @@ - - -# Get Started with Docker diff --git a/engine/getstarted/step_four.md b/engine/getstarted/step_four.md index 81b1194a0d1..49d0e6648fd 100644 --- a/engine/getstarted/step_four.md +++ b/engine/getstarted/step_four.md @@ -51,7 +51,7 @@ commands to run. Your recipe is going to be very short. The command appears to do nothing but it actually creates the Dockerfile in the current directory. Just type `ls Dockerfile` to see it. - $ ls Dockerfile + $ ls Dockerfile 5. Open the `Dockerfile` in a visual text editor like Atom or Sublime, or a text based editor like `vi`, or `nano` (https://www.nano-editor.org/). diff --git a/engine/getstarted/step_one.md b/engine/getstarted/step_one.md index 81821617a60..ed200e2d676 100644 --- a/engine/getstarted/step_one.md +++ b/engine/getstarted/step_one.md @@ -33,7 +33,7 @@ Docker for Mac is our newest offering for the Mac. It runs as a native Mac appli - Mac must be a 2010 or newer model, with Intel's hardware support for memory management unit (MMU) virtualization; i.e., Extended Page Tables (EPT) -- OS X 10.10.3 Yosemite or newer +- macOS 10.10.3 Yosemite or newer - At least 4GB of RAM diff --git a/engine/getstarted/step_three.md b/engine/getstarted/step_three.md index aaf77ef5085..077c931c7d5 100644 --- a/engine/getstarted/step_three.md +++ b/engine/getstarted/step_three.md @@ -30,15 +30,11 @@ image you'll use in the rest of this getting started. The Docker Hub contains images from individuals like you and official images from organizations like RedHat, IBM, Google, and a whole lot more. -2. Click **Browse & Search**. - - The browser opens the search page. - -3. Enter the word `whalesay` in the search bar. +2. Enter the word `whalesay` in the search bar. ![Browse Docker Hub](tutimg/image_found.png) -4. Click on the **docker/whalesay** image in the results. +3. Click on the **docker/whalesay** image in the results. The browser displays the repository for the **whalesay** image. diff --git a/engine/getstarted/step_two.md b/engine/getstarted/step_two.md index 820f9328ef1..49ccc5b62eb 100644 --- a/engine/getstarted/step_two.md +++ b/engine/getstarted/step_two.md @@ -31,7 +31,7 @@ When you ran the command, Docker Engine: * downloaded the image from the Docker Hub (more about the hub later) * loaded the image into the container and "ran" it -Depending on how it was built, an image might run a simple, single command and then exit. This is what `Hello-World` did. +Depending on how it was built, an image might run a simple, single command and then exit. This is what `hello-world` did. A Docker image, though, is capable of much more. An image can start software as complex as a database, wait for you (or someone else) to add data, store the data for later use, and then wait for the next person. diff --git a/engine/installation/binaries.md b/engine/installation/binaries.md index c9bff3844bc..957debf7d68 100644 --- a/engine/installation/binaries.md +++ b/engine/installation/binaries.md @@ -60,7 +60,7 @@ minor version will ensure critical kernel bugs get fixed. > newer kernels. Note that Docker also has a client mode, which can run on virtually any -Linux kernel (it even builds on OS X!). +Linux kernel (it even builds on macOS!). 
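For example, the client-only binary can drive a daemon running on another machine by pointing it at that daemon's address, either with the `-H` flag or through the `DOCKER_HOST` environment variable. A minimal sketch, using a placeholder address:

```bash
# Talk to a remote daemon listening on TCP (placeholder address and port)
$ docker -H tcp://203.0.113.10:2375 version

# Or export DOCKER_HOST so every subsequent docker command uses it
$ export DOCKER_HOST=tcp://203.0.113.10:2375
$ docker info
```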
## Enable AppArmor and SELinux when possible @@ -171,14 +171,14 @@ For additional information about running the Engine in daemon mode, refer to the [daemon command](../reference/commandline/dockerd.md) in the Engine command line reference. -### Get the Mac OS X binary +### Get the macOS binary -The Mac OS X binary is only a client. You cannot use it to run the `docker` -daemon. To download the latest version for Mac OS X, use the following URLs: +The macOS binary is only a client. You cannot use it to run the `docker` +daemon. To download the latest version for macOS, use the following URLs: https://get.docker.com/builds/Darwin/x86_64/docker-latest.tgz -To download a specific version for Mac OS X, use the +To download a specific version for macOS, use the following URL pattern: https://get.docker.com/builds/Darwin/x86_64/docker-.tgz diff --git a/engine/installation/cloud/cloud-ex-machine-ocean.md b/engine/installation/cloud/cloud-ex-machine-ocean.md index ddcc9562816..fea5babbacc 100644 --- a/engine/installation/cloud/cloud-ex-machine-ocean.md +++ b/engine/installation/cloud/cloud-ex-machine-ocean.md @@ -48,7 +48,7 @@ To generate your access token: 1. If you have not done so already, install Docker Machine on your local host. - * How to install Docker Machine on Mac OS X + * How to install Docker Machine on macOS * How to install Docker Machine on Windows diff --git a/engine/installation/index.md b/engine/installation/index.md index 3d796ea2249..e8088aa4064 100644 --- a/engine/installation/index.md +++ b/engine/installation/index.md @@ -1,19 +1,22 @@ - +--- +aliases: +- /installation/ +- /engine/installation/linux/frugalware/ +- /engine/installation/frugalware/ +description: Lists the installation methods +keywords: +- 'Docker install ' +menu: + main: + identifier: engine_install + parent: engine_use + weight: "-81" +title: Install +--- # Install Docker Engine -Docker Engine is supported on Linux, Cloud, Windows, and OS X. Installation instructions are available for the following: +Docker Engine is supported on Linux, Cloud, Windows, and macOS. Installation instructions are available for the following: ## On Linux * [Arch Linux](linux/archlinux.md) @@ -34,8 +37,8 @@ If your linux distribution is not listed above, don't give up yet. To try out Do * [Example: Manual install on a cloud provider](cloud/cloud-ex-aws.md) * [Example: Use Docker Machine to provision cloud hosts](cloud/cloud-ex-machine-ocean.md) -## On OSX and Windows -* [Mac OS X](mac.md) +## On macOS and Windows +* [macOS](mac.md) * [Windows](windows.md) ## The Docker Archives diff --git a/engine/installation/linux/archlinux.md b/engine/installation/linux/archlinux.md index b62b21c674f..cd4a9e2a0a8 100644 --- a/engine/installation/linux/archlinux.md +++ b/engine/installation/linux/archlinux.md @@ -86,6 +86,7 @@ IPForward=kernel ``` This configuration allows IP forwarding from the container as expected. + ## Uninstallation To uninstall the Docker package: diff --git a/engine/installation/linux/centos.md b/engine/installation/linux/centos.md index 5239ac7670c..4b36ee020be 100644 --- a/engine/installation/linux/centos.md +++ b/engine/installation/linux/centos.md @@ -153,7 +153,7 @@ learn how to [customize your Systemd Docker daemon options](../../admin/systemd. 6. Verify `docker` is installed correctly by running a test image in a container. 
```bash - $ sudo docker run hello-world + $ sudo docker run --rm hello-world ``` If you need to add an HTTP Proxy, set a different directory or partition for the @@ -197,7 +197,7 @@ To create the `docker` group and add your user: 5. Verify that your user is in the docker group by running `docker` without `sudo`. ```bash - $ docker run hello-world + $ docker run --rm hello-world ``` ## Start the docker daemon at boot diff --git a/engine/installation/linux/rhel.md b/engine/installation/linux/rhel.md index 1122c32494a..60c2085bbf7 100644 --- a/engine/installation/linux/rhel.md +++ b/engine/installation/linux/rhel.md @@ -1,14 +1,16 @@ - +--- +aliases: +- /engine/installation/rhel/ +- /installation/rhel/ +description: Instructions for installing Docker on Red Hat Enterprise Linux. +keywords: +- Docker, Docker documentation, requirements, linux, rhel +menu: + main: + parent: engine_linux + weight: -5 +title: Installation on Red Hat Enterprise Linux +--- # Red Hat Enterprise Linux diff --git a/engine/installation/linux/ubuntulinux.md b/engine/installation/linux/ubuntulinux.md index ca38e8c9585..294515e59fd 100644 --- a/engine/installation/linux/ubuntulinux.md +++ b/engine/installation/linux/ubuntulinux.md @@ -1,14 +1,16 @@ - +--- +aliases: +- /engine/installation/ubuntulinux/ +- /installation/ubuntulinux/ +description: 'Instructions for installing Docker on Ubuntu. ' +keywords: +- Docker, Docker documentation, requirements, apt, installation, ubuntu +menu: + main: + parent: engine_linux + weight: -6 +title: 'Installation on Ubuntu ' +--- # Ubuntu @@ -20,12 +22,12 @@ Docker is supported on these Ubuntu operating systems: - Ubuntu Precise 12.04 (LTS) This page instructs you to install using Docker-managed release packages and -installation mechanisms. Using these packages ensures you get the latest release -of Docker. If you wish to install using Ubuntu-managed packages, consult your -Ubuntu documentation. +installation mechanisms. Using these packages ensures you get the latest official +release of Docker. If you are required to install using Ubuntu-managed packages, +consult the Ubuntu documentation. >**Note**: Ubuntu Utopic 14.10 and 15.04 exist in Docker's `APT` repository but -> are no longer officially supported. +are no longer officially supported. ## Prerequisites @@ -34,7 +36,7 @@ Additionally, your kernel must be 3.10 at minimum. The latest 3.10 minor version or a newer maintained version are also acceptable. Kernels older than 3.10 lack some of the features required to run Docker -containers. These older versions are known to have bugs which cause data loss +containers. These older versions have known bugs which cause data loss and frequently panic under certain conditions. To check your current kernel version, open a terminal and use `uname -r` to @@ -49,68 +51,83 @@ your `APT` sources to the new Docker repository. ### Update your apt sources Docker's `APT` repository contains Docker 1.7.1 and higher. To set `APT` to use -packages from the new repository: - -1. Log into your machine as a user with `sudo` or `root` privileges. - -2. Open a terminal window. - -3. Update package information, ensure that APT works with the `https` method, and that CA certificates are installed. - - $ sudo apt-get update - $ sudo apt-get install apt-transport-https ca-certificates - -4. Add the new `GPG` key. - - $ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D - -5. 
Open the `/etc/apt/sources.list.d/docker.list` file in your favorite editor. - - If the file doesn't exist, create it. - -6. Remove any existing entries. - -7. Add an entry for your Ubuntu operating system. - - The possible entries are: - - - On Ubuntu Precise 12.04 (LTS) - - deb https://apt.dockerproject.org/repo ubuntu-precise main - - - On Ubuntu Trusty 14.04 (LTS) - - deb https://apt.dockerproject.org/repo ubuntu-trusty main - - - Ubuntu Wily 15.10 - - deb https://apt.dockerproject.org/repo ubuntu-wily main - - - Ubuntu Xenial 16.04 (LTS) - - deb https://apt.dockerproject.org/repo ubuntu-xenial main - - > **Note**: Docker does not provide packages for all architectures. You can find - > nightly built binaries in https://master.dockerproject.org. To install docker on - > a multi-architecture system, add an `[arch=...]` clause to the entry. Refer to the - > [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources) - > for details. - -8. Save and close the `/etc/apt/sources.list.d/docker.list` file. - -9. Update the `APT` package index. - - $ sudo apt-get update - -10. Purge the old repo if it exists. - - $ sudo apt-get purge lxc-docker - -11. Verify that `APT` is pulling from the right repository. - - $ apt-cache policy docker-engine - - From now on when you run `apt-get upgrade`, `APT` pulls from the new repository. +packages from the Docker repository: + +1. Log into your machine as a user with `sudo` or `root` privileges. + +2. Open a terminal window. + +3. Update package information, ensure that APT works with the `https` method, and that CA certificates are installed. + + ```bash + $ sudo apt-get update + $ sudo apt-get install apt-transport-https ca-certificates + ``` +4. Add the new `GPG` key. + + ```bash + $ sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D + ``` + +5. Find the entry for your Ubuntu operating system. + + The entry determines where APT will search for packages. The possible entries + are: + + | Ubuntu version | Repository | + | ------------------- | ----------------------------------------------------------- | + | Precise 12.04 (LTS) | `deb https://apt.dockerproject.org/repo ubuntu-precise main`| + | Trusty 14.04 (LTS) | `deb https://apt.dockerproject.org/repo ubuntu-trusty main` | + | Xenial 16.04 (LTS) | `deb https://apt.dockerproject.org/repo ubuntu-xenial main` | + + + >**Note**: Docker does not provide packages for all architectures. Binary artifacts + are built nightly, and you can download them from + https://master.dockerproject.org. To install docker on a multi-architecture + system, add an `[arch=...]` clause to the entry. Refer to + [Debian Multiarch wiki](https://wiki.debian.org/Multiarch/HOWTO#Setting_up_apt_sources) + for details. + +6. Run the following command, substituting the entry for your operating system + for the placeholder ``. + + ```bash + $ echo "" | sudo tee /etc/apt/sources.list.d/docker.list + ``` + +7. Update the `APT` package index. + + ```bash + $ sudo apt-get update + ``` + +8. Verify that `APT` is pulling from the right repository. + + When you run the following command, an entry is returned for each version of + Docker that is available for you to install. Each entry should have the URL + `https://apt.dockerproject.org/repo/`. The version currently installed is + marked with `***`.The output below is truncated. 
+ + ```bash + $ apt-cache policy docker-engine + + docker-engine: + Installed: 1.12.2-0~trusty + Candidate: 1.12.2-0~trusty + Version table: + *** 1.12.2-0~trusty 0 + 500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages + 100 /var/lib/dpkg/status + 1.12.1-0~trusty 0 + 500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages + 1.12.0-0~trusty 0 + 500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages + 1.11.2-0~trusty 0 + 500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages + 1.11.1-0~trusty 0 + 500 https://apt.dockerproject.org/repo/ ubuntu-trusty/main amd64 Packages + ``` +From now on when you run `apt-get upgrade`, `APT` pulls from the new repository. ### Prerequisites by Ubuntu Version diff --git a/engine/installation/mac.md b/engine/installation/mac.md index 444c42d5f27..40ef781d57b 100644 --- a/engine/installation/mac.md +++ b/engine/installation/mac.md @@ -1,15 +1,16 @@ - - -# Mac OS X +--- +description: Docker installation on macOS +keywords: +- Docker, Docker documentation, requirements, boot2docker, VirtualBox, SSH, Linux, + osx, os x, macOS, Mac +menu: + main: + parent: engine_install + weight: "-90" +title: Installation on macOS +--- + +# macOS You have two options for installing Docker on Mac: @@ -26,7 +27,7 @@ Go to [Getting Started with Docker for Mac](https://docs.docker.com/docker-for-m - Mac must be a 2010 or newer model, with Intel's hardware support for memory management unit (MMU) virtualization; i.e., Extended Page Tables (EPT) -- OS X 10.10.3 Yosemite or newer +- macOS 10.10.3 Yosemite or newer - At least 4GB of RAM @@ -38,11 +39,11 @@ If you have an earlier Mac that doesn't meet the Docker for Mac requirements, -+++ -draft = true -+++ - +--- +published: false +--- This directory holds the authoritative specifications of APIs defined and implemented by Docker. Currently this includes: diff --git a/engine/reference/api/docker-io_api.md b/engine/reference/api/docker-io_api.md index 5e3c6844846..d8940ecfb65 100644 --- a/engine/reference/api/docker-io_api.md +++ b/engine/reference/api/docker-io_api.md @@ -1,14 +1,16 @@ - +--- +aliases: + - /reference/api/docker-io_api/ +description: API Documentation for the Docker Hub API +published: false +keywords: +- API, Docker, index, REST, documentation, Docker Hub, registry +menu: + main: + parent: engine_remoteapi + weight: 99 +title: Docker Hub API +--- # Docker Hub API diff --git a/engine/reference/api/docker_io_accounts_api.md b/engine/reference/api/docker_io_accounts_api.md index dfee194b19e..699c3cc7b72 100644 --- a/engine/reference/api/docker_io_accounts_api.md +++ b/engine/reference/api/docker_io_accounts_api.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_io_accounts_api/ +description: API Documentation for docker.io accounts. 
+keywords: +- API, Docker, accounts, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: 90 +title: docker.io accounts API +--- # docker.io accounts API diff --git a/engine/reference/api/docker_remote_api.md b/engine/reference/api/docker_remote_api.md index 91008b4e170..acb55992593 100644 --- a/engine/reference/api/docker_remote_api.md +++ b/engine/reference/api/docker_remote_api.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: -99 +title: Remote API +--- # Docker Remote API diff --git a/engine/reference/api/docker_remote_api_v1.18.md b/engine/reference/api/docker_remote_api_v1.18.md index b3616f6b796..dcaabd87d5e 100644 --- a/engine/reference/api/docker_remote_api_v1.18.md +++ b/engine/reference/api/docker_remote_api_v1.18.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.18/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: 3 +title: Remote API v1.18 +--- # Docker Remote API v1.18 diff --git a/engine/reference/api/docker_remote_api_v1.19.md b/engine/reference/api/docker_remote_api_v1.19.md index 3b4db54fd7f..9700b636fde 100644 --- a/engine/reference/api/docker_remote_api_v1.19.md +++ b/engine/reference/api/docker_remote_api_v1.19.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.19/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: 2 +title: Remote API v1.19 +--- # Docker Remote API v1.19 diff --git a/engine/reference/api/docker_remote_api_v1.20.md b/engine/reference/api/docker_remote_api_v1.20.md index 698fa7a6a69..d4fbdfbb9c3 100644 --- a/engine/reference/api/docker_remote_api_v1.20.md +++ b/engine/reference/api/docker_remote_api_v1.20.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.20/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: 1 +title: Remote API v1.20 +--- # Docker Remote API v1.20 diff --git a/engine/reference/api/docker_remote_api_v1.21.md b/engine/reference/api/docker_remote_api_v1.21.md index e89605c807c..8df1184b867 100644 --- a/engine/reference/api/docker_remote_api_v1.21.md +++ b/engine/reference/api/docker_remote_api_v1.21.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.21/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: -2 +title: Remote API v1.21 +--- # Docker Remote API v1.21 diff --git a/engine/reference/api/docker_remote_api_v1.22.md b/engine/reference/api/docker_remote_api_v1.22.md index 1f6d4c192e6..60c89b58be3 100644 --- a/engine/reference/api/docker_remote_api_v1.22.md +++ b/engine/reference/api/docker_remote_api_v1.22.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.22/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: -3 +title: Remote API v1.22 +--- # Docker Remote API v1.22 diff --git a/engine/reference/api/docker_remote_api_v1.23.md b/engine/reference/api/docker_remote_api_v1.23.md index d188f79d363..d900872d4cd 100644 --- 
a/engine/reference/api/docker_remote_api_v1.23.md +++ b/engine/reference/api/docker_remote_api_v1.23.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.23/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: -4 +title: Remote API v1.23 +--- # Docker Remote API v1.23 diff --git a/engine/reference/api/docker_remote_api_v1.24.md b/engine/reference/api/docker_remote_api_v1.24.md index efd0e939cfc..0bd2b62c907 100644 --- a/engine/reference/api/docker_remote_api_v1.24.md +++ b/engine/reference/api/docker_remote_api_v1.24.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.24/ +description: API Documentation for Docker +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: -5 +title: Remote API v1.24 +--- # Docker Remote API v1.24 diff --git a/engine/reference/api/docker_remote_api_v1.25.md b/engine/reference/api/docker_remote_api_v1.25.md index 232f7839ce3..3fdb07cff81 100644 --- a/engine/reference/api/docker_remote_api_v1.25.md +++ b/engine/reference/api/docker_remote_api_v1.25.md @@ -1,13 +1,16 @@ - +--- +aliases: + - /reference/api/docker_remote_api_v1.25/ +description: API Documentation for Docker +published: false +keywords: +- API, Docker, rcli, REST, documentation +menu: + main: + parent: engine_remoteapi + weight: -6 +title: Remote API v1.25 +--- # Docker Remote API v1.25 diff --git a/engine/reference/api/hub_registry_spec.md b/engine/reference/api/hub_registry_spec.md index 87c4c884826..d83472fefe2 100644 --- a/engine/reference/api/hub_registry_spec.md +++ b/engine/reference/api/hub_registry_spec.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/api/hub_registry_spec/ +description: Documentation for docker Registry and Registry API +published: false +keywords: +- docker, registry, api, hub +menu: + main: + parent: smn_hub_ref +title: The Docker Hub and the Registry v1 +--- # The Docker Hub and the Registry v1 diff --git a/engine/reference/api/index.md b/engine/reference/api/index.md index 05f3d126f23..b098b9efcf9 100644 --- a/engine/reference/api/index.md +++ b/engine/reference/api/index.md @@ -1,14 +1,15 @@ - - +--- +aliases: + - /reference/api/ +description: Reference +keywords: +- Engine +menu: + main: + identifier: engine_remoteapi + parent: engine_ref +title: API Reference +--- # API Reference diff --git a/engine/reference/api/remote_api_client_libraries.md b/engine/reference/api/remote_api_client_libraries.md index 4b7f4327e66..121e8bc9e13 100644 --- a/engine/reference/api/remote_api_client_libraries.md +++ b/engine/reference/api/remote_api_client_libraries.md @@ -1,13 +1,16 @@ - +--- +aliases: + - /reference/api/remote_api_client_libraries/ +description: Various client libraries available to use with the Docker remote API +keywords: +- API, Docker, index, registry, REST, documentation, clients, C#, Erlang, Go, Groovy, + Java, JavaScript, Perl, PHP, Python, Ruby, Rust, Scala +menu: + main: + parent: engine_remoteapi + weight: 90 +title: Remote API client libraries +--- # Docker Remote API client libraries @@ -50,6 +53,11 @@ with the library maintainers. 
bwu_docker https://github.com/bwu-dart/bwu_docker + + Go + Docker Go client + https://godoc.org/github.com/docker/docker/client + Gradle gradle-docker-plugin diff --git a/engine/reference/commandline/attach.md b/engine/reference/commandline/attach.md index d0b519840bd..3a149b09f5e 100644 --- a/engine/reference/commandline/attach.md +++ b/engine/reference/commandline/attach.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/attach/ +description: The attach command description and usage +keywords: +- attach, running, container +menu: + main: + parent: smn_cli +title: attach +--- # attach diff --git a/engine/reference/commandline/build.md b/engine/reference/commandline/build.md index 65f54cc81f3..db41a75f868 100644 --- a/engine/reference/commandline/build.md +++ b/engine/reference/commandline/build.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/build/ +description: The build command description and usage +keywords: +- build, docker, image +menu: + main: + parent: smn_cli +title: build +--- # build @@ -97,11 +99,9 @@ Build Syntax Suffix | Commit Used | Build Context Used If you pass an URL to a remote tarball, the URL itself is sent to the daemon: -Instead of specifying a context, you can pass a single Dockerfile in the `URL` -or pipe the file in via `STDIN`. To pipe a Dockerfile from `STDIN`: - ```bash $ docker build http://server/context.tar.gz +``` The download operation will be performed on the host the Docker daemon is running on, which is not necessarily the same host from which the build command diff --git a/engine/reference/commandline/cli.md b/engine/reference/commandline/cli.md index 51a8dd6abaa..a5fde070d46 100644 --- a/engine/reference/commandline/cli.md +++ b/engine/reference/commandline/cli.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/cli/ +description: Docker's CLI command description and usage +keywords: +- Docker, Docker documentation, CLI, command line +menu: + main: + parent: smn_cli + weight: -2 +title: Use the Docker command line +--- # Use the Docker command line diff --git a/engine/reference/commandline/commit.md b/engine/reference/commandline/commit.md index acfb7c95be7..373c69c91bc 100644 --- a/engine/reference/commandline/commit.md +++ b/engine/reference/commandline/commit.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/commit/ +description: The commit command description and usage +keywords: +- commit, file, changes +menu: + main: + parent: smn_cli +title: commit +--- # commit diff --git a/engine/reference/commandline/cp.md b/engine/reference/commandline/cp.md index b3a0b789165..e93f53ef665 100644 --- a/engine/reference/commandline/cp.md +++ b/engine/reference/commandline/cp.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/cp/ +description: The cp command description and usage +keywords: +- copy, container, files, folders +menu: + main: + parent: smn_cli +title: cp +--- # cp diff --git a/engine/reference/commandline/create.md b/engine/reference/commandline/create.md index 9e3dfdead54..0b864d8c0c0 100644 --- a/engine/reference/commandline/create.md +++ b/engine/reference/commandline/create.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/create/ +description: The create command description and usage +keywords: +- docker, create, container +menu: + main: + parent: smn_cli +title: create +--- # create diff --git a/engine/reference/commandline/deploy.md b/engine/reference/commandline/deploy.md index 908131e4bb4..cbca20d1092 100644 --- 
a/engine/reference/commandline/deploy.md +++ b/engine/reference/commandline/deploy.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/deploy/ +advisory: experimental +description: The deploy command description and usage +keywords: +- stack, deploy +menu: + main: + parent: smn_cli +title: deploy +--- # stack deploy (experimental) diff --git a/engine/reference/commandline/diff.md b/engine/reference/commandline/diff.md index 8c01b8cdf2b..34dbe12c346 100644 --- a/engine/reference/commandline/diff.md +++ b/engine/reference/commandline/diff.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/diff/ +description: The diff command description and usage +keywords: +- list, changed, files, container +menu: + main: + parent: smn_cli +title: diff +--- # diff diff --git a/engine/reference/commandline/dockerd.md b/engine/reference/commandline/dockerd.md index d134f6e1023..ff8b98130c0 100644 --- a/engine/reference/commandline/dockerd.md +++ b/engine/reference/commandline/dockerd.md @@ -1,14 +1,17 @@ - +--- +aliases: + - /reference/commandline/dockerd/ +aliases: +- /engine/reference/commandline/daemon/ +description: The daemon command description and usage +keywords: +- container, daemon, runtime +menu: + main: + parent: smn_cli + weight: -1 +title: dockerd +--- # daemon @@ -283,7 +286,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. #### Devicemapper options -* `dm.thinpooldev` +* `dm.thinpooldev` Specifies a custom block storage device to use for the thin pool. @@ -310,7 +313,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.thinpooldev=/dev/mapper/thin-pool ``` -* `dm.basesize` +* `dm.basesize` Specifies the size to use when creating the base device, which limits the size of images and containers. The default value is 10G. Note, thin devices @@ -328,7 +331,6 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.basesize=50G ``` - This will increase the base device size to 50G. The Docker daemon will throw an error if existing base device size is larger than 50G. A user can use this option to expand the base device size however shrinking is not permitted. @@ -349,7 +351,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.basesize=20G ``` -* `dm.loopdatasize` +* `dm.loopdatasize` > **Note**: > This option configures devicemapper loopback, which should not @@ -366,7 +368,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.loopdatasize=200G ``` -* `dm.loopmetadatasize` +* `dm.loopmetadatasize` > **Note**: > This option configures devicemapper loopback, which should not @@ -383,7 +385,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.loopmetadatasize=4G ``` -* `dm.fs` +* `dm.fs` Specifies the filesystem type to use for the base device. The supported options are "ext4" and "xfs". The default is "xfs" @@ -394,7 +396,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.fs=ext4 ``` -* `dm.mkfsarg` +* `dm.mkfsarg` Specifies extra mkfs arguments to be used when creating the base device. @@ -404,7 +406,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. 
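    # The argument is passed straight through to mkfs when the base device is
    # created; with an ext4 base device (`dm.fs=ext4`, assumed here),
    # `-O ^has_journal` builds the filesystem without a journal.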
$ sudo dockerd --storage-opt "dm.mkfsarg=-O ^has_journal" ``` -* `dm.mountopt` +* `dm.mountopt` Specifies extra mount options used when mounting the thin devices. @@ -414,7 +416,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.mountopt=nodiscard ``` -* `dm.datadev` +* `dm.datadev` (Deprecated, use `dm.thinpooldev`) @@ -432,7 +434,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. --storage-opt dm.metadatadev=/dev/sdc1 ``` -* `dm.metadatadev` +* `dm.metadatadev` (Deprecated, use `dm.thinpooldev`) @@ -456,7 +458,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. --storage-opt dm.metadatadev=/dev/sdc1 ``` -* `dm.blocksize` +* `dm.blocksize` Specifies a custom blocksize to use for the thin pool. The default blocksize is 64K. @@ -467,7 +469,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.blocksize=512K ``` -* `dm.blkdiscard` +* `dm.blkdiscard` Enables or disables the use of blkdiscard when removing devicemapper devices. This is enabled by default (only) if using loopback devices and is @@ -483,7 +485,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.blkdiscard=false ``` -* `dm.override_udev_sync_check` +* `dm.override_udev_sync_check` Overrides the `udev` synchronization checks between `devicemapper` and `udev`. `udev` is the device manager for the Linux kernel. @@ -523,7 +525,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. > Otherwise, set this flag for migrating existing Docker daemons to > a daemon with a supported environment. -* `dm.use_deferred_removal` +* `dm.use_deferred_removal` Enables use of deferred device removal if `libdm` and the kernel driver support the mechanism. @@ -545,7 +547,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. $ sudo dockerd --storage-opt dm.use_deferred_removal=true ``` -* `dm.use_deferred_deletion` +* `dm.use_deferred_deletion` Enables use of deferred device deletion for thin pool devices. By default, thin pool device deletion is synchronous. Before a container is deleted, @@ -571,7 +573,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. when unintentional leaking of mount point happens across multiple mount namespaces. -* `dm.min_free_space` +* `dm.min_free_space` Specifies the min free space percent in a thin pool require for new device creation to succeed. This check applies to both free data space as well @@ -619,7 +621,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. #### ZFS options -* `zfs.fsname` +* `zfs.fsname` Set zfs filesystem under which docker will create its own datasets. By default docker will pick up the zfs filesystem where docker graph @@ -633,7 +635,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. #### Btrfs options -* `btrfs.min_space` +* `btrfs.min_space` Specifies the mininum size to use when creating the subvolume which is used for containers. If user uses disk quota for btrfs when creating or running @@ -648,7 +650,7 @@ options for `zfs` start with `zfs` and options for `btrfs` start with `btrfs`. #### Overlay2 options -* `overlay2.override_kernel_check` +* `overlay2.override_kernel_check` Overrides the Linux kernel version check allowing overlay2. 
Support for specifying multiple lower directories needed by overlay2 was added to the @@ -676,19 +678,20 @@ Runtimes can be registered with the daemon either via the configuration file or using the `--add-runtime` command line argument. The following is an example adding 2 runtimes via the configuration: + ```json - "default-runtime": "runc", - "runtimes": { - "runc": { - "path": "runc" - }, - "custom": { - "path": "/usr/local/bin/my-runc-replacement", - "runtimeArgs": [ - "--debug" - ] - } +"default-runtime": "runc", +"runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] } +} ``` This is the same example via the command line: @@ -848,35 +851,35 @@ $ sudo dockerd \ The currently supported cluster store options are: -* `discovery.heartbeat` +* `discovery.heartbeat` Specifies the heartbeat timer in seconds which is used by the daemon as a keepalive mechanism to make sure discovery module treats the node as alive in the cluster. If not configured, the default value is 20 seconds. -* `discovery.ttl` +* `discovery.ttl` Specifies the ttl (time-to-live) in seconds which is used by the discovery module to timeout a node if a valid heartbeat is not received within the configured ttl value. If not configured, the default value is 60 seconds. -* `kv.cacertfile` +* `kv.cacertfile` Specifies the path to a local file with PEM encoded CA certificates to trust -* `kv.certfile` +* `kv.certfile` Specifies the path to a local file with a PEM encoded certificate. This certificate is used as the client cert for communication with the Key/Value store. -* `kv.keyfile` +* `kv.keyfile` Specifies the path to a local file with a PEM encoded private key. This private key is used as the client key for communication with the Key/Value store. -* `kv.path` +* `kv.path` Specifies the path in the Key/Value store. If not configured, the default value is 'docker/nodes'. 
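To show how the cluster-store options listed above fit together, here is a sketch of a daemon invocation against a hypothetical etcd endpoint with TLS client credentials; the address and certificate paths are placeholders and are not taken from this changeset.

```bash
# Start the daemon against an external key/value store (etcd in this sketch),
# advertising this node on eth0 and passing TLS client credentials for the
# key/value store via --cluster-store-opt.
$ sudo dockerd \
    --cluster-store etcd://192.168.99.100:2379 \
    --cluster-advertise eth0:2376 \
    --cluster-store-opt kv.cacertfile=/etc/docker/kv-ca.pem \
    --cluster-store-opt kv.certfile=/etc/docker/kv-cert.pem \
    --cluster-store-opt kv.keyfile=/etc/docker/kv-key.pem
```

The same keys can also be set in the daemon configuration file under `cluster-store-opts`, as in the full `daemon.json` example later in this patch.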
@@ -1108,74 +1111,71 @@ This is a full example of the allowed configuration options on Linux: ```json { - "authorization-plugins": [], - "dns": [], - "dns-opts": [], - "dns-search": [], - "exec-opts": [], - "exec-root": "", - "storage-driver": "", - "storage-opts": [], - "labels": [], - "live-restore": true, - "log-driver": "", - "log-opts": {}, - "mtu": 0, - "pidfile": "", - "graph": "", - "cluster-store": "", - "cluster-store-opts": {}, - "cluster-advertise": "", - "max-concurrent-downloads": 3, - "max-concurrent-uploads": 5, - "debug": true, - "hosts": [], - "log-level": "", - "tls": true, - "tlsverify": true, - "tlscacert": "", - "tlscert": "", - "tlskey": "", - "swarm-default-advertise-addr": "", - "api-cors-header": "", - "selinux-enabled": false, - "userns-remap": "", - "group": "", - "cgroup-parent": "", - "default-ulimits": {}, - "init": false, - "init-path": "/usr/libexec/docker-init", - "ipv6": false, - "iptables": false, - "ip-forward": false, - "ip-masq": false, - "userland-proxy": false, - "userland-proxy-path": "/usr/libexec/docker-proxy", - "ip": "0.0.0.0", - "bridge": "", - "bip": "", - "fixed-cidr": "", - "fixed-cidr-v6": "", - "default-gateway": "", - "default-gateway-v6": "", - "icc": false, - "raw-logs": false, - "registry-mirrors": [], - "insecure-registries": [], - "disable-legacy-registry": false, - "default-runtime": "runc", - "oom-score-adjust": -500, - "runtimes": { - "runc": { - "path": "runc" - }, - "custom": { - "path": "/usr/local/bin/my-runc-replacement", - "runtimeArgs": [ - "--debug" - ] - } - } + "api-cors-header": "", + "authorization-plugins": [], + "bip": "", + "bridge": "", + "cgroup-parent": "", + "cluster-store": "", + "cluster-store-opts": {}, + "cluster-advertise": "", + "debug": true, + "default-gateway": "", + "default-gateway-v6": "", + "default-runtime": "runc", + "default-ulimits": {}, + "disable-legacy-registry": false, + "dns": [], + "dns-opts": [], + "dns-search": [], + "exec-opts": [], + "exec-root": "", + "fixed-cidr": "", + "fixed-cidr-v6": "", + "graph": "", + "group": "", + "hosts": [], + "icc": false, + "insecure-registries": [], + "ip": "0.0.0.0", + "iptables": false, + "ipv6": false, + "ip-forward": false, + "ip-masq": false, + "labels": [], + "live-restore": true, + "log-driver": "", + "log-level": "", + "log-opts": {}, + "max-concurrent-downloads": 3, + "max-concurrent-uploads": 5, + "mtu": 0, + "oom-score-adjust": -500, + "pidfile": "", + "raw-logs": false, + "registry-mirrors": [], + "runtimes": { + "runc": { + "path": "runc" + }, + "custom": { + "path": "/usr/local/bin/my-runc-replacement", + "runtimeArgs": [ + "--debug" + ] + } + }, + "selinux-enabled": false, + "storage-driver": "", + "storage-opts": [], + "swarm-default-advertise-addr": "", + "tls": true, + "tlscacert": "", + "tlscert": "", + "tlskey": "", + "tlsverify": true, + "userland-proxy": false, + "userns-remap": "" } ``` @@ -1190,36 +1190,36 @@ This is a full example of the allowed configuration options on Windows: ```json { "authorization-plugins": [], + "bridge": "", + "cluster-advertise": "", + "cluster-store": "", + "debug": true, + "default-ulimits": {}, + "disable-legacy-registry": false, "dns": [], "dns-opts": [], "dns-search": [], "exec-opts": [], - "storage-driver": "", - "storage-opts": [], + "fixed-cidr": "", + "graph": "", + "group": "", + "hosts": [], + "insecure-registries": [], "labels": [], "live-restore": true, "log-driver": "", + "log-level": "", "mtu": 0, "pidfile": "", - "graph": "", - "cluster-store": "", - "cluster-advertise": "", - "debug": 
true, - "hosts": [], - "log-level": "", - "tlsverify": true, + "raw-logs": false, + "registry-mirrors": [], + "storage-driver": "", + "storage-opts": [], + "swarm-default-advertise-addr": "", "tlscacert": "", "tlscert": "", "tlskey": "", - "swarm-default-advertise-addr": "", - "group": "", - "default-ulimits": {}, - "bridge": "", - "fixed-cidr": "", - "raw-logs": false, - "registry-mirrors": [], - "insecure-registries": [], - "disable-legacy-registry": false + "tlsverify": true } ``` diff --git a/engine/reference/commandline/events.md b/engine/reference/commandline/events.md index 789bb57ffb3..5f3e92b010b 100644 --- a/engine/reference/commandline/events.md +++ b/engine/reference/commandline/events.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/events/ +description: The events command description and usage +keywords: +- events, container, report +menu: + main: + parent: smn_cli +title: events +--- # events diff --git a/engine/reference/commandline/exec.md b/engine/reference/commandline/exec.md index 3f422de6622..ab52d556a05 100644 --- a/engine/reference/commandline/exec.md +++ b/engine/reference/commandline/exec.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/exec/ +description: The exec command description and usage +keywords: +- command, container, run, execute +menu: + main: + parent: smn_cli +title: exec +--- # exec diff --git a/engine/reference/commandline/export.md b/engine/reference/commandline/export.md index 54e6e01a6ea..b1c0d480ad2 100644 --- a/engine/reference/commandline/export.md +++ b/engine/reference/commandline/export.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/export/ +description: The export command description and usage +keywords: +- export, file, system, container +menu: + main: + parent: smn_cli +title: export +--- # export diff --git a/engine/reference/commandline/history.md b/engine/reference/commandline/history.md index 895fd55ea97..6f979401170 100644 --- a/engine/reference/commandline/history.md +++ b/engine/reference/commandline/history.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/history/ +description: The history command description and usage +keywords: +- docker, image, history +menu: + main: + parent: smn_cli +title: history +--- # history diff --git a/engine/reference/commandline/images.md b/engine/reference/commandline/images.md index af21a8197dc..cb75147a82e 100644 --- a/engine/reference/commandline/images.md +++ b/engine/reference/commandline/images.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/images/ +description: The images command description and usage +keywords: +- list, docker, images +menu: + main: + parent: smn_cli +title: images +--- # images diff --git a/engine/reference/commandline/import.md b/engine/reference/commandline/import.md index 2d2c88b4e87..15bd60bf112 100644 --- a/engine/reference/commandline/import.md +++ b/engine/reference/commandline/import.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/import/ +description: The import command description and usage +keywords: +- import, file, system, container +menu: + main: + parent: smn_cli +title: import +--- # import diff --git a/engine/reference/commandline/index.md b/engine/reference/commandline/index.md index ed8d2996a8b..3051948004f 100644 --- a/engine/reference/commandline/index.md +++ b/engine/reference/commandline/index.md @@ -1,16 +1,16 @@ - - - +--- +aliases: + - /reference/commandline/ +description: Docker's CLI command description and usage +keywords: +- Docker, Docker 
documentation, CLI, command line +menu: + main: + identifier: smn_cli_guide + parent: smn_cli + weight: -70 +title: Docker commands +--- # The Docker commands diff --git a/engine/reference/commandline/info.md b/engine/reference/commandline/info.md index 3bf813c0573..0e0a6fefcca 100644 --- a/engine/reference/commandline/info.md +++ b/engine/reference/commandline/info.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/info/ +description: The info command description and usage +keywords: +- display, docker, information +menu: + main: + parent: smn_cli +title: info +--- # info diff --git a/engine/reference/commandline/inspect.md b/engine/reference/commandline/inspect.md index d5c073f26c1..15931a2b837 100644 --- a/engine/reference/commandline/inspect.md +++ b/engine/reference/commandline/inspect.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/inspect/ +description: The inspect command description and usage +keywords: +- inspect, container, json +menu: + main: + parent: smn_cli +title: inspect +--- # inspect diff --git a/engine/reference/commandline/kill.md b/engine/reference/commandline/kill.md index 55b11efad28..bef7d6ef6af 100644 --- a/engine/reference/commandline/kill.md +++ b/engine/reference/commandline/kill.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/kill/ +description: The kill command description and usage +keywords: +- container, kill, signal +menu: + main: + parent: smn_cli +title: kill +--- # kill diff --git a/engine/reference/commandline/load.md b/engine/reference/commandline/load.md index be8ed05cf21..a527fbec6ce 100644 --- a/engine/reference/commandline/load.md +++ b/engine/reference/commandline/load.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/load/ +description: The load command description and usage +keywords: +- stdin, tarred, repository +menu: + main: + parent: smn_cli +title: load +--- # load diff --git a/engine/reference/commandline/login.md b/engine/reference/commandline/login.md index 3308ae5d6bc..48ae18adede 100644 --- a/engine/reference/commandline/login.md +++ b/engine/reference/commandline/login.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/login/ +description: The login command description and usage +keywords: +- registry, login, image +menu: + main: + parent: smn_cli +title: login +--- # login @@ -52,7 +54,7 @@ This is the list of currently available credentials helpers and where you can download them from: - D-Bus Secret Service: https://github.com/docker/docker-credential-helpers/releases -- Apple OS X keychain: https://github.com/docker/docker-credential-helpers/releases +- Apple macOS keychain: https://github.com/docker/docker-credential-helpers/releases - Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases ### Usage diff --git a/engine/reference/commandline/logout.md b/engine/reference/commandline/logout.md index a073b34f88a..5374ddb8f60 100644 --- a/engine/reference/commandline/logout.md +++ b/engine/reference/commandline/logout.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/logout/ +description: The logout command description and usage +keywords: +- logout, docker, registry +menu: + main: + parent: smn_cli +title: logout +--- # logout diff --git a/engine/reference/commandline/logs.md b/engine/reference/commandline/logs.md index 437e7096315..64cbc38092d 100644 --- a/engine/reference/commandline/logs.md +++ b/engine/reference/commandline/logs.md @@ -1,12 +1,14 @@ - +--- +aliases: + - 
/reference/commandline/logs/ +description: The logs command description and usage +keywords: +- logs, retrieve, docker +menu: + main: + parent: smn_cli +title: logs +--- # logs diff --git a/engine/reference/commandline/menu.md b/engine/reference/commandline/menu.md deleted file mode 100644 index 9ade86d26a6..00000000000 --- a/engine/reference/commandline/menu.md +++ /dev/null @@ -1,26 +0,0 @@ - - - - -# The Docker commands - -This section contains reference information on using Docker's command line -client. Each command has a reference page along with samples. If you are -unfamiliar with the command line, you should start by reading about how to -[Use the Docker command line](cli.md). - -You start the Docker daemon with the command line. How you start the daemon -affects your Docker containers. For that reason you should also make sure to -read the [`dockerd`](dockerd.md) reference page. - -For a list of Docker commands see [Command line reference guide](index.md). diff --git a/engine/reference/commandline/network_connect.md b/engine/reference/commandline/network_connect.md index 5a9ed866efa..8e123aaddb2 100644 --- a/engine/reference/commandline/network_connect.md +++ b/engine/reference/commandline/network_connect.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/network_connect/ +description: The network connect command description and usage +keywords: +- network, connect, user-defined +menu: + main: + parent: smn_cli +title: network connect +--- # network connect diff --git a/engine/reference/commandline/network_create.md b/engine/reference/commandline/network_create.md index 8ffed10972d..bc04f2c2ba8 100644 --- a/engine/reference/commandline/network_create.md +++ b/engine/reference/commandline/network_create.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/network_create/ +description: The network create command description and usage +keywords: +- network, create +menu: + main: + parent: smn_cli +title: network create +--- # network create diff --git a/engine/reference/commandline/network_disconnect.md b/engine/reference/commandline/network_disconnect.md index 2d43ccb1e93..3f32ae07c39 100644 --- a/engine/reference/commandline/network_disconnect.md +++ b/engine/reference/commandline/network_disconnect.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/network_disconnect/ +description: The network disconnect command description and usage +keywords: +- network, disconnect, user-defined +menu: + main: + parent: smn_cli +title: network disconnect +--- # network disconnect diff --git a/engine/reference/commandline/network_inspect.md b/engine/reference/commandline/network_inspect.md index 9aebc66b5ad..ad795b78224 100644 --- a/engine/reference/commandline/network_inspect.md +++ b/engine/reference/commandline/network_inspect.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/network_inspect/ +description: The network inspect command description and usage +keywords: +- network, inspect, user-defined +menu: + main: + parent: smn_cli +title: network inspect +--- # network inspect diff --git a/engine/reference/commandline/network_ls.md b/engine/reference/commandline/network_ls.md index 18a65da06af..3cbe03d298b 100644 --- a/engine/reference/commandline/network_ls.md +++ b/engine/reference/commandline/network_ls.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/network_ls/ +description: The network ls command description and usage +keywords: +- network, list, user-defined +menu: + main: + parent: smn_cli +title: network ls +--- 
# docker network ls diff --git a/engine/reference/commandline/network_rm.md b/engine/reference/commandline/network_rm.md index d57254636af..4133f84b864 100644 --- a/engine/reference/commandline/network_rm.md +++ b/engine/reference/commandline/network_rm.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/network_rm/ +description: the network rm command description and usage +keywords: +- network, rm, user-defined +menu: + main: + parent: smn_cli +title: network rm +--- # network rm diff --git a/engine/reference/commandline/node_demote.md b/engine/reference/commandline/node_demote.md index 2cec22ac3b0..3d151ad31c8 100644 --- a/engine/reference/commandline/node_demote.md +++ b/engine/reference/commandline/node_demote.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/node_demote/ +description: The node demote command description and usage +keywords: +- node, demote +menu: + main: + parent: smn_cli +title: node demote +--- # node demote diff --git a/engine/reference/commandline/node_inspect.md b/engine/reference/commandline/node_inspect.md index 2f3370adbc6..a3f60bb90f3 100644 --- a/engine/reference/commandline/node_inspect.md +++ b/engine/reference/commandline/node_inspect.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/node_inspect/ +description: The node inspect command description and usage +keywords: +- node, inspect +menu: + main: + parent: smn_cli +title: node inspect +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # node inspect diff --git a/engine/reference/commandline/node_ls.md b/engine/reference/commandline/node_ls.md index e918b7d11ab..2c40faee4fb 100644 --- a/engine/reference/commandline/node_ls.md +++ b/engine/reference/commandline/node_ls.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/node_ls/ +description: The node ls command description and usage +keywords: +- node, list +menu: + main: + parent: smn_cli +title: node ls +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # node ls diff --git a/engine/reference/commandline/node_promote.md b/engine/reference/commandline/node_promote.md index 7f5830d92a3..6f11fc495fe 100644 --- a/engine/reference/commandline/node_promote.md +++ b/engine/reference/commandline/node_promote.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/node_promote/ +description: The node promote command description and usage +keywords: +- node, promote +menu: + main: + parent: smn_cli +title: node promote +--- # node promote diff --git a/engine/reference/commandline/node_ps.md b/engine/reference/commandline/node_ps.md index 538132408af..abc0dbe10bd 100644 --- a/engine/reference/commandline/node_ps.md +++ b/engine/reference/commandline/node_ps.md @@ -1,13 +1,19 @@ - +--- +aliases: + - /reference/commandline/node_ps/ +aliases: +- /engine/reference/commandline/node_tasks/ +description: The node ps command description and usage +keywords: +- node, tasks +- ps +menu: + main: + parent: smn_cli +title: node ps +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. 
# node ps diff --git a/engine/reference/commandline/node_rm.md b/engine/reference/commandline/node_rm.md index ee6dcf72121..5847e35b9d7 100644 --- a/engine/reference/commandline/node_rm.md +++ b/engine/reference/commandline/node_rm.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/node_rm/ +description: The node rm command description and usage +keywords: +- node, remove +menu: + main: + parent: smn_cli +title: node rm +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # node rm diff --git a/engine/reference/commandline/node_update.md b/engine/reference/commandline/node_update.md index 5205a7d96aa..fff98ea0113 100644 --- a/engine/reference/commandline/node_update.md +++ b/engine/reference/commandline/node_update.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/node_update/ +description: The node update command description and usage +keywords: +- resources, update, dynamically +menu: + main: + parent: smn_cli +title: node update +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. ## update diff --git a/engine/reference/commandline/pause.md b/engine/reference/commandline/pause.md index 629c6ed0915..731bd40cce1 100644 --- a/engine/reference/commandline/pause.md +++ b/engine/reference/commandline/pause.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/pause/ +description: The pause command description and usage +keywords: +- cgroups, container, suspend, SIGSTOP +menu: + main: + parent: smn_cli +title: pause +--- # pause diff --git a/engine/reference/commandline/plugin_disable.md b/engine/reference/commandline/plugin_disable.md index eae31975e10..3003eb82b66 100644 --- a/engine/reference/commandline/plugin_disable.md +++ b/engine/reference/commandline/plugin_disable.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/plugin_disable/ +advisory: experimental +description: the plugin disable command description and usage +keywords: +- plugin, disable +menu: + main: + parent: smn_cli +title: plugin disable +--- # plugin disable (experimental) diff --git a/engine/reference/commandline/plugin_enable.md b/engine/reference/commandline/plugin_enable.md index 44ef3c2fa3c..f732843f2ca 100644 --- a/engine/reference/commandline/plugin_enable.md +++ b/engine/reference/commandline/plugin_enable.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/plugin_enable/ +advisory: experimental +description: the plugin enable command description and usage +keywords: +- plugin, enable +menu: + main: + parent: smn_cli +title: plugin enable +--- # plugin enable (experimental) diff --git a/engine/reference/commandline/plugin_inspect.md b/engine/reference/commandline/plugin_inspect.md index e8c219d2661..0b50097afa7 100755 --- a/engine/reference/commandline/plugin_inspect.md +++ b/engine/reference/commandline/plugin_inspect.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/plugin_inspect/ +advisory: experimental +description: The plugin inspect command description and usage +keywords: +- plugin, inspect +menu: + main: + parent: smn_cli +title: plugin inspect +--- # plugin inspect (experimental) diff --git a/engine/reference/commandline/plugin_install.md b/engine/reference/commandline/plugin_install.md index f3b26ab0c83..cca4c71773f 100644 --- a/engine/reference/commandline/plugin_install.md +++ 
b/engine/reference/commandline/plugin_install.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/plugin_install/ +advisory: experimental +description: the plugin install command description and usage +keywords: +- plugin, install +menu: + main: + parent: smn_cli +title: plugin install +--- # plugin install (experimental) diff --git a/engine/reference/commandline/plugin_ls.md b/engine/reference/commandline/plugin_ls.md index c73fb72dd2a..86539feabd4 100644 --- a/engine/reference/commandline/plugin_ls.md +++ b/engine/reference/commandline/plugin_ls.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/plugin_ls/ +advisory: experimental +description: The plugin ls command description and usage +keywords: +- plugin, list +menu: + main: + parent: smn_cli +title: plugin ls +--- # plugin ls (experimental) diff --git a/engine/reference/commandline/plugin_rm.md b/engine/reference/commandline/plugin_rm.md index 6b6a2239b62..7f436557bb2 100644 --- a/engine/reference/commandline/plugin_rm.md +++ b/engine/reference/commandline/plugin_rm.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/plugin_rm/ +advisory: experimental +description: the plugin rm command description and usage +keywords: +- plugin, rm +menu: + main: + parent: smn_cli +title: plugin rm +--- # plugin rm (experimental) diff --git a/engine/reference/commandline/port.md b/engine/reference/commandline/port.md index e8da943c365..290581761a5 100644 --- a/engine/reference/commandline/port.md +++ b/engine/reference/commandline/port.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/port/ +description: The port command description and usage +keywords: +- port, mapping, container +menu: + main: + parent: smn_cli +title: port +--- # port diff --git a/engine/reference/commandline/ps.md b/engine/reference/commandline/ps.md index 541979e8197..16b8543756c 100644 --- a/engine/reference/commandline/ps.md +++ b/engine/reference/commandline/ps.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/ps/ +description: The ps command description and usage +keywords: +- container, running, list +menu: + main: + parent: smn_cli +title: ps +--- # ps diff --git a/engine/reference/commandline/pull.md b/engine/reference/commandline/pull.md index f10c1348631..3fcec64c6bd 100644 --- a/engine/reference/commandline/pull.md +++ b/engine/reference/commandline/pull.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/pull/ +description: The pull command description and usage +keywords: +- pull, image, hub, docker +menu: + main: + parent: smn_cli +title: pull +--- # pull diff --git a/engine/reference/commandline/push.md b/engine/reference/commandline/push.md index 9b70fd3516a..55a96427cb9 100644 --- a/engine/reference/commandline/push.md +++ b/engine/reference/commandline/push.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/push/ +description: The push command description and usage +keywords: +- share, push, image +menu: + main: + parent: smn_cli +title: push +--- # push diff --git a/engine/reference/commandline/rename.md b/engine/reference/commandline/rename.md index f5a4fe23ec6..1d391c98d75 100644 --- a/engine/reference/commandline/rename.md +++ b/engine/reference/commandline/rename.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/rename/ +description: The rename command description and usage +keywords: +- rename, docker, container +menu: + main: + parent: smn_cli +title: rename +--- # rename diff --git a/engine/reference/commandline/restart.md 
b/engine/reference/commandline/restart.md index 9e40a4f6c46..2286ef52e3e 100644 --- a/engine/reference/commandline/restart.md +++ b/engine/reference/commandline/restart.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/restart/ +description: The restart command description and usage +keywords: +- restart, container, Docker +menu: + main: + parent: smn_cli +title: restart +--- # restart diff --git a/engine/reference/commandline/rm.md b/engine/reference/commandline/rm.md index 319ef4dbbc4..fad972aad6c 100644 --- a/engine/reference/commandline/rm.md +++ b/engine/reference/commandline/rm.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/rm/ +description: The rm command description and usage +keywords: +- remove, Docker, container +menu: + main: + parent: smn_cli +title: rm +--- # rm diff --git a/engine/reference/commandline/rmi.md b/engine/reference/commandline/rmi.md index 328d9fe1403..a5e2f71fd60 100644 --- a/engine/reference/commandline/rmi.md +++ b/engine/reference/commandline/rmi.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/rmi/ +description: The rmi command description and usage +keywords: +- remove, image, Docker +menu: + main: + parent: smn_cli +title: rmi +--- # rmi diff --git a/engine/reference/commandline/run.md b/engine/reference/commandline/run.md index b169f9c4765..b2ca3d374ab 100644 --- a/engine/reference/commandline/run.md +++ b/engine/reference/commandline/run.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/run/ +description: The run command description and usage +keywords: +- run, command, container +menu: + main: + parent: smn_cli +title: run +--- # run diff --git a/engine/reference/commandline/save.md b/engine/reference/commandline/save.md index f7d1fdedcb8..da74b554101 100644 --- a/engine/reference/commandline/save.md +++ b/engine/reference/commandline/save.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/save/ +description: The save command description and usage +keywords: +- tarred, repository, backup +menu: + main: + parent: smn_cli +title: save +--- # save diff --git a/engine/reference/commandline/search.md b/engine/reference/commandline/search.md index 988db8bf1f4..3d77c197941 100644 --- a/engine/reference/commandline/search.md +++ b/engine/reference/commandline/search.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/search/ +description: The search command description and usage +keywords: +- search, hub, images +menu: + main: + parent: smn_cli +title: search +--- # search diff --git a/engine/reference/commandline/service_create.md b/engine/reference/commandline/service_create.md index 059996dadfd..3b522706cb5 100644 --- a/engine/reference/commandline/service_create.md +++ b/engine/reference/commandline/service_create.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/service_create/ +description: The service create command description and usage +keywords: +- service, create +menu: + main: + parent: smn_cli +title: service create +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. 
# service create diff --git a/engine/reference/commandline/service_inspect.md b/engine/reference/commandline/service_inspect.md index e24927f433b..6c25fa88657 100644 --- a/engine/reference/commandline/service_inspect.md +++ b/engine/reference/commandline/service_inspect.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/service_inspect/ +description: The service inspect command description and usage +keywords: +- service, inspect +menu: + main: + parent: smn_cli +title: service inspect +--- # service inspect diff --git a/engine/reference/commandline/service_ls.md b/engine/reference/commandline/service_ls.md index 68c3ade6ce7..ea1fe7a8856 100644 --- a/engine/reference/commandline/service_ls.md +++ b/engine/reference/commandline/service_ls.md @@ -1,12 +1,13 @@ - +--- +aliases: + - /reference/commandline/service_ls/ +description: The service ls command description and usage +keywords: +- service, ls +title: service ls +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # service ls @@ -28,6 +29,7 @@ This command when run targeting a manager, lists services are running in the swarm. On a manager node: + ```bash ID NAME REPLICAS IMAGE COMMAND c8wgl7q4ndfd frontend 5/5 nginx:alpine diff --git a/engine/reference/commandline/service_ps.md b/engine/reference/commandline/service_ps.md index d6f9de5f279..2b225ad2b49 100644 --- a/engine/reference/commandline/service_ps.md +++ b/engine/reference/commandline/service_ps.md @@ -1,13 +1,17 @@ - +--- +aliases: + - /reference/commandline/service_ps/ +aliases: +- /engine/reference/commandline/service_tasks/ +description: The service ps command description and usage +keywords: +- service, tasks +- ps +menu: + main: + parent: smn_cli +title: service ps +--- # service ps diff --git a/engine/reference/commandline/service_rm.md b/engine/reference/commandline/service_rm.md index edd9b417ed1..f09fcba58e8 100644 --- a/engine/reference/commandline/service_rm.md +++ b/engine/reference/commandline/service_rm.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/service_rm/ +description: The service rm command description and usage +keywords: +- service, rm +menu: + main: + parent: smn_cli +title: service rm +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # service rm diff --git a/engine/reference/commandline/service_scale.md b/engine/reference/commandline/service_scale.md index f4356945e56..e730c643dd2 100644 --- a/engine/reference/commandline/service_scale.md +++ b/engine/reference/commandline/service_scale.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/service_scale/ +description: The service scale command description and usage +keywords: +- service, scale +menu: + main: + parent: smn_cli +title: service scale +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # service scale @@ -23,8 +27,8 @@ Options: ### Scale a service -If you scale a service, you set the *desired* number of replicas. Even though -the command returns directly, actual scaling of the service may take some time. +The scale command enables you to scale one or more services either up or down to the desired number of replicas. The command will return immediatly, but the actual scaling of the service may take some time. 
To stop all replicas of a service while keeping the service active in the swarm you can set the scale to 0. + For example, the following command scales the "frontend" service to 50 tasks. diff --git a/engine/reference/commandline/service_update.md b/engine/reference/commandline/service_update.md index dd65bb288ff..d8a8ed82298 100644 --- a/engine/reference/commandline/service_update.md +++ b/engine/reference/commandline/service_update.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/service_update/ +description: The service update command description and usage +keywords: +- service, update +menu: + main: + parent: smn_cli +title: service update +--- # service update diff --git a/engine/reference/commandline/stack_config.md b/engine/reference/commandline/stack_config.md index 9a39d271160..d9c36611809 100644 --- a/engine/reference/commandline/stack_config.md +++ b/engine/reference/commandline/stack_config.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/stack_config/ +advisory: experimental +description: The stack config command description and usage +keywords: +- stack, config +menu: + main: + parent: smn_cli +title: stack config +--- # stack config (experimental) diff --git a/engine/reference/commandline/stack_deploy.md b/engine/reference/commandline/stack_deploy.md index bcafb7f6868..837b8830e1e 100644 --- a/engine/reference/commandline/stack_deploy.md +++ b/engine/reference/commandline/stack_deploy.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/stack_deploy/ +advisory: experimental +description: The stack deploy command description and usage +keywords: +- stack, deploy, up +menu: + main: + parent: smn_cli +title: stack deploy +--- # stack deploy (experimental) diff --git a/engine/reference/commandline/stack_rm.md b/engine/reference/commandline/stack_rm.md index 5d4e4d57be2..8dda38af15a 100644 --- a/engine/reference/commandline/stack_rm.md +++ b/engine/reference/commandline/stack_rm.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/stack_rm/ +advisory: experimental +description: The stack rm command description and usage +keywords: +- stack, rm, remove, down +menu: + main: + parent: smn_cli +title: stack rm +--- # stack rm (experimental) diff --git a/engine/reference/commandline/stack_services.md b/engine/reference/commandline/stack_services.md index 8f28410bf3b..72125e8d064 100644 --- a/engine/reference/commandline/stack_services.md +++ b/engine/reference/commandline/stack_services.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/stack_services/ +advisory: experimental +description: The stack services command description and usage +keywords: +- stack, services +menu: + main: + parent: smn_cli +title: stack services +--- # stack services (experimental) diff --git a/engine/reference/commandline/stack_tasks.md b/engine/reference/commandline/stack_tasks.md index 24b00e69ccd..39b7ecb5c18 100644 --- a/engine/reference/commandline/stack_tasks.md +++ b/engine/reference/commandline/stack_tasks.md @@ -1,13 +1,15 @@ - +--- +aliases: + - /reference/commandline/stack_tasks/ +advisory: experimental +description: The stack tasks command description and usage +keywords: +- stack, tasks +menu: + main: + parent: smn_cli +title: stack tasks +--- # stack tasks (experimental) diff --git a/engine/reference/commandline/start.md b/engine/reference/commandline/start.md index 72ff7e1002a..2267766b9c8 100644 --- a/engine/reference/commandline/start.md +++ b/engine/reference/commandline/start.md @@ -1,12 +1,14 @@ - +--- +aliases: + - 
/reference/commandline/start/ +description: The start command description and usage +keywords: +- Start, container, stopped +menu: + main: + parent: smn_cli +title: start +--- # start diff --git a/engine/reference/commandline/stats.md b/engine/reference/commandline/stats.md index d6d6ed0e1fc..8d3b5f8fc9b 100644 --- a/engine/reference/commandline/stats.md +++ b/engine/reference/commandline/stats.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/stats/ +description: The stats command description and usage +keywords: +- container, resource, statistics +menu: + main: + parent: smn_cli +title: stats +--- # stats diff --git a/engine/reference/commandline/stop.md b/engine/reference/commandline/stop.md index 662255846f8..57c78b87648 100644 --- a/engine/reference/commandline/stop.md +++ b/engine/reference/commandline/stop.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/stop/ +description: The stop command description and usage +keywords: +- stop, SIGKILL, SIGTERM +menu: + main: + parent: smn_cli +title: stop +--- # stop diff --git a/engine/reference/commandline/swarm_init.md b/engine/reference/commandline/swarm_init.md index 6df0cdb1d06..05a38d4b4f1 100644 --- a/engine/reference/commandline/swarm_init.md +++ b/engine/reference/commandline/swarm_init.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/swarm_init/ +description: The swarm init command description and usage +keywords: +- swarm, init +menu: + main: + parent: smn_cli +title: swarm init +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # swarm init diff --git a/engine/reference/commandline/swarm_join.md b/engine/reference/commandline/swarm_join.md index c0a7d91cc3e..5c0c42f7c2c 100644 --- a/engine/reference/commandline/swarm_join.md +++ b/engine/reference/commandline/swarm_join.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/swarm_join/ +description: The swarm join command description and usage +keywords: +- swarm, join +menu: + main: + parent: smn_cli +title: swarm join +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # swarm join diff --git a/engine/reference/commandline/swarm_join_token.md b/engine/reference/commandline/swarm_join_token.md index f808d84244f..f1ab65757d9 100644 --- a/engine/reference/commandline/swarm_join_token.md +++ b/engine/reference/commandline/swarm_join_token.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/swarm_join_token/ +description: The swarm join-token command description and usage +keywords: +- swarm, join-token +menu: + main: + parent: smn_cli +title: swarm join-token +--- # swarm join-token diff --git a/engine/reference/commandline/swarm_leave.md b/engine/reference/commandline/swarm_leave.md index 112813466bb..08deaaa627c 100644 --- a/engine/reference/commandline/swarm_leave.md +++ b/engine/reference/commandline/swarm_leave.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/swarm_leave/ +description: The swarm leave command description and usage +keywords: +- swarm, leave +menu: + main: + parent: smn_cli +title: swarm leave +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # swarm leave @@ -30,6 +34,7 @@ without using `--force`. 
Only use `--force` in situations where the swarm will no longer be used after the manager leaves, such as in a single-node swarm. Consider the following swarm, as seen from the manager: + ```bash $ docker node ls ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS @@ -39,10 +44,12 @@ dvfxp4zseq4s0rih1selh0d20 * manager1 Ready Active Leader ``` To remove `worker2`, issue the following command from `worker2` itself: + ```bash $ docker swarm leave Node left the default swarm. ``` + To remove an inactive node, use the [`node rm`](node_rm.md) command instead. ## Related information diff --git a/engine/reference/commandline/swarm_update.md b/engine/reference/commandline/swarm_update.md index 5299be257df..49898100b05 100644 --- a/engine/reference/commandline/swarm_update.md +++ b/engine/reference/commandline/swarm_update.md @@ -1,12 +1,16 @@ - +--- +aliases: + - /reference/commandline/swarm_update/ +description: The swarm update command description and usage +keywords: +- swarm, update +menu: + main: + parent: smn_cli +title: swarm update +--- + +**Warning:** this command is part of the Swarm management feature introduced in Docker 1.12, and might be subject to non backward-compatible changes. # swarm update diff --git a/engine/reference/commandline/tag.md b/engine/reference/commandline/tag.md index 60692958f1f..728216e2a18 100644 --- a/engine/reference/commandline/tag.md +++ b/engine/reference/commandline/tag.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/tag/ +description: The tag command description and usage +keywords: +- tag, name, image +menu: + main: + parent: smn_cli +title: tag +--- # tag diff --git a/engine/reference/commandline/top.md b/engine/reference/commandline/top.md index 291f96ce093..007c9f17dd1 100644 --- a/engine/reference/commandline/top.md +++ b/engine/reference/commandline/top.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/top/ +description: The top command description and usage +keywords: +- container, running, processes +menu: + main: + parent: smn_cli +title: top +--- # top diff --git a/engine/reference/commandline/unpause.md b/engine/reference/commandline/unpause.md index e5c9d506e04..5d3cd0e20c3 100644 --- a/engine/reference/commandline/unpause.md +++ b/engine/reference/commandline/unpause.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/unpause/ +description: The unpause command description and usage +keywords: +- cgroups, suspend, container +menu: + main: + parent: smn_cli +title: unpause +--- # unpause diff --git a/engine/reference/commandline/update.md b/engine/reference/commandline/update.md index bacecd0106e..217d73510be 100644 --- a/engine/reference/commandline/update.md +++ b/engine/reference/commandline/update.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/update/ +description: The update command description and usage +keywords: +- resources, update, dynamically +menu: + main: + parent: smn_cli +title: update +--- ## update diff --git a/engine/reference/commandline/version.md b/engine/reference/commandline/version.md index e650e41a2d1..9689e11af5c 100644 --- a/engine/reference/commandline/version.md +++ b/engine/reference/commandline/version.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/version/ +description: The version command description and usage +keywords: +- version, architecture, api +menu: + main: + parent: smn_cli +title: version +--- # version diff --git a/engine/reference/commandline/volume_create.md b/engine/reference/commandline/volume_create.md index 
65dbba2f641..c58e4906f14 100644 --- a/engine/reference/commandline/volume_create.md +++ b/engine/reference/commandline/volume_create.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/volume_create/ +description: The volume create command description and usage +keywords: +- volume, create +menu: + main: + parent: smn_cli +title: volume create +--- # volume create diff --git a/engine/reference/commandline/volume_inspect.md b/engine/reference/commandline/volume_inspect.md index fac9438e3d3..210f30bc4e0 100644 --- a/engine/reference/commandline/volume_inspect.md +++ b/engine/reference/commandline/volume_inspect.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/volume_inspect/ +description: The volume inspect command description and usage +keywords: +- volume, inspect +menu: + main: + parent: smn_cli +title: volume inspect +--- # volume inspect diff --git a/engine/reference/commandline/volume_ls.md b/engine/reference/commandline/volume_ls.md index 34e2ae927d4..0f9d67ec7e0 100644 --- a/engine/reference/commandline/volume_ls.md +++ b/engine/reference/commandline/volume_ls.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/volume_ls/ +description: The volume ls command description and usage +keywords: +- volume, list +menu: + main: + parent: smn_cli +title: volume ls +--- # volume ls diff --git a/engine/reference/commandline/volume_rm.md b/engine/reference/commandline/volume_rm.md index aa66684259c..3f63ffb798b 100644 --- a/engine/reference/commandline/volume_rm.md +++ b/engine/reference/commandline/volume_rm.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/volume_rm/ +description: the volume rm command description and usage +keywords: +- volume, rm +menu: + main: + parent: smn_cli +title: volume rm +--- # volume rm diff --git a/engine/reference/commandline/wait.md b/engine/reference/commandline/wait.md index b6fd68dbdb9..19a38cdf7ed 100644 --- a/engine/reference/commandline/wait.md +++ b/engine/reference/commandline/wait.md @@ -1,12 +1,14 @@ - +--- +aliases: + - /reference/commandline/wait/ +description: The wait command description and usage +keywords: +- container, stop, wait +menu: + main: + parent: smn_cli +title: wait +--- # wait diff --git a/engine/reference/glossary.md b/engine/reference/glossary.md index d386d52bbbf..902d61e3e6d 100644 --- a/engine/reference/glossary.md +++ b/engine/reference/glossary.md @@ -115,7 +115,7 @@ Examples : - Linux : ext4, aufs, btrfs, zfs - Windows : NTFS -- OS X : HFS+ +- macOS : HFS+ ## image @@ -276,7 +276,7 @@ A virtual machine is a program that emulates a complete computer and imitates de It shares physical hardware resources with other users but isolates the operating system. The end user has the same experience on a Virtual Machine as they would have on dedicated hardware. -Compared to to containers, a virtual machine is heavier to run, provides more isolation, +Compared to containers, a virtual machine is heavier to run, provides more isolation, gets its own set of resources and does minimal sharing. *Also known as : VM* diff --git a/engine/security/certificates.md b/engine/security/certificates.md index 698c06ebfe9..fb102c37ce8 100644 --- a/engine/security/certificates.md +++ b/engine/security/certificates.md @@ -67,7 +67,7 @@ key and then use the key to create the certificate. > **Note:** > These TLS commands will only generate a working set of certificates on Linux. 
-> The version of OpenSSL in Mac OS X is incompatible with the type of +> The version of OpenSSL in macOS is incompatible with the type of > certificate Docker requires. ## Troubleshooting tips diff --git a/engine/security/https.md b/engine/security/https.md index b106471de96..cafd7719f27 100644 --- a/engine/security/https.md +++ b/engine/security/https.md @@ -28,7 +28,7 @@ it will only connect to servers with a certificate signed by that CA. > **Warning**: > These TLS commands will only generate a working set of certificates on Linux. -> Mac OS X comes with a version of OpenSSL that is incompatible with the +> macOS comes with a version of OpenSSL that is incompatible with the > certificates that Docker requires. ## Create a CA, server and client keys with OpenSSL diff --git a/engine/security/https/README.md b/engine/security/https/README.md index 9bd340a5c88..a6fbdcbf700 100644 --- a/engine/security/https/README.md +++ b/engine/security/https/README.md @@ -1,10 +1,6 @@ - - - +--- +published: false +--- This is an initial attempt to make it easier to test the examples in the https.md doc. diff --git a/engine/security/non-events.md b/engine/security/non-events.md index e0689441203..7f98410a5db 100644 --- a/engine/security/non-events.md +++ b/engine/security/non-events.md @@ -89,3 +89,10 @@ Bugs *not* mitigated: the kernel's non-maskable interrupt handling allowed privilege escalation. Can be exploited in Docker containers because the `modify_ldt()` system call is not currently blocked using seccomp. +* [CVE-2016-5195](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-5195): +A race condition was found in the way the Linux kernel's memory subsystem +handled the copy-on-write (COW) breakage of private read-only memory mappings, +which allowed unprivileged local users to gain write access to read-only memory. +Also known as "dirty COW." +*Partial mitigations:* on some operating systems this vulnerability is mitigated +by the combination of seccomp filtering of `ptrace` and the fact that `/proc/self/mem` is read-only. diff --git a/engine/security/security.md b/engine/security/security.md index 7a17f869f89..52b2a76cfa0 100644 --- a/engine/security/security.md +++ b/engine/security/security.md @@ -1,14 +1,16 @@ - +--- +aliases: +- /engine/articles/security/ +- /security/security/ +description: Review of the Docker Daemon attack surface +keywords: +- Docker, Docker documentation, security +menu: + main: + parent: smn_secure_docker + weight: -99 +title: Docker security +--- # Docker security @@ -196,7 +198,7 @@ to the host. This won't affect regular web apps; but malicious users will find that the arsenal at their disposal has shrunk considerably! By default Docker drops all capabilities except [those -needed](https://github.com/docker/docker/blob/master/oci/defaults_linux.go#L64-L79), +needed](https://github.com/docker/docker/blob/master/oci/defaults_linux.go#L62-L77), a whitelist instead of a blacklist approach. You can see a full list of available capabilities in [Linux manpages](http://man7.org/linux/man-pages/man7/capabilities.7.html). diff --git a/engine/security/trust/trust_sandbox.md b/engine/security/trust/trust_sandbox.md index 6d288fb9c33..61e14f2d5ec 100644 --- a/engine/security/trust/trust_sandbox.md +++ b/engine/security/trust/trust_sandbox.md @@ -19,7 +19,7 @@ overview](content_trust.md). ### Prerequisites -These instructions assume you are running in Linux or Mac OS X. You can run +These instructions assume you are running in Linux or macOS. 
You can run this sandbox on a local machine or on a virtual machine. You will need to have privileges to run docker commands on your local machine or in the VM. @@ -34,7 +34,7 @@ Finally, you'll need to have a text editor installed on your local system or VM. ## What is in the sandbox? If you are just using trust out-of-the-box you only need your Docker Engine -client and access to the Docker hub. The sandbox mimics a +client and access to the Docker Hub. The sandbox mimics a production trust environment, and sets up these additional components. | Container | Description | @@ -70,8 +70,8 @@ the `trustsandbox` container, the Notary server, and the Registry server. 1. Create a new `trustsandbox` directory and change into it. - $ mkdir `trustsandbox` - $ cd `trustsandbox` + $ mkdir trustsandbox + $ cd trustsandbox 2. Create a filed called `docker-compose.yml` with your favorite editor. For example, using vim: diff --git a/engine/static_files/README.md b/engine/static_files/README.md index 0b93167bdea..3caa5d663df 100644 --- a/engine/static_files/README.md +++ b/engine/static_files/README.md @@ -1,8 +1,6 @@ - +--- +published: false +--- Static files dir ================ diff --git a/engine/swarm/admin_guide.md b/engine/swarm/admin_guide.md index 7df810a1196..b663913cf7e 100644 --- a/engine/swarm/admin_guide.md +++ b/engine/swarm/admin_guide.md @@ -263,3 +263,33 @@ The `--force-new-cluster` flag puts the Docker Engine into swarm mode as a manager node of a single-node swarm. It discards swarm membership information that existed before the loss of the quorum but it retains data necessary to the Swarm such as services, tasks and the list of worker nodes. + +### Forcing the swarm to rebalance + +Generally, you do not need to force the swarm to rebalance its tasks. When you +add a new node to a swarm, or a node reconnects to the swarm after a +period of unavailability, the swarm does not automatically give a workload to +the idle node. This is a design decision. If the swarm periodically shifted tasks +to different nodes for the sake of balance, the clients using those tasks would +be disrupted. The goal is to avoid disrupting running services for the sake of +balance across the swarm. When new tasks start, or when a node with running +tasks becomes unavailable, those tasks are given to less busy nodes. The goal +is eventual balance, with minimal disruption to the end user. + +If you are concerned about an even balance of load and don't mind disrupting +running tasks, you can force your swarm to re-balance by temporarily scaling +the service upward. + +Use `docker service inspect --pretty ` to see the configured scale +of a service. When you use `docker service scale`, the nodes with the lowest +number of tasks are targeted to receive the new workloads. There may be multiple +under-loaded nodes in your swarm. You may need to scale the service up by modest +increments a few times to achieve the balance you want across all the nodes. + +When the load is balanced to your satisfaction, you can scale the service back +down to the original scale. You can use `docker service ps` to assess the current +balance of your service across nodes. + +See also +[`docker service scale`](../reference/commandline/service_scale.md) and +[`docker service ps`](../reference/commandline/service_ps.md). 
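The rebalancing workflow described above can be sketched as follows, assuming a hypothetical service named `my-web` that normally runs 10 replicas; the name and replica counts are illustrative only.

```bash
# Check the currently configured scale of the service.
$ docker service inspect --pretty my-web

# Temporarily scale up; new tasks are scheduled on the least busy nodes.
$ docker service scale my-web=15

# See how tasks are now distributed across the nodes.
$ docker service ps my-web

# When the distribution looks acceptable, return to the original scale.
$ docker service scale my-web=10
```

Scaling up in modest increments and checking `docker service ps` between steps keeps disruption to running tasks low.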
diff --git a/engine/swarm/how-swarm-mode-works/menu.md b/engine/swarm/how-swarm-mode-works/menu.md deleted file mode 100644 index c83b74b53aa..00000000000 --- a/engine/swarm/how-swarm-mode-works/menu.md +++ /dev/null @@ -1,19 +0,0 @@ - - -# How swarm mode works guide - -## TOC - -* [How nodes work](nodes.md) -* [How services work](services.md) -* [How PKI works](pki.md) diff --git a/engine/swarm/key-concepts.md b/engine/swarm/key-concepts.md index e0e88e22b9e..81535bb0bf2 100644 --- a/engine/swarm/key-concepts.md +++ b/engine/swarm/key-concepts.md @@ -73,7 +73,9 @@ run on the assigned node or fail. The swarm manager uses **ingress load balancing** to expose the services you want to make available externally to the swarm. The swarm manager can automatically assign the service a **PublishedPort** or you can configure a -PublishedPort for the service in the 30000-32767 range. +PublishedPort for the service. You can specify any unused port. If you do not +specify a port, the swarm manager assigns the service a port in the 30000-32767 +range. External components, such as cloud load balancers, can access the service on the PublishedPort of any node in the cluster whether or not the node is currently diff --git a/engine/swarm/menu.md b/engine/swarm/menu.md deleted file mode 100644 index 369877a3211..00000000000 --- a/engine/swarm/menu.md +++ /dev/null @@ -1,21 +0,0 @@ - - - -## Use Docker Engine to create and manage a swarm - -This section contains the following topics: - -* [Docker swarm mode overview](index.md) -* [Docker swarm mode key concepts](key-concepts.md) -* [Getting Started with Docker swarm mode](swarm-tutorial/index.md) diff --git a/engine/swarm/networking.md b/engine/swarm/networking.md index 695d4df5676..db6d259bf0c 100644 --- a/engine/swarm/networking.md +++ b/engine/swarm/networking.md @@ -1,14 +1,17 @@ - +--- +description: Use swarm mode networking features +keywords: +- guide +- swarm mode +- swarm +- network +menu: + main: + identifier: networking-guide + parent: engine_swarm + weight: 16 +title: Attach services to an overlay network +--- # Attach services to an overlay network @@ -156,7 +159,7 @@ $ docker network inspect my-network ``` In the example above, the container `my-web.1.63s86gf6a0ms34mvboniev7bs` for the -`my-web` service is attached to the `my-network` network on node2. +`my-web` service is attached to the `my-network` network on node1. ## Use swarm mode service discovery @@ -172,9 +175,9 @@ active tasks. You can inspect the service to view the virtual IP. For example: -```bash +```liquid $ docker service inspect \ - --format='{{json .Endpoint.VirtualIPs}}' \ + --format='{% raw %}{{json .Endpoint.VirtualIPs}}{% endraw %}' \ my-web [{"NetworkID":"7m2rjx0a97n88wzr4nu8772r3" "Addr":"10.0.0.2/24"}] @@ -184,7 +187,7 @@ The following example shows how you can add a `busybox` service on the same network as the `nginx` service and the busybox service is able to access `nginx` using the DNS name `my-web`: -1. From a manager node, deploy a busybox service to the same network as +1. From a manager node, deploy a busybox service to the same network as `my-web`: ```bash @@ -195,7 +198,7 @@ using the DNS name `my-web`: sleep 3000 ``` -2. Lookup the node where `my-busybox` is running: +2. Lookup the node where `my-busybox` is running: ```bash $ docker service ps my-busybox @@ -204,7 +207,7 @@ using the DNS name `my-web`: my-busybox.1.1dok2cmx2mln5hbqve8ilnair busybox node1 Running Running 5 seconds ago ``` -3. 
From the node where the busybox task is running, open an interactive shell to +3. From the node where the busybox task is running, open an interactive shell to the busybox container: ```bash @@ -214,7 +217,7 @@ the busybox container: You can deduce the container name as ``+``. Alternatively, you can run `docker ps` on the node where the task is running. -4. From inside the busybox container, query the DNS to view the VIP for the +4. From inside the busybox container, query the DNS to view the VIP for the `my-web` service: ```bash @@ -230,8 +233,8 @@ the busybox container: >**Note:** the examples here use `nslookup`, but you can use `dig` or any available DNS query tool. -5. From inside the busybox container, query the DNS using a special query - to find the IP addresses of all the containers for the +5. From inside the busybox container, query the DNS using a special query +<tasks.SERVICE-NAME> to find the IP addresses of all the containers for the `my-web` service: ```bash @@ -246,7 +249,7 @@ the busybox container: Address 3: 10.0.9.5 my-web.3.66u2hcrz0miqpc8h0y0f3v7aw.my-network ``` -6. From inside the busybox container, run `wget` to access the nginx web server +6. From inside the busybox container, run `wget` to access the nginx web server running in the `my-web` service: ```bash diff --git a/engine/swarm/swarm-mode.md b/engine/swarm/swarm-mode.md index 6f2a09cee51..deda4e180d8 100644 --- a/engine/swarm/swarm-mode.md +++ b/engine/swarm/swarm-mode.md @@ -176,5 +176,5 @@ To add a worker to this swarm, run the following command: ## Learn More * [Join nodes to a swarm](join-nodes.md) -* `swarm init`[command line reference](../reference/commandline/swarm_init.md) +* `swarm init` [command line reference](../reference/commandline/swarm_init.md) * [Swarm mode tutorial](swarm-tutorial/index.md) diff --git a/engine/swarm/swarm-tutorial/index.md b/engine/swarm/swarm-tutorial/index.md index eb807e2f9d1..a64fa424b72 100644 --- a/engine/swarm/swarm-tutorial/index.md +++ b/engine/swarm/swarm-tutorial/index.md @@ -25,7 +25,7 @@ The tutorial guides you through the following activities: This tutorial uses Docker Engine CLI commands entered on the command line of a terminal window. You should be able to install Docker on networked machines and -be comfortable running commands in the shell of your choice. +be comfortable with running commands in the shell of your choice. If you are brand new to Docker, see [About Docker Engine](../../index.md). @@ -104,7 +104,7 @@ the IP address. Because other nodes contact the manager node on its IP address, you should use a fixed IP address. -You can run `ifconfig` on Linux or Mac OS X to see a list of the +You can run `ifconfig` on Linux or macOS to see a list of the available network interfaces. If you are using Docker Machine, you can get the manager IP with either @@ -121,6 +121,9 @@ The following ports must be available. On some systems, these ports are open by * **TCP** and **UDP port 7946** for communication among nodes * **TCP** and **UDP port 4789** for overlay network traffic +If you are planning on creating an overlay network with encryption (`--opt encrypted`), +you will also need to ensure protocol 50 (ESP) is open. + ## What's next? After you have set up your environment, you are ready to [create a swarm](create-swarm.md). 
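If you need to open the ports listed above yourself, here is one rough sketch, assuming a host whose firewall is managed directly with iptables; adapt it to `ufw`, `firewalld`, or your cloud provider's security groups as appropriate:

```bash
# Cluster management traffic (needed on manager nodes).
sudo iptables -A INPUT -p tcp --dport 2377 -j ACCEPT

# Communication among nodes.
sudo iptables -A INPUT -p tcp --dport 7946 -j ACCEPT
sudo iptables -A INPUT -p udp --dport 7946 -j ACCEPT

# Overlay network traffic.
sudo iptables -A INPUT -p tcp --dport 4789 -j ACCEPT
sudo iptables -A INPUT -p udp --dport 4789 -j ACCEPT

# Only needed for encrypted overlay networks (--opt encrypted): IP protocol 50 (ESP).
sudo iptables -A INPUT -p esp -j ACCEPT
```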
diff --git a/engine/swarm/swarm-tutorial/menu.md b/engine/swarm/swarm-tutorial/menu.md deleted file mode 100644 index 17bc2c882d2..00000000000 --- a/engine/swarm/swarm-tutorial/menu.md +++ /dev/null @@ -1,19 +0,0 @@ - - -# Docker Engine swarm mode getting started tutorial - -## TOC - -- [Begin the tutorial](index.md) - Setup your environment to prepare - to build a swarm. diff --git a/engine/tutorials/dockerimages.md b/engine/tutorials/dockerimages.md index 7489ef3e4dd..774bf675f17 100644 --- a/engine/tutorials/dockerimages.md +++ b/engine/tutorials/dockerimages.md @@ -188,7 +188,7 @@ you'd like to update. Inside our running container first let's update Ruby: - root@0b2616b0e5a8:/# apt-get install -y ruby2.0-dev + root@0b2616b0e5a8:/# apt-get install -y ruby2.0-dev ruby2.0 Now let's add the `json` gem. diff --git a/engine/tutorials/dockervolumes.md b/engine/tutorials/dockervolumes.md index 307e5d54d66..2e304a6529e 100644 --- a/engine/tutorials/dockervolumes.md +++ b/engine/tutorials/dockervolumes.md @@ -100,11 +100,11 @@ In addition to creating a volume using the `-v` flag you can also mount a directory from your Docker engine's host into a container. ```bash -$ docker run -d -P --name web -v /src/webapp:/opt/webapp training/webapp python app.py +$ docker run -d -P --name web -v /src/webapp:/webapp training/webapp python app.py ``` This command mounts the host directory, `/src/webapp`, into the container at -`/opt/webapp`. If the path `/opt/webapp` already exists inside the container's +`/webapp`. If the path `/webapp` already exists inside the container's image, the `/src/webapp` mount overlays but does not remove the pre-existing content. Once the mount is removed, the content is accessible again. This is consistent with the expected behavior of the `mount` command. @@ -123,9 +123,9 @@ If you supply the `/foo` value, the Docker Engine creates a bind-mount. If you s the `foo` specification, the Docker Engine creates a named volume. If you are using Docker Machine on Mac or Windows, your Docker Engine daemon has only -limited access to your OS X or Windows filesystem. Docker Machine tries to -auto-share your `/Users` (OS X) or `C:\Users` (Windows) directory. So, you can -mount files or directories on OS X using. +limited access to your macOS or Windows filesystem. Docker Machine tries to +auto-share your `/Users` (macOS) or `C:\Users` (Windows) directory. So, you can +mount files or directories on macOS using. ```bash docker run -v /Users/:/ ... @@ -153,18 +153,12 @@ Docker volumes default to mount in read-write mode, but you can also set it to be mounted read-only. ```bash -$ docker run -d -P --name web -v /src/webapp:/opt/webapp:ro training/webapp python app.py +$ docker run -d -P --name web -v /src/webapp:/webapp:ro training/webapp python app.py ``` Here you've mounted the same `/src/webapp` directory but you've added the `ro` option to specify that the mount should be read-only. -Because of [limitations in the `mount` -function](http://lists.linuxfoundation.org/pipermail/containers/2015-April/035788.html), -moving subdirectories within the host's source directory can give -access from the container to the host's file system. This requires a malicious -user with access to host and its mounted directory. - >**Note**: The host directory is, by its nature, host-dependent. For this >reason, you can't mount a host directory from `Dockerfile` because built images >should be portable. A host directory wouldn't be available on all potential @@ -187,12 +181,12 @@ the other examples. 
The following command creates a named volume, called `my-named-volume`, using the `flocker` volume driver, and makes it available within the container -at `/opt/webapp`: +at `/webapp`: ```bash $ docker run -d -P \ --volume-driver=flocker \ - -v my-named-volume:/opt/webapp \ + -v my-named-volume:/webapp \ --name web training/webapp python app.py ``` @@ -206,7 +200,7 @@ using the `docker volume create` command. $ docker volume create -d flocker -o size=20GB my-named-volume $ docker run -d -P \ - -v my-named-volume:/opt/webapp \ + -v my-named-volume:/webapp \ --name web training/webapp python app.py ``` diff --git a/engine/tutorials/menu.md b/engine/tutorials/menu.md deleted file mode 100644 index febc955ac90..00000000000 --- a/engine/tutorials/menu.md +++ /dev/null @@ -1,16 +0,0 @@ - - -# Learn by example diff --git a/engine/tutorials/usingdocker.md b/engine/tutorials/usingdocker.md index 91ca148f693..40a3796e314 100644 --- a/engine/tutorials/usingdocker.md +++ b/engine/tutorials/usingdocker.md @@ -172,7 +172,7 @@ see the application. Our Python application is live! > **Note:** -> If you have been using a virtual machine on OS X, Windows or Linux, +> If you have been using a virtual machine on macOS, Windows or Linux, > you'll need to get the IP of the virtual host instead of using localhost. > You can do this by running the `docker-machine ip your_vm_name` from your command line or terminal application, for example: > diff --git a/engine/understanding-docker.md b/engine/understanding-docker.md index 08efa688f84..8d90e19ffdd 100644 --- a/engine/understanding-docker.md +++ b/engine/understanding-docker.md @@ -1,18 +1,18 @@ - +--- +aliases: +- /introduction/understanding-docker/ +- /engine/userguide/basics/ +- /engine/quickstart.md +- /engine/introduction/understanding-docker/ +description: Docker explained in depth +keywords: +- docker, introduction, documentation, about, technology, understanding +menu: + main: + parent: engine_use + weight: -90 +title: Docker Overview +--- # Docker Overview Docker is an open platform for developing, shipping, and running applications. diff --git a/engine/userguide/eng-image/dockerfile_best-practices.md b/engine/userguide/eng-image/dockerfile_best-practices.md index 58f1b46d276..0dc20b6712c 100644 --- a/engine/userguide/eng-image/dockerfile_best-practices.md +++ b/engine/userguide/eng-image/dockerfile_best-practices.md @@ -88,7 +88,7 @@ During the process of building an image Docker will step through the instructions in your `Dockerfile` executing each in the order specified. As each instruction is examined Docker will look for an existing image in its cache that it can reuse, rather than creating a new (duplicate) image. -If you do not want to use the cache at all you can use the ` --no-cache=true` +If you do not want to use the cache at all you can use the `--no-cache=true` option on the `docker build` command. However, if you do let Docker use its cache then it is very important to diff --git a/engine/userguide/labels-custom-metadata.md b/engine/userguide/labels-custom-metadata.md index 63d20342910..f05fe2e496a 100644 --- a/engine/userguide/labels-custom-metadata.md +++ b/engine/userguide/labels-custom-metadata.md @@ -85,7 +85,7 @@ Labels on swarm nodes and services can be updated dynamically. 
- Images and containers - [Adding labels to images](../reference/builder.md#label) - - [Overriding a container's labels at runtime](../reference/commandline/run.md#set-metadata-on-container-l-label-label-file) + - [Overriding a container's labels at runtime](../reference/commandline/run.md#set-metadata-on-container--l---label---label-file) - [Inspecting labels on images or containers](../reference/commandline/inspect.md) - [Filtering images by label](../reference/commandline/inspect.md#filtering) - [Filtering containers by label](../reference/commandline/ps.md#filtering) diff --git a/engine/userguide/networking/default_network/container-communication.md b/engine/userguide/networking/default_network/container-communication.md index d49d469c389..56e272a8a80 100644 --- a/engine/userguide/networking/default_network/container-communication.md +++ b/engine/userguide/networking/default_network/container-communication.md @@ -29,6 +29,7 @@ Docker will go set `ip_forward` to `1` for you when the server starts up. If you set `--ip-forward=false` and your system's kernel has it enabled, the `--ip-forward=false` option has no effect. To check the setting on your kernel or to turn it on manually: + ``` $ sysctl net.ipv4.conf.all.forwarding diff --git a/engine/userguide/networking/default_network/dockerlinks.md b/engine/userguide/networking/default_network/dockerlinks.md index b465756fea1..ef670ea1b88 100644 --- a/engine/userguide/networking/default_network/dockerlinks.md +++ b/engine/userguide/networking/default_network/dockerlinks.md @@ -1,13 +1,16 @@ - +--- +description: Learn how to connect Docker containers together. +aliases: + - /userguide/dockerlinks/ +keywords: +- Examples, Usage, user guide, links, linking, docker, documentation, examples, names, + name, container naming, port, map, network port, network +menu: + main: + parent: smn_networking_def + weight: -2 +title: Legacy container links +--- # Legacy container links @@ -99,8 +102,7 @@ configurations. For example, if you've bound the container port to the > **Note**: > This section covers the legacy link feature in the default `bridge` network. -> Please refer to [linking containers in user-defined networks] -> (../work-with-networks.md#linking-containers-in-user-defined-networks) +> Please refer to [linking containers in user-defined networks](../work-with-networks.md#linking-containers-in-user-defined-networks) > for more information on links in user-defined networks. Network port mappings are not the only way Docker containers can connect to one diff --git a/engine/userguide/networking/default_network/ipv6.md b/engine/userguide/networking/default_network/ipv6.md index 64a1b7e55b3..4f071c7f00a 100644 --- a/engine/userguide/networking/default_network/ipv6.md +++ b/engine/userguide/networking/default_network/ipv6.md @@ -71,6 +71,7 @@ $ sysctl net.ipv6.conf.eth0.accept_ra=2 Every new container will get an IPv6 address from the defined subnet. Further a default route will be added on `eth0` in the container via the address specified by the daemon option `--default-gateway-v6` if present, otherwise via `fe80::1`: + ``` docker run -it ubuntu bash -c "ip -6 addr show dev eth0; ip -6 route show" @@ -107,9 +108,9 @@ containers. ### Using NDP proxying -If your Docker host is only part of an IPv6 subnet but has not got an IPv6 +If your Docker host is the only part of an IPv6 subnet but has not got an IPv6 subnet assigned you can use NDP proxying to connect your containers via IPv6 to -the internet. 
For example your host has the IPv6 address `2001:db8::c001`, is +the internet. For example your host with the IPv6 address `2001:db8::c001`, is part of the subnet `2001:db8::/64` and your IaaS provider allows you to configure the IPv6 addresses `2001:db8::c000` to `2001:db8::c00f`: diff --git a/engine/userguide/networking/get-started-macvlan.md b/engine/userguide/networking/get-started-macvlan.md index 28391c5fdae..8c32d9f3f6c 100644 --- a/engine/userguide/networking/get-started-macvlan.md +++ b/engine/userguide/networking/get-started-macvlan.md @@ -1,13 +1,9 @@ - +--- +description: Use macvlan for container networking +keywords: +- Examples, Usage, network, docker, documentation, user guide, macvlan, cluster +title: Get started with macvlan network driver +--- # Macvlan Network Driver @@ -26,7 +22,7 @@ Macvlan offers a number of unique features and plenty of room for further innova - All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` all together and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples. - Kernel requirements: - + - To check your current kernel version, use `uname -r` to display your kernel version - Macvlan Linux kernel v3.9–3.19 and 4.0+ @@ -36,13 +32,13 @@ Macvlan Bridge mode has a unique MAC address per container used to track MAC to - Macvlan driver networks are attached to a parent Docker host interface. Examples are a physical interface such as `eth0`, a sub-interface for 802.1q VLAN tagging like `eth0.10` (`.10` representing VLAN `10`) or even bonded host adaptors which bundle two Ethernet interfaces into a single logical interface. -- The specified gateway is external to the host provided by the network infrastructure. +- The specified gateway is external to the host provided by the network infrastructure. - Each Macvlan Bridge mode Docker network is isolated from one another and there can be only one network attached to a parent interface at a time. There is a theoretical limit of 4,094 sub-interfaces per host adaptor that a Docker network could be attached to. - Any container inside the same subnet can talk to any other container in the same network without a gateway in `macvlan bridge`. -- The same `docker network` commands apply to the vlan drivers. +- The same `docker network` commands apply to the vlan drivers. - In Macvlan mode, containers on separate networks cannot reach one another without an external process routing between the two networks/subnets. This also applies to multiple subnets within the same `docker network @@ -83,14 +79,14 @@ ping -c 4 172.16.86.10 ``` Take a look at the containers ip and routing table: - + ``` ip a show eth0 eth0@if3: mtu 1500 qdisc noqueue state UNKNOWN link/ether 46:b2:6b:26:2f:69 brd ff:ff:ff:ff:ff:ff inet 172.16.86.2/24 scope global eth0 - + ip route default via 172.16.86.1 dev eth0 172.16.86.0/24 dev eth0 src 172.16.86.2 @@ -176,7 +172,7 @@ docker run --net=macvlan50 -it --name macvlan_test6 --rm alpine /bin/sh In the second network, tagged and isolated by the Docker host, `eth0.60` is the parent interface tagged with vlan id `60` specified with `-o parent=eth0.60`. The `macvlan_mode=` defaults to `macvlan_mode=bridge`. 
It can also be explicitly set with the same result as shown in the next example. ``` -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. +# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. docker network create -d macvlan \ --subnet=192.168.60.0/24 \ --gateway=192.168.60.1 \ diff --git a/engine/userguide/networking/index.md b/engine/userguide/networking/index.md index c519a2d633f..b1e6e4fe4a6 100644 --- a/engine/userguide/networking/index.md +++ b/engine/userguide/networking/index.md @@ -217,7 +217,7 @@ lo Link encap:Local Loopback RX bytes:0 (0.0 B) TX bytes:0 (0.0 B) ``` -Then use `ping`to send three ICMP requests and test the connectivity of the +Then use `ping` to send three ICMP requests and test the connectivity of the containers on this `bridge` network. ``` @@ -470,6 +470,9 @@ You should open the following ports between each of your hosts. Your key-value store service may require additional ports. Check your vendor's documentation and open any required ports. +If you are planning on creating an overlay network with encryption (`--opt encrypted`), +you will also need to ensure protocol 50 (ESP) is open. + Once you have several machines provisioned, you can use Docker Swarm to quickly form them into a swarm which includes a discovery service as well. @@ -551,8 +554,7 @@ server is unable to resolve the request it will be forwarded to any external DNS servers configured for the container. To facilitate this when the container is created, only the embedded DNS server reachable at `127.0.0.11` will be listed in the container's `resolv.conf` file. More information on embedded DNS server on -user-defined networks can be found in the [embedded DNS server in user-defined networks] -(configure-dns.md) +user-defined networks can be found in the [embedded DNS server in user-defined networks](configure-dns.md) ## Links diff --git a/engine/userguide/networking/menu.md b/engine/userguide/networking/menu.md deleted file mode 100644 index f168b70afba..00000000000 --- a/engine/userguide/networking/menu.md +++ /dev/null @@ -1,22 +0,0 @@ - - -# Docker networks feature overview - -This sections explains how to use the Docker networks feature. This feature allows users to define their own networks and connect containers to them. Using this feature you can create a network on a single host or a network that spans across multiple hosts. - -- [Understand Docker container networks](index.md) -- [Work with network commands](work-with-networks.md) -- [Get started with multi-host networking](get-started-overlay.md) - -If you are already familiar with Docker's default bridge network, `docker0` that network continues to be supported. It is created automatically in every installation. The default bridge network is also named `bridge`. To see a list of topics related to that network, read the articles listed in the [Docker default bridge network](default_network/index.md). diff --git a/engine/userguide/storagedriver/aufs-driver.md b/engine/userguide/storagedriver/aufs-driver.md index 229cc57926c..5dac0dd0777 100644 --- a/engine/userguide/storagedriver/aufs-driver.md +++ b/engine/userguide/storagedriver/aufs-driver.md @@ -191,7 +191,7 @@ also in this directory. A container's thin writable layer is stored in a directory under `/var/lib/docker/aufs/diff/`. With Docker 1.10 and higher, container IDs no longer correspond to directory names. 
However, the containers thin writable -layer still exists under here and is stacked by AUFS as the top writable layer +layer still exists here and is stacked by AUFS as the top writable layer and is where all changes to the container are stored. The directory exists even if the container is stopped. This means that restarting a container will not lose changes made to it. Once a container is deleted, it's thin writable layer @@ -211,7 +211,7 @@ containers uses the systems page cache very efficiently. - The AUFS storage driver can introduce significant latencies into container write performance. This is because the first time a container writes to any -file, the file has be located and copied into the containers top writable +file, the file has to be located and copied into the containers top writable layer. These latencies increase and are compounded when these files exist below many image layers and the files themselves are large. diff --git a/engine/userguide/storagedriver/btrfs-driver.md b/engine/userguide/storagedriver/btrfs-driver.md index dd5da2a229e..3218412349c 100644 --- a/engine/userguide/storagedriver/btrfs-driver.md +++ b/engine/userguide/storagedriver/btrfs-driver.md @@ -167,6 +167,8 @@ loaded. To verify this, use the following command: $ cat /proc/filesystems | grep btrfs + btrfs + ### Configure Btrfs on Ubuntu 14.04 LTS Assuming your system meets the prerequisites, do the following: @@ -277,7 +279,7 @@ density container use cases. - **Small writes**. Containers performing lots of small writes (including Docker hosts that start and stop many containers) can lead to poor use of Btrfs chunks. This can ultimately lead to out-of-space conditions on your Docker -host and stop it working. This is currently a major drawback to using current +host and stop working. This is currently a major drawback to use current versions of Btrfs. If you use the `btrfs` storage driver, closely monitor the free space on diff --git a/engine/userguide/storagedriver/device-mapper-driver.md b/engine/userguide/storagedriver/device-mapper-driver.md index ef445023cad..99b35e04f4a 100644 --- a/engine/userguide/storagedriver/device-mapper-driver.md +++ b/engine/userguide/storagedriver/device-mapper-driver.md @@ -1,12 +1,12 @@ - +--- +description: Learn how to optimize your use of device mapper driver. +keywords: +- container, storage, driver, device mapper +menu: + main: + parent: engine_driver +title: Device mapper storage in practice +--- # Docker and the Device Mapper storage driver @@ -302,34 +302,42 @@ assumes that the Docker daemon is in the `stopped` state. $ lvs -o+seg_monitor ``` -13. If the Docker daemon was previously started, clear your graph driver directory. +13. If the Docker daemon was previously started, move your existing graph driver + directory out of the way. - Clearing your graph driver removes any images, containers, and volumes in your - Docker installation. + Moving the graph driver removes any images, containers, and volumes in your + Docker installation. These commands move the contents of the + `/var/lib/docker` directory to a new directory named `/var/lib/docker.bk`. + If any of the following steps fail and you need to restore, you can remove + `/var/lib/docker` and replace it with `/var/lib/docker.bk`. - ```bash - $ rm -rf /var/lib/docker/* - ``` + ```bash + $ mkdir /var/lib/docker.bk + $ mv /var/lib/docker/* /var/lib/docker.bk + ``` 14. Configure the Docker daemon with specific devicemapper options. - There are two ways to do this. 
You can set options on the command line if you start the daemon there: + Now that your storage is configured, configure the Docker daemon to use it. There are two ways to do this. You can set options on the command line if you start the daemon there: - ```bash - --storage-driver=devicemapper --storage-opt=dm.thinpooldev=/dev/mapper/docker-thinpool --storage-opt dm.use_deferred_removal=true - ``` + ```bash + --storage-driver=devicemapper --storage-opt=dm.thinpooldev=/dev/mapper/docker-thinpool --storage-opt=dm.use_deferred_removal=true --storage-opt=dm.use_deferred_deletion=true + ``` You can also set them for startup in the `daemon.json` configuration, for example: - ```json - { - "storage-driver": "devicemapper", - "storage-opts": [ - "dm.thinpooldev=/dev/mapper/docker-thinpool", - "dm.use_deferred_removal=true" - ] - } - ``` + ```json + { + "storage-driver": "devicemapper", + "storage-opts": [ + "dm.thinpooldev=/dev/mapper/docker-thinpool", + "dm.use_deferred_removal=true", + "dm.use_deferred_deletion=true" + ] + } + ``` + + >**Note**: Always set both `dm.use_deferred_removal=true` and `dm.use_deferred_deletion=true` to prevent unintentionally leaking mount points. 15. If using systemd and modifying the daemon configuration via unit or drop-in file, reload systemd to scan for changes. @@ -355,11 +363,18 @@ view the logs use: $ journalctl -fu dm-event.service ``` +After you have verified that the configuration is correct, you can remove the +`/var/lib/docker.bk` directory which contains the previous configuration. + +```bash +$ rm -rf /var/lib/docker.bk +``` + If you run into repeated problems with thin pool, you can use the `dm.min_free_space` option to tune the Engine behavior. This value ensures that operations fail with a warning when the free space is at or near the minimum. For information, see the storage driver options in the Engine daemon reference. diff --git a/engine/userguide/storagedriver/selectadriver.md b/engine/userguide/storagedriver/selectadriver.md index 0677f5d7a2a..707636eb90d 100644 --- a/engine/userguide/storagedriver/selectadriver.md +++ b/engine/userguide/storagedriver/selectadriver.md @@ -12,7 +12,7 @@ weight = -1 # Select a storage driver This page describes Docker's storage driver feature. It lists the storage -driver's that Docker supports and the basic commands associated with managing +drivers that Docker supports and the basic commands associated with managing them. Finally, this page provides guidance on choosing a storage driver. The material on this page is intended for readers who already have an diff --git a/engine/userguide/storagedriver/zfs-driver.md b/engine/userguide/storagedriver/zfs-driver.md index e5b6dbcd8ec..f874d90b486 100644 --- a/engine/userguide/storagedriver/zfs-driver.md +++ b/engine/userguide/storagedriver/zfs-driver.md @@ -95,7 +95,7 @@ This process of creating child layers and containers from *read-only* snapshots Container reads with the `zfs` storage driver are very simple. A newly launched container is based on a ZFS clone. This clone initially shares all of its data with the dataset it was created from. This means that read operations with the - `zfs` storage driver are fast – even if the data being read was note + `zfs` storage driver are fast – even if the data being read was not copied into the container yet. This sharing of data blocks is shown in the diagram below. 
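If you want to see this sharing for yourself, one way, assuming the ZFS utilities are installed and Docker is already using the `zfs` driver, is to list the datasets, snapshots, and clones that back your images and containers; exact pool and dataset names will vary by host:

```bash
# Confirm the zfs storage driver is in use.
docker info | grep "Storage Driver"

# List datasets, snapshots, and clones. The `origin` column shows which
# read-only snapshot a container's writable clone was created from.
sudo zfs list -t all -o name,used,refer,origin
```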
diff --git a/getting-started/index.md b/getting-started/index.md index 33fc11e7b5e..7ffc4985790 100644 --- a/getting-started/index.md +++ b/getting-started/index.md @@ -10,24 +10,19 @@ title: "Getting Started, Part 1: Orientation and Setup" This tutorial will create a simple application that runs in a cluster, so you get a sense of how to build distributed applications with the Docker platform. +We will achieve this in the following steps: -1. In part one, which you're reading now, we get set up and oriented. -2. In part two, we create a "Hello World" application that identifies itself. -3. In part three, we hook up a visitor counter. -4. In part four, we show how to scale this "Hello World" app as if it were very - high traffic, by setting up a cluster. -5. In part five, we show how to manage our cluster with a graphical user - interface tool, and roll out code updates. +1. Get set up and oriented, on this page. +2. [Create a "Hello World" application that identifies its environment](part2.md) +3. [Hook up a visitor counter](part3.md) +4. [Scale our app as if it were very high traffic, by setting up a cluster in + production](part4.md) The application itself is very simple so that you are not too distracted by what the code is doing. After all, the value of Docker is in how it can build, ship, and run applications; it's totally agnostic as to what your application actually does. -By the end of this tutorial, you should have a good sense of how the entire -platform works, from setting up your dev environment, running and testing your -code, and finally, building, deploying, and managing your application. - ## Setup Before we get started, make sure your system has the latest version of Docker @@ -35,6 +30,11 @@ installed. [Install Docker](/engine/installation/index.md){: class="button darkblue-btn"} +> Note: If you're in Linux, you'll want to install + [Docker Toolbox](../toolbox/index.md) so you get Docker Compose. + +## Let's go! + If you understand that container images package application code and their dependencies all together in a portable deliverable, and your environment has Docker installed, let's move on! diff --git a/getting-started/part2.md b/getting-started/part2.md index 3380178a91d..f8ef7825d41 100644 --- a/getting-started/part2.md +++ b/getting-started/part2.md @@ -13,26 +13,28 @@ In this section, you will write, build, run, and share an app, the Docker way. ## Your development environment -Normally if you were to start writing a Python app on your laptop, your first -order of business would be to install a Python runtime onto your machine. But, -that creates a situation where the environment on your machine has to be just so -in order for your app to run as expected. +In the past, if you were to start writing a Python app, your first +order of business was to install a Python runtime onto your machine. But, +that creates a situation where the environment on your machine has to be just +so in order for your app to run as expected; ditto for the server that runs +your app. -In Docker, you can just grab an image of Python runtime that is already set up, -and use that as a base for creating your app. Then, your build can include the -base Python image right alongside your app code, ensuring that your app and the -runtime it needs to run all travel together. +With Docker, you can just grab a portable Python runtime as an image, no +installation necessary. 
Then, your build can include the base Python image +right alongside your app code, ensuring that your app, its dependencies, and the +runtime, all travel together. -It's done with something called a Dockerfile. +These builds are configured with something called a `Dockerfile`. ## Your first Dockerfile -Create a folder and put this file in it, with the name `Dockerfile` (no -extension). This Dockerfile defines what goes on in the environment inside your -container. Things are virtualized inside this environment, which is isolated -from the rest of your system, so you have to map ports to the outside world, and +Create an empty directory and put this file in it, with the name `Dockerfile`. +`Dockerfile` will define what goes on in the environment inside your +container. Access to resources like networking interfaces and disk drives is +virtualized inside this environment, which is isolated from the rest of your +system, so you have to map ports to the outside world, and be specific about what files you want to "copy in" to that environment. However, -after doing that, you can expect that the build of your app with this +after doing that, you can expect that the build of your app defined in this `Dockerfile` will behave exactly the same wherever it runs. {% gist johndmulhausen/c31813e076827178216b74e6a6f4a087 %} @@ -41,34 +43,30 @@ This `Dockerfile` refers to a couple of things we haven't created yet, namely `app.py` and `requirements.txt`. We'll get there. But here's what this `Dockerfile` is saying: -- Go get the base Python 2.7 runtime +- Download the official image of the Python 2.7 runtime and include it here. - Create `/app` and set it as the current working directory inside the container -- Copy the contents of my current directory (on my machine) into `/app` (in this container image) -- Install any Python packages that I list inside what is now `/app/requirements.txt` inside the container -- Ensure that this container has port 80 open when it runs +- Copy the contents of the current directory on my machine into `/app` inside the container +- Install any Python packages that I list inside `requirements.txt` +- Ensure that port 80 is exposed to the world outside this container - Set an environment variable within this container named `NAME` to be the string `World` -- Finally, when the container runs, execute `python` and pass in what is now `/app/app.py` - -This paradigm is how developing with Docker essentially works. Make a -`Dockerfile` that includes the base image, grabs your code, installs -dependencies, initializes variables, and runs the command. +- Finally, execute `python` and pass in `app.py` as the "entry point" command, + the default command that is executed at runtime. ### The app itself -Grab these two files that were referred to in the above `Dockerfile` and place -them together with `Dockerfile`, all in the same folder. +Grab these two files and place them in the same folder as `Dockerfile`. {% gist johndmulhausen/074cc7f4c26a9a8f9164b20b22602ad7 %} {% gist johndmulhausen/8728902faede400c057f3205392bb9a8 %} -You're probably getting the picture by now. In `Dockerfile` we told the `pip` -package installer to install whatever was in `requirements.txt`, which we -now see is the Flask and Redis libraries for Python. The app itself is going to -print the environment variable of `NAME`, which we set as `World`, as well as +Now we see that the `Dockerfile` command `pip install requirements.txt` installs +the Flask and Redis libraries for Python. 
We can also see that app itself +prints the environment variable of `NAME`, which we set as `World`, as well as the output of a call to `socket.gethostname()`, which the Docker runtime is -going to answer with the container ID. Finally, because Redis isn't running -(we've only installed the Python library), we should expect that the attempt to -use it here will fail and show the error message. +going to answer with the container ID, which is sort of like the process ID for +an executable. Finally, because Redis isn't running +(as we've only installed the Python library, and not Redis itself), we should +expect that the attempt to use it here will fail and produce the error message. ## Build the App @@ -77,8 +75,7 @@ That's it! You don't need to have installed Python or anything in your system. It doesn't seem like you've really set up an environment with Python and Flask, but you have. Let's build and run your app and prove it. -Make sure you're in the directory where you saved the three files we've shown, -and you've got everything. +7Here's what `ls` should show: ```shell $ ls @@ -86,15 +83,13 @@ Dockerfile app.py requirements.txt ``` Now run the build command. This creates a Docker image, which we're going to -tag using `-t` so it has a friendly name, which you can use interchangeable -with the image ID in commands. +tag using `-t` so it has a friendly name. ```shell -docker build -t "friendlyhello" . +docker build -t friendlyhello . ``` -In the output spew you can see everything defined in the `Dockerfile` happening, -including the installation of the packages we specified in `requirements.txt`. +In the output spew you can see everything defined in the `Dockerfile` happening. Where is your built image? It's in your machine's local Docker image registry. Check it out: @@ -106,22 +101,25 @@ friendlyhello latest 326387cea398 47 seconds ago ## Run the app -We're going to run the app and route traffic from our machine's port 80 to the -port 80 we exposed +Run the app, mapping our machine's port 4000 to the container's exposed port 80 +using `-p`: ```shell -docker run -p 80:80 friendlyhello +docker run -p 4000:80 friendlyhello ``` You should see a notice that Python is serving your app at `http://0.0.0.0:80`. -You can go there, or just to `http://localhost`, and see your app, "Hello World" -text, the container ID, and the Redis error message, all printed out in -beautiful Times New Roman. +But that message coming from inside the container, which doesn't know you +actually want to access your app at: `http://localhost:4000`. Go there, and +you'll see the "Hello World" text, the container ID, and the Redis error +message, all printed out in beautiful Times New Roman. + +Hit `CTRL+C` in your terminal to quit. -Hit `CTRL+C` and let's run the app in the background, in detached mode. +Now let's run the app in the background, in detached mode: ```shell -docker run -d -p 80:80 friendlyhello +docker run -d -p 4000:80 friendlyhello ``` You get a hash ID of the container instance and then are kicked back to your @@ -133,25 +131,22 @@ CONTAINER ID IMAGE COMMAND CREATED 1fa4ab2cf395 friendlyhello "python app.py" 28 seconds ago Up 25 seconds ``` -You'll see that `CONTAINER ID` matches what's on `http://localhost`, if you -refresh the browser page. You can't `CTRL+C` now, so let's kill the process this -way. Use the value you see under `CONTAINER ID`: +You'll see that `CONTAINER ID` matches what's on `http://localhost:4000`, if you +refresh the browser page. 
Now use `docker stop` to end the process, using +`CONTAINER ID`, like so: ```shell -docker kill (containerID) +docker stop 1fa4ab2cf395 ``` -## Share the App - -Now let's test how portable this app really is. +## Share your image -Sign up for Docker Hub at [https://hub.docker.com/](https://hub.docker.com/). +Sign up a Docker account at [hub.docker.com](https://hub.docker.com/). Make note of your username. We're going to use it in a couple commands. Docker Hub is a public registry. A registry is a collection of accounts and -their various repositories. A repository is a collection of assets associated -with your account - like a GitHub repository, except the code is already built. - +their various repositories. A repository is a collection of tagged images like a +GitHub repository, except the code is already built. Log in your local machine to Docker Hub. @@ -187,9 +182,40 @@ and run this command: docker run YOURUSERNAME/YOURREPO:ARBITRARYTAG ``` +> Note: If you don't specify the `:ARBITRARYTAG` portion of these commands, + the tag of `:latest` will be assumed, both when you build and when you run + images. + You'll see this stranger of a machine pull your image, along with Python and all the dependencies from `requirements.txt`, and run your code. It all travels together in a neat little package, and the new machine didn't have to install anything but Docker to run it. +## Recap and cheat sheet for images and containers + +To recap: After calling `docker run`, you created and ran a container, based on +the image created when you called `docker build`. Images are defined in a +`Dockerfile`. A container is an instance of an image, and it has any package +installations, file writes, etc that happen after you call `docker run` and run +the app. And lastly, images are shared via a registry. + +```shell +docker build -t friendlyname . #Create image using this directory's Dockerfile +docker run -p 4000:80 friendlyname #Run image "friendlyname" mapping port 4000 to 80 +docker run -d -p 4000:80 friendlyname #Same thing, but in detached mode +docker ps #See a list of all running containers +docker stop #Gracefully stop the specified container +docker ps -a #See a list of all containers on this machine, even the ones not running +docker kill #Force shutdown of the specified container +docker rm #Remove the specified container from this machine +docker rm $(docker ps -a -q) #Remove all containers from this machine +docker images -a #Show all images that have been built or downloaded onto this machine +docker rmi #Remove the specified image from this machine +docker rmi $(docker images -q) #Remove all images from this machine +docker login #Log in this CLI session using your Docker credentials (to Docker Hub by default) +docker tag username/repository:tag #Tag on your local machine for upload +docker push username/repository:tag #Upload tagged image to registry (Docker Hub by default) +docker run username/repository:tag #Run image from a registry (Docker Hub by default) +``` + [On to "Getting Started, Part 3: Stateful, Multi-container Applications" >>](part3.md){: class="button darkblue-btn"} diff --git a/getting-started/part3.md b/getting-started/part3.md index f9a51c30937..de424fe44c6 100644 --- a/getting-started/part3.md +++ b/getting-started/part3.md @@ -9,8 +9,8 @@ wrote, built, ran, and shared our first Dockerized app, which all fit in a single container. 
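As a concrete recap of that build, tag, push, and run cycle, here is a sketch; `yourhubusername` and the `get-started` repository name are placeholders rather than names defined in this tutorial:

```shell
docker build -t friendlyhello .                              # Build the image from this directory's Dockerfile
docker tag friendlyhello yourhubusername/get-started:part2   # Tag it for your Docker Hub account
docker push yourhubusername/get-started:part2                # Upload the tagged image to the registry

# On any other machine that has Docker installed, the same image runs unchanged.
docker run -p 4000:80 yourhubusername/get-started:part2
```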
In part 3, we will expand this application so that it is comprised of two -containers simultaneously: one running the web app we have already written, and -another that stores data on the web app's behalf. +containers running simultaneously: one running the web app we have already +written, and another that stores data on the web app's behalf. ## Understanding services @@ -28,24 +28,23 @@ and that's going to happen via a different executable entirely. In a distributed application, these different pieces of the app are called "services." For example, if you imagine a video sharing site, there will -probably be a service for storing application data in a database, another one +probably be a service for storing application data in a database, a service for video transcoding in the background after a user uploads something, a -service for streaming, and so on, and they all need to work in concert. +service for the front-end, and so on, and they all need to work in concert. -The easiest way to introduce the organization of your app into services using -containers is by using Docker Compose. We're going to add a data storage service +The easiest way to organize your containerized app into services using +is using Docker Compose. We're going to add a data storage service to our simple Hello World app. Don't worry, it's shockingly easy. ## Your first `docker-compose.yml` File -As you saw, a `Dockerfile` is a text file that defines a single Docker image. -But a `docker-compose.yml` file is a YAML markup file that is hierarchical in +A `docker-compose.yml` file is a YAML markup file that is hierarchical in structure, and defines how multiple Docker images should work together when they are running in containers. We saw that the "Hello World" app we created looked for a running instance of Redis, and if it failed, it produced an error message. All we need is a running -Redis instance, and that error message will be replaced with a visitor counter. +Redis instance, and that error message will be replaced with a visitor counter. Well, just as we grabbed the base image of Python earlier, we can grab the official image of Redis, and run that right alongside our app. @@ -63,14 +62,14 @@ image](https://store.docker.com/images/1f6ef28b-3e48-4da1-b838-5bd8710a2053)). This `docker-compose.yml` file tells Docker to do the following: -- Pull and run [the image we uploaded in step 2](/getting-started/part2/#/share-the-app) as a service called `web` -- Map port 80 on the host to the container's port 80 (so http://localhost:80 resolves properly) -- Link this container to the service we named `redis`; this ensures that the - dependency between `redis` and `web` is expressed, as well as the order of service - startup. -- Pull and run the official Redis image as a service called `redis` +- Pull and run [the image we uploaded to Docker Hub in step 2](/getting-started/part2/#/share-the-app) as a service called `web` +- Map port 4000 on the host to `web`'s port 80 +- Link the `web` service to the service we named `redis`; this ensures that the + dependency between `redis` and `web` is expressed, and these containers will + run together in the same subnet. +- Our service named `redis` just runs the official Redis image, so go get it from Docker Hub. -## Run your first multi-container app +## Run and scale up your first multi-container app Run this command in the directory where you saved `docker-compose.yml`: @@ -79,52 +78,63 @@ docker-compose up ``` This will pull all the necessary images and run them in concert. 
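If you want to confirm both services came up before opening the browser, a quick check from the same directory might look like this (the `web` service name comes from the Compose file described above):

```shell
docker-compose ps          # List the containers Compose started for this app
docker-compose logs web    # Show the output of the web service
```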
Now when you -visit `http://localhost`, you'll see a number next to the visitor counter +visit `http://localhost:4000`, you'll see a number next to the visitor counter instead of the error message. It really works -- just keep hitting refresh. -## Connecting to this instance of Redis +## Connecting to containers with port mapping With a containerized instance of Redis running, you're probably wondering -- how do I break through the wall of isolation and manage my data? The answer is, port mapping. [The page for the official Redis image](https://store.docker.com/images/1f6ef28b-3e48-4da1-b838-5bd8710a2053) -states that the normal management ports are open in their image, so you should +states that the normal management ports are open in their image, so you would be able to connect to it at `localhost:6379` if you add a `ports:` section to `docker-compose.yml` under `redis` that maps `6379` to your host, just as port -`80` is mapped for `web`. Same with MySQL or any other data solution; -containerized doesn't mean unreachable, it just means portable. Once you map -your ports, you can use your fave UI tools like MySQL Workbench, Redis Desktop -Manager, etc, to connect to your Dockerized instance. +`80` is mapped for `web`. Same with MySQL or any other data solution; once you +map your ports, you can use your fave UI tools like MySQL Workbench, Redis +Desktop Manager, etc, to connect to your Dockerized instance. Redis port mapping isn't necessary in `docker-compose.yml` because the two -services (`web` and `redis`) are linked, ensuring they run on the same host (VM -or physical machine). Within that host, the containers can talk to each other -in a private subnet that is automatically created by the Docker runtime, which -isn't accessible to the outside world, only to other containers. In our app, -we specify `EXPOSE 80` in our Dockerfile, and as you can see in the Redis -documentation, they specify `EXPOSE 6379` in the Dockerfile that defines the -official Redis image. But those ports aren't accessible outside of the private -subnet (or, in turn, reachable at `http://localhost`) until you map the host's -port 80 to the container's port 80, which is why we specified as much in -`docker run` previously, and `docker-compose.yml` just now. +services (`web` and `redis`) are linked, ensuring they run on the same host (VM +or physical machine), in a private subnet that is automatically created by the +Docker runtime. Containers within +that subnet can already talk to each other; it's connecting from the outside +that necessitates port mapping. + +## Cheat sheet and recap: Hosts, subnets, and Docker Compose + +You learned that by creating a `docker-compose.yml` file, you can define the +entire stack for your application. This ensures that your services run +together in a private subnet that lets them connect to each +other, but only to the world as specifically dircted. This means that if you +want to connect your favorite data management software to your data storage +service, you'll have to ensure the container has the proper port exposed and +your host has that port mapped to the container in `docker-compose.yml`. + +```shell +docker-compose up #Pull and run images specified in `docker-compose.yml` as services +docker-compose up -d #Same thing, but in background mode +docker-compose stop #Stop all running containers for this app +docker-compose rm -f #Remove all containers for this app +``` ## Get ready to scale Until now, I've been able to shield you from worrying too much about host management. 
That's because installing Docker always sets up a default way -to run containers in a single-host environment. Docker for Windows and Mac +to run containers on that machine. Docker for Windows and Mac comes with a virtual machine host running a lighweight operating system -we call Moby, which is just a very slimmed-down Linux. Docker for Linux -just works without a VM at all. And Docker for Windows can run Microsoft +we call Moby, which is just a very slimmed-down Linux. Docker for Linux +just works without a VM at all. And Docker for Windows can even run Microsoft Windows containers using native Hyper-V support. When you've run `docker -run` and `docker-compose up` so far, Docker has used these default hosts +run` and `docker-compose up` so far, Docker has used these solutions to run your containers. That's because we want you to be able to install Docker and get straight to the work of development and building images. But when it comes to getting your app into production, we all know that you're not going to run just one host machine that has Redis, Python, and all your other sevices. That won't scale. You need to learn how to run not -just multiple containers on one host, but multiple containers on multiple -hosts. And that's precisely what we're going to get into next. +just multiple containers on your local host, but multiple containers on +multiple hosts. And that's precisely what we're going to get into next. [On to "Part 4: Running our App in Production" >>](part4.md){: class="button darkblue-btn"} diff --git a/getting-started/part4.md b/getting-started/part4.md index 465a7adf8fc..3ea2c04846f 100644 --- a/getting-started/part4.md +++ b/getting-started/part4.md @@ -87,7 +87,7 @@ So, let's do that now. ### Configure Docker Cloud to manage to your AWS instances - Go to [cloud.docker.com](http://cloud.docker.com) and sign in with the - same Docker ID you used in [step 2](/getting-started/step2.md). + same Docker ID you used in [part 2](/getting-started/part2.md). - Click **Settings**, and in the Cloud Providers section, click the plug icon. - Enter the Role ARN string you copied earlier, e.g. `arn:aws:iam::123456789123:role/dockercloud-role`. - Click **Save**. @@ -98,15 +98,18 @@ into a swarm. ## Creating your first Swarm cluster 1. Go back to Docker Cloud by visiting [cloud.docker.com](https://cloud.docker.com). -2. Click **Node Clusters** in the left-navigation, then click the **Create** button. +2. Click **Node Clusters** in the left navigation, then click the **Create** button. This pulls up a form where you can create our cluster. 3. Leave everything default, except: - - Name: Give your cluster a name. - - Region: Select a region that's close to you. + - Name: Give your cluster a name + - Region: Select a region that's close to you - Provider: Set to "Amazon Web Services" - - Type/Size: -4. Launch the cluster by clicking **Launch node cluster**, and -5. + - Type/Size: Select the `t2.nano` option as that is free-tier +4. Launch the cluster by clicking **Launch node cluster**; this will spin + up a free-tier Amazon instance. +5. Now, click **Services** in the left navigation, then the **Create** button, + then the **globe icon**. +6. 
Search Docker Hub for the image you uploaded [On to next >>](part5.md){: class="button darkblue-btn"} diff --git a/googlecbe7fee896be512c.html b/googlecbe7fee896be512c.html new file mode 100644 index 00000000000..2791770031d --- /dev/null +++ b/googlecbe7fee896be512c.html @@ -0,0 +1 @@ +google-site-verification: googlecbe7fee896be512c.html \ No newline at end of file diff --git a/images/search-icon.png b/images/search-icon.png new file mode 100644 index 00000000000..0b076e1e8a9 Binary files /dev/null and b/images/search-icon.png differ diff --git a/index.md b/index.md index 0af9d45323d..0bd824bcc6c 100644 --- a/index.md +++ b/index.md @@ -23,7 +23,7 @@ layout: docs

Docker for Mac

-      A native application using the OS X sandbox security model which delivers all Docker tools to your Mac.
+      A native application using the macOS sandbox security model which delivers all Docker tools to your Mac.
@@ -125,7 +125,7 @@ layout: docs

Docker Machine

Automate container provisioning on your network or in
-      the cloud. Available for Windows, Mac OS X, or Linux.
+      the cloud. Available for Windows, macOS, or Linux.
diff --git a/js/menu.js b/js/menu.js index 163fd827eab..b2e186df72a 100644 --- a/js/menu.js +++ b/js/menu.js @@ -54,10 +54,9 @@ jQuery(document).ready(function(){ var index = 0; var currentHeader = 0, lastHeader = 0; - var output = ""; - $("h2, h3, h4").each(function() { + var output = "
    "; + $("h1, h2, h3, h4").each(function() { var li= "
  • " + $(this).text().replace("¶","") + "
  • "; - lastHeader = currentHeader; if( $(this).is("h2") ){ // h2 currentHeader = 2; @@ -68,18 +67,21 @@ jQuery(document).ready(function(){ // h4 currentHeader = 4; } - if (currentHeader > lastHeader) - { + console.log("currentHeader ",currentHeader, "lastHeader ",lastHeader, "text ", $(this).text()); + if (currentHeader > lastHeader) { // nest further - output += "
      " + li; - } else if (lastHeader < currentHeader) - { + output += "
        " + } + if (currentHeader < lastHeader && lastHeader > 0) { // close nesting - output += "
      " + li - } else { - // continue, no change in nesting - output += li; + console.log("Closing nesting because ", lastHeader, "is <", currentHeader); + for (i=0; i < (lastHeader - currentHeader); i++) + { + output += "
    " + } } + output += li; + lastHeader = currentHeader; /* if( $(this).is("h2") ){ prevH2List = $("
      "); diff --git a/kitematic/faq.md b/kitematic/faq.md index c2c84b7ac92..248d1cfff3f 100644 --- a/kitematic/faq.md +++ b/kitematic/faq.md @@ -29,7 +29,7 @@ the Docker Remote API. ### Which platforms does Kitematic support? -Right now Kitematic works on Mac OS X and Windows. Linux is planned in the +Right now Kitematic works on macOS and Windows. Linux is planned in the future. Review our product roadmap. diff --git a/kitematic/index.md b/kitematic/index.md index c592cc17552..a2972a17fce 100644 --- a/kitematic/index.md +++ b/kitematic/index.md @@ -11,8 +11,8 @@ menu: title: Kitematic --- -# Kitematic +# Kitematic -Kitematic, the Docker GUI, runs on Mac OS X and Windows operating systems. Beginning with the 1.8 Docker release, you use the Docker Toolbox to install Kitematic. See the [Mac OS X installation guide](https://docs.docker.com/installation/mac) or the [Windows installation guide](https://docs.docker.com/installation/windows) for details on installing with Docker Toolbox. +Kitematic, the Docker GUI, runs on macOS and Windows operating systems. Beginning with the 1.8 Docker release, you use the Docker Toolbox to install Kitematic. See the [macOS installation guide](/docker-for-mac/) or the [Windows installation guide](/docker-for-windows/) for details on installing with Docker Toolbox. For information about using Kitematic, take a look at the [User Guide](userguide.md). diff --git a/kitematic/rethinkdb-dev-database.md b/kitematic/rethinkdb-dev-database.md index 79bd15d990e..ff8dad7c75f 100644 --- a/kitematic/rethinkdb-dev-database.md +++ b/kitematic/rethinkdb-dev-database.md @@ -44,7 +44,7 @@ for you). This means you can now reach RethinkDB via a client driver at ### (Advanced) Saving Data into RethinkDB with a local Node.js App Now, you'll create the RethinkDB example chat application running on your local -OS X system to test drive your new containerized database. +macOS system to test drive your new containerized database. First, if you don't have it yet, [download and install Node.js](http://nodejs.org/). diff --git a/kitematic/userguide.md b/kitematic/userguide.md index 3f417552226..32577cd93c5 100644 --- a/kitematic/userguide.md +++ b/kitematic/userguide.md @@ -162,7 +162,7 @@ Kitematic will prompt you to confirm that you want to delete. To see the complete list of exposed ports, go to "Settings" then "Ports". This page lists all the container ports exposed, and the IP address and host-only -network port that you can access use to access that container from your OS X +network port that you can access use to access that container from your macOS system. ## Docker Command-line Access diff --git a/machine/DRIVER_SPEC.md b/machine/DRIVER_SPEC.md index 39f26a37f38..741adebf7ed 100644 --- a/machine/DRIVER_SPEC.md +++ b/machine/DRIVER_SPEC.md @@ -1,6 +1,6 @@ --- description: machine -draft: true +published: false keywords: - machine, orchestration, install, installation, docker, documentation menu: diff --git a/machine/RELEASE.md b/machine/RELEASE.md index ca430010174..8e9bd80c482 100644 --- a/machine/RELEASE.md +++ b/machine/RELEASE.md @@ -1,5 +1,5 @@ --- -draft: true +published: false --- # Docker Machine Release Process diff --git a/machine/drivers/aws.md b/machine/drivers/aws.md index c02d7144d9b..db3dfd8474a 100644 --- a/machine/drivers/aws.md +++ b/machine/drivers/aws.md @@ -72,7 +72,6 @@ You can use environment variables: - `--amazonec2-use-ebs-optimized-instance`: Create an EBS Optimized Instance, instance type must support it. 
- `--amazonec2-ssh-keypath`: Path to Private Key file to use for instance. Matching public key with .pub extension should exist - `--amazonec2-retries`: Set retry count for recoverable failures (use -1 to disable) -- `--amazonec2-userdata`: Path to custom User Data file. #### Environment variables and default values: @@ -103,7 +102,6 @@ You can use environment variables: | `--amazonec2-use-ebs-optimized-instance` | - | `false` | | `--amazonec2-ssh-keypath` | `AWS_SSH_KEYPATH` | - | | `--amazonec2-retries` | - | `5` | -| `--amazonec2-user-data` | `AWS_USERDATA` | - | ## Default AMIs diff --git a/machine/drivers/hyper-v.md b/machine/drivers/hyper-v.md index d11535b61bc..38262f93947 100644 --- a/machine/drivers/hyper-v.md +++ b/machine/drivers/hyper-v.md @@ -129,7 +129,7 @@ you can create these swarm nodes: `manager1`, `worker1`, `worker2`. PS C:\WINDOWS\system32> ``` -* Use the same process, driver and network switch to create the other nodes. +* Use the same process, driver and network switch to create the other nodes. For our example, the commands will look like this: diff --git a/machine/examples/ocean.md b/machine/examples/ocean.md index b204ab1c189..15e1e6fa156 100644 --- a/machine/examples/ocean.md +++ b/machine/examples/ocean.md @@ -21,45 +21,48 @@ If you have not done so already, go to Mac OS X +the macOS installation instructions or Windows +href="https://docs.docker.com/docker-for-windows/" target="_blank">Windows installation instructions. If you want only Docker Machine, you can install the Machine binaries directly by following the instructions in the next section. You can find the latest versions of the binaries are on the docker/machine release page on GitHub. @@ -27,15 +27,15 @@ If you want only Docker Machine, you can install the Machine binaries directly b 2. Download the Docker Machine binary and extract it to your PATH. - If you are running OS X or Linux: + If you are running macOS or Linux: - $ curl -L https://github.com/docker/machine/releases/download/v0.7.0/docker-machine-`uname -s`-`uname -m` > /usr/local/bin/docker-machine && \ + $ curl -L https://github.com/docker/machine/releases/download/v0.8.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine && \ chmod +x /usr/local/bin/docker-machine If you are running Windows with git bash $ if [[ ! -d "$HOME/bin" ]]; then mkdir -p "$HOME/bin"; fi && \ - curl -L https://github.com/docker/machine/releases/download/v0.7.0/docker-machine-Windows-x86_64.exe > "$HOME/bin/docker-machine.exe" && \ + curl -L https://github.com/docker/machine/releases/download/v0.8.2/docker-machine-Windows-x86_64.exe > "$HOME/bin/docker-machine.exe" && \ chmod +x "$HOME/bin/docker-machine.exe" Otherwise, download one of the releases from the docker/machine release page directly. @@ -43,7 +43,7 @@ If you want only Docker Machine, you can install the Machine binaries directly b 3. Check the installation by displaying the Machine version: $ docker-machine version - docker-machine version 0.7.0, build 61388e9 + docker-machine version 0.8.2, build e18a919 ## Installing bash completion scripts diff --git a/machine/reference/active.md b/machine/reference/active.md index caa7850d9a2..96a2ef84d7a 100644 --- a/machine/reference/active.md +++ b/machine/reference/active.md @@ -13,11 +13,16 @@ title: active See which machine is "active" (a machine is considered active if the `DOCKER_HOST` environment variable points to it). 
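Switching which machine is active is simply a matter of re-running the exports that `docker-machine env` prints. A minimal sketch, assuming a machine named `staging` already exists:

```none
$ eval "$(docker-machine env staging)"
$ docker-machine active
staging
```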
- $ docker-machine ls - NAME ACTIVE DRIVER STATE URL - dev - virtualbox Running tcp://192.168.99.103:2376 - staging * digitalocean Running tcp://203.0.113.81:2376 - $ echo $DOCKER_HOST - tcp://203.0.113.81:2376 - $ docker-machine active - staging +```none +$ docker-machine ls + +NAME ACTIVE DRIVER STATE URL +dev - virtualbox Running tcp://192.168.99.103:2376 +staging * digitalocean Running tcp://203.0.113.81:2376 + +$ echo $DOCKER_HOST +tcp://203.0.113.81:2376 + +$ docker-machine active +staging +``` diff --git a/machine/reference/config.md b/machine/reference/config.md index 8f9a89a96be..640dc3c1c32 100644 --- a/machine/reference/config.md +++ b/machine/reference/config.md @@ -10,23 +10,26 @@ title: config # config - Usage: docker-machine config [OPTIONS] [arg...] +```none +Usage: docker-machine config [OPTIONS] [arg...] - Print the connection config for machine +Print the connection config for machine - Description: - Argument is a machine name. +Description: + Argument is a machine name. - Options: +Options: - --swarm Display the Swarm config instead of the Docker daemon + --swarm Display the Swarm config instead of the Docker daemon +``` +For example: -For example: - - $ docker-machine config dev - --tlsverify - --tlscacert="/Users/ehazlett/.docker/machines/dev/ca.pem" - --tlscert="/Users/ehazlett/.docker/machines/dev/cert.pem" - --tlskey="/Users/ehazlett/.docker/machines/dev/key.pem" - -H tcp://192.168.99.103:2376 +```none +$ docker-machine config dev \ +--tlsverify \ +--tlscacert="/Users/ehazlett/.docker/machines/dev/ca.pem" \ +--tlscert="/Users/ehazlett/.docker/machines/dev/cert.pem" \ +--tlskey="/Users/ehazlett/.docker/machines/dev/key.pem" \ +-H tcp://192.168.99.103:2376 +``` diff --git a/machine/reference/create.md b/machine/reference/create.md index 1f440058ab9..6191603c67d 100644 --- a/machine/reference/create.md +++ b/machine/reference/create.md @@ -15,17 +15,19 @@ Create a machine. Requires the `--driver` flag to indicate which provider (VirtualBox, DigitalOcean, AWS, etc.) the machine should be created on, and an argument to indicate the name of the created machine. - $ docker-machine create --driver virtualbox dev - Creating CA: /home/username/.docker/machine/certs/ca.pem - Creating client certificate: /home/username/.docker/machine/certs/cert.pem - Image cache does not exist, creating it at /home/username/.docker/machine/cache... - No default boot2docker iso found locally, downloading the latest release... - Downloading https://github.com/boot2docker/boot2docker/releases/download/v1.6.2/boot2docker.iso to /home/username/.docker/machine/cache/boot2docker.iso... - Creating VirtualBox VM... - Creating SSH key... - Starting VirtualBox VM... - Starting VM... - To see how to connect Docker to this machine, run: docker-machine env dev +```none +$ docker-machine create --driver virtualbox dev +Creating CA: /home/username/.docker/machine/certs/ca.pem +Creating client certificate: /home/username/.docker/machine/certs/cert.pem +Image cache does not exist, creating it at /home/username/.docker/machine/cache... +No default boot2docker iso found locally, downloading the latest release... +Downloading https://github.com/boot2docker/boot2docker/releases/download/v1.6.2/boot2docker.iso to /home/username/.docker/machine/cache/boot2docker.iso... +Creating VirtualBox VM... +Creating SSH key... +Starting VirtualBox VM... +Starting VM... 
+To see how to connect Docker to this machine, run: docker-machine env dev +``` ## Accessing driver-specific flags in the help text @@ -34,33 +36,35 @@ drivers. These largely control aspects of Machine's provisoning process (including the creation of Docker Swarm containers) that the user may wish to customize. - $ docker-machine create - Docker Machine Version: 0.5.0 (45e3688) - Usage: docker-machine create [OPTIONS] [arg...] - - Create a machine. - - Run 'docker-machine create --driver name' to include the create flags for that driver in the help text. - - Options: - - --driver, -d "none" Driver to create machine with. - --engine-install-url "https://get.docker.com" Custom URL to use for engine installation [$MACHINE_DOCKER_INSTALL_URL] - --engine-opt [--engine-opt option --engine-opt option] Specify arbitrary flags to include with the created engine in the form flag=value - --engine-insecure-registry [--engine-insecure-registry option --engine-insecure-registry option] Specify insecure registries to allow with the created engine - --engine-registry-mirror [--engine-registry-mirror option --engine-registry-mirror option] Specify registry mirrors to use [$ENGINE_REGISTRY_MIRROR] - --engine-label [--engine-label option --engine-label option] Specify labels for the created engine - --engine-storage-driver Specify a storage driver to use with the engine - --engine-env [--engine-env option --engine-env option] Specify environment variables to set in the engine - --swarm Configure Machine with Swarm - --swarm-image "swarm:latest" Specify Docker image to use for Swarm [$MACHINE_SWARM_IMAGE] - --swarm-master Configure Machine to be a Swarm master - --swarm-discovery Discovery service to use with Swarm - --swarm-strategy "spread" Define a default scheduling strategy for Swarm - --swarm-opt [--swarm-opt option --swarm-opt option] Define arbitrary flags for swarm - --swarm-host "tcp://0.0.0.0:3376" ip/socket to listen on for Swarm master - --swarm-addr addr to advertise for Swarm (default: detect and use the machine IP) - --swarm-experimental Enable Swarm experimental features +```none +$ docker-machine create +Docker Machine Version: 0.5.0 (45e3688) +Usage: docker-machine create [OPTIONS] [arg...] + +Create a machine. + +Run 'docker-machine create --driver name' to include the create flags for that driver in the help text. + +Options: + + --driver, -d "none" Driver to create machine with. 
+ --engine-install-url "https://get.docker.com" Custom URL to use for engine installation [$MACHINE_DOCKER_INSTALL_URL] + --engine-opt [--engine-opt option --engine-opt option] Specify arbitrary flags to include with the created engine in the form flag=value + --engine-insecure-registry [--engine-insecure-registry option --engine-insecure-registry option] Specify insecure registries to allow with the created engine + --engine-registry-mirror [--engine-registry-mirror option --engine-registry-mirror option] Specify registry mirrors to use [$ENGINE_REGISTRY_MIRROR] + --engine-label [--engine-label option --engine-label option] Specify labels for the created engine + --engine-storage-driver Specify a storage driver to use with the engine + --engine-env [--engine-env option --engine-env option] Specify environment variables to set in the engine + --swarm Configure Machine with Swarm + --swarm-image "swarm:latest" Specify Docker image to use for Swarm [$MACHINE_SWARM_IMAGE] + --swarm-master Configure Machine to be a Swarm master + --swarm-discovery Discovery service to use with Swarm + --swarm-strategy "spread" Define a default scheduling strategy for Swarm + --swarm-opt [--swarm-opt option --swarm-opt option] Define arbitrary flags for swarm + --swarm-host "tcp://0.0.0.0:3376" ip/socket to listen on for Swarm master + --swarm-addr addr to advertise for Swarm (default: detect and use the machine IP) + --swarm-experimental Enable Swarm experimental features +``` Additionally, drivers can specify flags that Machine can accept as part of their plugin code. These allow users to customize the provider-specific parameters of @@ -70,43 +74,45 @@ geographical region (`--amazonec2-region us-west-1`), and so on. To see the provider-specific flags, simply pass a value for `--driver` when invoking the `create` help text. - $ docker-machine create --driver virtualbox --help - Usage: docker-machine create [OPTIONS] [arg...] - - Create a machine. - - Run 'docker-machine create --driver name' to include the create flags for that driver in the help text. - - Options: - - --driver, -d "none" Driver to create machine with. 
- --engine-env [--engine-env option --engine-env option] Specify environment variables to set in the engine - --engine-insecure-registry [--engine-insecure-registry option --engine-insecure-registry option] Specify insecure registries to allow with the created engine - --engine-install-url "https://get.docker.com" Custom URL to use for engine installation [$MACHINE_DOCKER_INSTALL_URL] - --engine-label [--engine-label option --engine-label option] Specify labels for the created engine - --engine-opt [--engine-opt option --engine-opt option] Specify arbitrary flags to include with the created engine in the form flag=value - --engine-registry-mirror [--engine-registry-mirror option --engine-registry-mirror option] Specify registry mirrors to use [$ENGINE_REGISTRY_MIRROR] - --engine-storage-driver Specify a storage driver to use with the engine - --swarm Configure Machine with Swarm - --swarm-addr addr to advertise for Swarm (default: detect and use the machine IP) - --swarm-discovery Discovery service to use with Swarm - --swarm-experimental Enable Swarm experimental features - --swarm-host "tcp://0.0.0.0:3376" ip/socket to listen on for Swarm master - --swarm-image "swarm:latest" Specify Docker image to use for Swarm [$MACHINE_SWARM_IMAGE] - --swarm-master Configure Machine to be a Swarm master - --swarm-opt [--swarm-opt option --swarm-opt option] Define arbitrary flags for swarm - --swarm-strategy "spread" Define a default scheduling strategy for Swarm - --virtualbox-boot2docker-url The URL of the boot2docker image. Defaults to the latest available version [$VIRTUALBOX_BOOT2DOCKER_URL] - --virtualbox-cpu-count "1" number of CPUs for the machine (-1 to use the number of CPUs available) [$VIRTUALBOX_CPU_COUNT] - --virtualbox-disk-size "20000" Size of disk for host in MB [$VIRTUALBOX_DISK_SIZE] - --virtualbox-host-dns-resolver Use the host DNS resolver [$VIRTUALBOX_HOST_DNS_RESOLVER] - --virtualbox-dns-proxy Proxy all DNS requests to the host [$VIRTUALBOX_DNS_PROXY] - --virtualbox-hostonly-cidr "192.168.99.1/24" Specify the Host Only CIDR [$VIRTUALBOX_HOSTONLY_CIDR] - --virtualbox-hostonly-nicpromisc "deny" Specify the Host Only Network Adapter Promiscuous Mode [$VIRTUALBOX_HOSTONLY_NIC_PROMISC] - --virtualbox-hostonly-nictype "82540EM" Specify the Host Only Network Adapter Type [$VIRTUALBOX_HOSTONLY_NIC_TYPE] - --virtualbox-import-boot2docker-vm The name of a Boot2Docker VM to import - --virtualbox-memory "1024" Size of memory for host in MB [$VIRTUALBOX_MEMORY_SIZE] - --virtualbox-no-share Disable the mount of your home directory +```none +$ docker-machine create --driver virtualbox --help +Usage: docker-machine create [OPTIONS] [arg...] + +Create a machine. + +Run 'docker-machine create --driver name' to include the create flags for that driver in the help text. + +Options: + + --driver, -d "none" Driver to create machine with. 
+ --engine-env [--engine-env option --engine-env option] Specify environment variables to set in the engine + --engine-insecure-registry [--engine-insecure-registry option --engine-insecure-registry option] Specify insecure registries to allow with the created engine + --engine-install-url "https://get.docker.com" Custom URL to use for engine installation [$MACHINE_DOCKER_INSTALL_URL] + --engine-label [--engine-label option --engine-label option] Specify labels for the created engine + --engine-opt [--engine-opt option --engine-opt option] Specify arbitrary flags to include with the created engine in the form flag=value + --engine-registry-mirror [--engine-registry-mirror option --engine-registry-mirror option] Specify registry mirrors to use [$ENGINE_REGISTRY_MIRROR] + --engine-storage-driver Specify a storage driver to use with the engine + --swarm Configure Machine with Swarm + --swarm-addr addr to advertise for Swarm (default: detect and use the machine IP) + --swarm-discovery Discovery service to use with Swarm + --swarm-experimental Enable Swarm experimental features + --swarm-host "tcp://0.0.0.0:3376" ip/socket to listen on for Swarm master + --swarm-image "swarm:latest" Specify Docker image to use for Swarm [$MACHINE_SWARM_IMAGE] + --swarm-master Configure Machine to be a Swarm master + --swarm-opt [--swarm-opt option --swarm-opt option] Define arbitrary flags for swarm + --swarm-strategy "spread" Define a default scheduling strategy for Swarm + --virtualbox-boot2docker-url The URL of the boot2docker image. Defaults to the latest available version [$VIRTUALBOX_BOOT2DOCKER_URL] + --virtualbox-cpu-count "1" number of CPUs for the machine (-1 to use the number of CPUs available) [$VIRTUALBOX_CPU_COUNT] + --virtualbox-disk-size "20000" Size of disk for host in MB [$VIRTUALBOX_DISK_SIZE] + --virtualbox-host-dns-resolver Use the host DNS resolver [$VIRTUALBOX_HOST_DNS_RESOLVER] + --virtualbox-dns-proxy Proxy all DNS requests to the host [$VIRTUALBOX_DNS_PROXY] + --virtualbox-hostonly-cidr "192.168.99.1/24" Specify the Host Only CIDR [$VIRTUALBOX_HOSTONLY_CIDR] + --virtualbox-hostonly-nicpromisc "deny" Specify the Host Only Network Adapter Promiscuous Mode [$VIRTUALBOX_HOSTONLY_NIC_PROMISC] + --virtualbox-hostonly-nictype "82540EM" Specify the Host Only Network Adapter Type [$VIRTUALBOX_HOSTONLY_NIC_TYPE] + --virtualbox-import-boot2docker-vm The name of a Boot2Docker VM to import + --virtualbox-memory "1024" Size of memory for host in MB [$VIRTUALBOX_MEMORY_SIZE] + --virtualbox-no-share Disable the mount of your home directory +``` You may notice that some flags specify environment variables that they are associated with as well (located to the far left hand side of the row). If @@ -137,12 +143,14 @@ filesystem has been created, and so on. The following is an example usage: - $ docker-machine create -d virtualbox \ - --engine-label foo=bar \ - --engine-label spam=eggs \ - --engine-storage-driver overlay \ - --engine-insecure-registry registry.myco.com \ - foobarmachine +```none +$ docker-machine create -d virtualbox \ + --engine-label foo=bar \ + --engine-label spam=eggs \ + --engine-storage-driver overlay \ + --engine-insecure-registry registry.myco.com \ + foobarmachine +``` This will create a virtual machine running locally in Virtualbox which uses the `overlay` storage backend, has the key-value pairs `foo=bar` and `spam=eggs` as @@ -150,18 +158,20 @@ labels on the engine, and allows pushing / pulling from the insecure registry located at `registry.myco.com`. 
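Behind the scenes, these `--engine-*` options are handed to the Docker daemon on the provisioned host as ordinary daemon flags. A rough sketch of the equivalent daemon invocation for the example above (illustrative only; the exact provisioning mechanism depends on the base OS):

```none
docker daemon --label foo=bar --label spam=eggs \
  --storage-driver overlay \
  --insecure-registry registry.myco.com
```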
You can verify much of this by inspecting the output of `docker info`: - $ eval $(docker-machine env foobarmachine) - $ docker info - Containers: 0 - Images: 0 - Storage Driver: overlay - ... - Name: foobarmachine - ... - Labels: - foo=bar - spam=eggs - provider=virtualbox +```none +$ eval $(docker-machine env foobarmachine) +$ docker info +Containers: 0 +Images: 0 +Storage Driver: overlay +... +Name: foobarmachine +... +Labels: + foo=bar + spam=eggs + provider=virtualbox +``` The supported flags are as follows: @@ -181,10 +191,12 @@ for all containers, and always use the `syslog` [log driver](/engine/reference/run.md#logging-drivers-log-driver) you could run the following create command: - $ docker-machine create -d virtualbox \ - --engine-opt dns=8.8.8.8 \ - --engine-opt log-driver=syslog \ - gdns +```none +$ docker-machine create -d virtualbox \ + --engine-opt dns=8.8.8.8 \ + --engine-opt log-driver=syslog \ + gdns +``` Additionally, Docker Machine supports a flag, `--engine-env`, which can be used to specify arbitrary environment variables to be set within the engine with the syntax `--engine-env name=value`. For example, to specify that the engine should use `example.com` as the proxy server, you could run the following create command: @@ -217,13 +229,15 @@ you won't have to worry about it. Example create: - $ docker-machine create -d virtualbox \ - --swarm \ - --swarm-master \ - --swarm-discovery token:// \ - --swarm-strategy binpack \ - --swarm-opt heartbeat=5 \ - upbeat +```none +$ docker-machine create -d virtualbox \ + --swarm \ + --swarm-master \ + --swarm-discovery token:// \ + --swarm-strategy binpack \ + --swarm-opt heartbeat=5 \ + upbeat +``` This will set the swarm scheduling strategy to "binpack" (pack in containers as tightly as possible per host instead of spreading them out), and the "heartbeat" diff --git a/machine/reference/env.md b/machine/reference/env.md index 2f8b1ae07c7..fa0e8d0b404 100644 --- a/machine/reference/env.md +++ b/machine/reference/env.md @@ -13,37 +13,41 @@ title: env Set environment variables to dictate that `docker` should run a command against a particular machine. - $ docker-machine env --help +```none +$ docker-machine env --help - Usage: docker-machine env [OPTIONS] [arg...] +Usage: docker-machine env [OPTIONS] [arg...] - Display the commands to set up the environment for the Docker client +Display the commands to set up the environment for the Docker client - Description: - Argument is a machine name. +Description: + Argument is a machine name. - Options: +Options: - --swarm Display the Swarm config instead of the Docker daemon - --shell Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh], default is sh/bash - --unset, -u Unset variables instead of setting them - --no-proxy Add machine IP to NO_PROXY environment variable + --swarm Display the Swarm config instead of the Docker daemon + --shell Force environment to be configured for a specified shell: [fish, cmd, powershell, tcsh], default is sh/bash + --unset, -u Unset variables instead of setting them + --no-proxy Add machine IP to NO_PROXY environment variable +``` `docker-machine env machinename` will print out `export` commands which can be run in a subshell. Running `docker-machine env -u` will print `unset` commands which reverse this effect. 
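For reference, the raw `bash`/`zsh` output of `docker-machine env` looks roughly like this (the machine name, address, and paths are placeholders; yours will differ):

```none
$ docker-machine env dev
export DOCKER_TLS_VERIFY="1"
export DOCKER_HOST="tcp://192.168.99.101:2376"
export DOCKER_CERT_PATH="/Users/captain/.docker/machine/machines/dev"
export DOCKER_MACHINE_NAME="dev"
# Run this command to configure your shell:
# eval "$(docker-machine env dev)"
```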
- $ env | grep DOCKER - $ eval "$(docker-machine env dev)" - $ env | grep DOCKER - DOCKER_HOST=tcp://192.168.99.101:2376 - DOCKER_CERT_PATH=/Users/nathanleclaire/.docker/machines/.client - DOCKER_TLS_VERIFY=1 - DOCKER_MACHINE_NAME=dev - $ # If you run a docker command, now it will run against that host. - $ eval "$(docker-machine env -u)" - $ env | grep DOCKER - $ # The environment variables have been unset. +```none +$ env | grep DOCKER +$ eval "$(docker-machine env dev)" +$ env | grep DOCKER +DOCKER_HOST=tcp://192.168.99.101:2376 +DOCKER_CERT_PATH=/Users/nathanleclaire/.docker/machines/.client +DOCKER_TLS_VERIFY=1 +DOCKER_MACHINE_NAME=dev +$ # If you run a docker command, now it will run against that host. +$ eval "$(docker-machine env -u)" +$ env | grep DOCKER +$ # The environment variables have been unset. +``` The output described above is intended for the shells `bash` and `zsh` (if you're not sure which shell you're using, there's a very good possibility that @@ -55,12 +59,14 @@ If you are using `fish` and the `SHELL` environment variable is correctly set to the path where `fish` is located, `docker-machine env name` will print out the values in the format which `fish` expects: - set -x DOCKER_TLS_VERIFY 1; - set -x DOCKER_CERT_PATH "/Users/nathanleclaire/.docker/machine/machines/overlay"; - set -x DOCKER_HOST tcp://192.168.99.102:2376; - set -x DOCKER_MACHINE_NAME overlay - # Run this command to configure your shell: - # eval "$(docker-machine env overlay)" +```none +set -x DOCKER_TLS_VERIFY 1; +set -x DOCKER_CERT_PATH "/Users/nathanleclaire/.docker/machine/machines/overlay"; +set -x DOCKER_HOST tcp://192.168.99.102:2376; +set -x DOCKER_MACHINE_NAME overlay +# Run this command to configure your shell: +# eval "$(docker-machine env overlay)" +``` If you are on Windows and using either Powershell or `cmd.exe`, `docker-machine env` Docker Machine should now detect your shell automatically. 
If the automagic detection does not work you @@ -68,22 +74,26 @@ If you are on Windows and using either Powershell or `cmd.exe`, `docker-machine For Powershell: - $ docker-machine.exe env --shell powershell dev - $Env:DOCKER_TLS_VERIFY = "1" - $Env:DOCKER_HOST = "tcp://192.168.99.101:2376" - $Env:DOCKER_CERT_PATH = "C:\Users\captain\.docker\machine\machines\dev" - $Env:DOCKER_MACHINE_NAME = "dev" - # Run this command to configure your shell: - # docker-machine.exe env --shell=powershell dev | Invoke-Expression +```none +$ docker-machine.exe env --shell powershell dev +$Env:DOCKER_TLS_VERIFY = "1" +$Env:DOCKER_HOST = "tcp://192.168.99.101:2376" +$Env:DOCKER_CERT_PATH = "C:\Users\captain\.docker\machine\machines\dev" +$Env:DOCKER_MACHINE_NAME = "dev" +# Run this command to configure your shell: +# docker-machine.exe env --shell=powershell dev | Invoke-Expression +``` For `cmd.exe`: - $ docker-machine.exe env --shell cmd dev - set DOCKER_TLS_VERIFY=1 - set DOCKER_HOST=tcp://192.168.99.101:2376 - set DOCKER_CERT_PATH=C:\Users\captain\.docker\machine\machines\dev - set DOCKER_MACHINE_NAME=dev - # Run this command to configure your shell: copy and paste the above values into your command prompt +```none +$ docker-machine.exe env --shell cmd dev +set DOCKER_TLS_VERIFY=1 +set DOCKER_HOST=tcp://192.168.99.101:2376 +set DOCKER_CERT_PATH=C:\Users\captain\.docker\machine\machines\dev +set DOCKER_MACHINE_NAME=dev +# Run this command to configure your shell: copy and paste the above values into your command prompt +``` ## Excluding the created machine from proxies @@ -95,14 +105,16 @@ This is useful when using `docker-machine` with a local VM provider (e.g. `virtualbox` or `vmwarefusion`) in network environments where a HTTP proxy is required for internet access. - $ docker-machine env --no-proxy default - export DOCKER_TLS_VERIFY="1" - export DOCKER_HOST="tcp://192.168.99.104:2376" - export DOCKER_CERT_PATH="/Users/databus23/.docker/machine/certs" - export DOCKER_MACHINE_NAME="default" - export NO_PROXY="192.168.99.104" - # Run this command to configure your shell: - # eval "$(docker-machine env default)" +```none +$ docker-machine env --no-proxy default +export DOCKER_TLS_VERIFY="1" +export DOCKER_HOST="tcp://192.168.99.104:2376" +export DOCKER_CERT_PATH="/Users/databus23/.docker/machine/certs" +export DOCKER_MACHINE_NAME="default" +export NO_PROXY="192.168.99.104" +# Run this command to configure your shell: +# eval "$(docker-machine env default)" +``` You may also want to visit the [documentation on setting `HTTP_PROXY` for the created daemon using the `--engine-env` flag for `docker-machine diff --git a/machine/reference/help.md b/machine/reference/help.md index 748938f77b5..c43a7e2560e 100644 --- a/machine/reference/help.md +++ b/machine/reference/help.md @@ -10,22 +10,26 @@ title: help # help - Usage: docker-machine help [arg...] +```none +Usage: docker-machine help [arg...] - Shows a list of commands or help for one command +Shows a list of commands or help for one command +``` Usage: docker-machine help _subcommand_ For example: - $ docker-machine help config - Usage: docker-machine config [OPTIONS] [arg...] +```none +$ docker-machine help config +Usage: docker-machine config [OPTIONS] [arg...] - Print the connection config for machine +Print the connection config for machine - Description: - Argument is a machine name. +Description: + Argument is a machine name. 
- Options: +Options: - --swarm Display the Swarm config instead of the Docker daemon + --swarm Display the Swarm config instead of the Docker daemon +``` diff --git a/machine/reference/index.md b/machine/reference/index.md index a98a1e6f442..e344785f8cd 100644 --- a/machine/reference/index.md +++ b/machine/reference/index.md @@ -21,6 +21,7 @@ title: Command line reference - [ip](ip.md) - [kill](kill.md) - [ls](ls.md) +- [provision](provision.md) - [regenerate-certs](regenerate-certs.md) - [restart](restart.md) - [rm](rm.md) diff --git a/machine/reference/inspect.md b/machine/reference/inspect.md index 1358f0e20ce..cda92193bfc 100644 --- a/machine/reference/inspect.md +++ b/machine/reference/inspect.md @@ -11,15 +11,17 @@ title: inspect # inspect - Usage: docker-machine inspect [OPTIONS] [arg...] +```none +Usage: docker-machine inspect [OPTIONS] [arg...] - Inspect information about a machine +Inspect information about a machine - Description: - Argument is a machine name. +Description: + Argument is a machine name. - Options: - --format, -f Format the output using the given go template. +Options: + --format, -f Format the output using the given go template. +``` By default, this will render information about a machine as JSON. If a format is specified, the given template will be executed for each result. @@ -36,60 +38,63 @@ In addition to the `text/template` syntax, there are some additional functions, This is the default usage of `inspect`. - $ docker-machine inspect dev - { - "DriverName": "virtualbox", - "Driver": { - "MachineName": "docker-host-128be8d287b2028316c0ad5714b90bcfc11f998056f2f790f7c1f43f3d1e6eda", - "SSHPort": 55834, - "Memory": 1024, - "DiskSize": 20000, - "Boot2DockerURL": "", - "IPAddress": "192.168.5.99" - }, - ... - } +```none +$ docker-machine inspect dev + +{ + "DriverName": "virtualbox", + "Driver": { + "MachineName": "docker-host-128be8d287b2028316c0ad5714b90bcfc11f998056f2f790f7c1f43f3d1e6eda", + "SSHPort": 55834, + "Memory": 1024, + "DiskSize": 20000, + "Boot2DockerURL": "", + "IPAddress": "192.168.5.99" + }, + ... +} +``` **Get a machine's IP address:** For the most part, you can pick out any field from the JSON in a fairly straightforward manner. - {% raw %} - $ docker-machine inspect --format='{{.Driver.IPAddress}}' dev - 192.168.5.99 - {% endraw %} +```none +$ docker-machine inspect --format='{{.Driver.IPAddress}}' dev +192.168.5.99 +``` **Formatting details:** If you want a subset of information formatted as JSON, you can use the `json` function in the template. 
- {% raw %} - $ docker-machine inspect --format='{{json .Driver}}' dev-fusion - {"Boot2DockerURL":"","CPUS":8,"CPUs":8,"CaCertPath":"/Users/hairyhenderson/.docker/machine/certs/ca.pem","DiskSize":20000,"IPAddress":"172.16.62.129","ISO":"/Users/hairyhenderson/.docker/machine/machines/dev-fusion/boot2docker-1.5.0-GH747.iso","MachineName":"dev-fusion","Memory":1024,"PrivateKeyPath":"/Users/hairyhenderson/.docker/machine/certs/ca-key.pem","SSHPort":22,"SSHUser":"docker","SwarmDiscovery":"","SwarmHost":"tcp://0.0.0.0:3376","SwarmMaster":false} - {% endraw %} +```none +$ docker-machine inspect --format='{{json .Driver}}' dev-fusion +{"Boot2DockerURL":"","CPUS":8,"CPUs":8,"CaCertPath":"/Users/hairyhenderson/.docker/machine/certs/ca.pem","DiskSize":20000,"IPAddress":"172.16.62.129","ISO":"/Users/hairyhenderson/.docker/machine/machines/dev-fusion/boot2docker-1.5.0-GH747.iso","MachineName":"dev-fusion","Memory":1024,"PrivateKeyPath":"/Users/hairyhenderson/.docker/machine/certs/ca-key.pem","SSHPort":22,"SSHUser":"docker","SwarmDiscovery":"","SwarmHost":"tcp://0.0.0.0:3376","SwarmMaster":false} +``` While this is usable, it's not very human-readable. For this reason, there is `prettyjson`: - {% raw %} - $ docker-machine inspect --format='{{prettyjson .Driver}}' dev-fusion - { - "Boot2DockerURL": "", - "CPUS": 8, - "CPUs": 8, - "CaCertPath": "/Users/hairyhenderson/.docker/machine/certs/ca.pem", - "DiskSize": 20000, - "IPAddress": "172.16.62.129", - "ISO": "/Users/hairyhenderson/.docker/machine/machines/dev-fusion/boot2docker-1.5.0-GH747.iso", - "MachineName": "dev-fusion", - "Memory": 1024, - "PrivateKeyPath": "/Users/hairyhenderson/.docker/machine/certs/ca-key.pem", - "SSHPort": 22, - "SSHUser": "docker", - "SwarmDiscovery": "", - "SwarmHost": "tcp://0.0.0.0:3376", - "SwarmMaster": false - } - {% endraw %} +``` +$ docker-machine inspect --format='{{prettyjson .Driver}}' dev-fusion +{ + "Boot2DockerURL": "", + "CPUS": 8, + "CPUs": 8, + "CaCertPath": "/Users/hairyhenderson/.docker/machine/certs/ca.pem", + "DiskSize": 20000, + "IPAddress": "172.16.62.129", + "ISO": "/Users/hairyhenderson/.docker/machine/machines/dev-fusion/boot2docker-1.5.0-GH747.iso", + "MachineName": "dev-fusion", + "Memory": 1024, + "PrivateKeyPath": "/Users/hairyhenderson/.docker/machine/certs/ca-key.pem", + "SSHPort": 22, + "SSHUser": "docker", + "SwarmDiscovery": "", + "SwarmHost": "tcp://0.0.0.0:3376", + "SwarmMaster": false +} +``` diff --git a/machine/reference/ip.md b/machine/reference/ip.md index 4f863815f58..f0cfc6f0966 100644 --- a/machine/reference/ip.md +++ b/machine/reference/ip.md @@ -12,8 +12,11 @@ title: ip Get the IP address of one or more machines. - $ docker-machine ip dev - 192.168.99.104 - $ docker-machine ip dev dev2 - 192.168.99.104 - 192.168.99.105 +```none +$ docker-machine ip dev +192.168.99.104 + +$ docker-machine ip dev dev2 +192.168.99.104 +192.168.99.105 +``` diff --git a/machine/reference/kill.md b/machine/reference/kill.md index 8867cb2cb09..d6490b571bf 100644 --- a/machine/reference/kill.md +++ b/machine/reference/kill.md @@ -11,19 +11,23 @@ title: kill # kill - Usage: docker-machine kill [arg...] +```none +Usage: docker-machine kill [arg...] - Kill (abruptly force stop) a machine +Kill (abruptly force stop) a machine - Description: - Argument(s) are one or more machine names. +Description: + Argument(s) are one or more machine names. 
+``` For example: - $ docker-machine ls - NAME ACTIVE DRIVER STATE URL - dev * virtualbox Running tcp://192.168.99.104:2376 - $ docker-machine kill dev - $ docker-machine ls - NAME ACTIVE DRIVER STATE URL - dev * virtualbox Stopped +```none +$ docker-machine ls +NAME ACTIVE DRIVER STATE URL +dev * virtualbox Running tcp://192.168.99.104:2376 +$ docker-machine kill dev +$ docker-machine ls +NAME ACTIVE DRIVER STATE URL +dev * virtualbox Stopped +``` diff --git a/machine/reference/ls.md b/machine/reference/ls.md index c6ac98ff06e..d262ab21c49 100644 --- a/machine/reference/ls.md +++ b/machine/reference/ls.md @@ -10,16 +10,18 @@ title: ls # ls - Usage: docker-machine ls [OPTIONS] [arg...] +```none +Usage: docker-machine ls [OPTIONS] [arg...] - List machines +List machines - Options: +Options: - --quiet, -q Enable quiet mode - --filter [--filter option --filter option] Filter output based on conditions provided - --timeout, -t "10" Timeout in seconds, default to 10s - --format, -f Pretty-print machines using a Go template + --quiet, -q Enable quiet mode + --filter [--filter option --filter option] Filter output based on conditions provided + --timeout, -t "10" Timeout in seconds, default to 10s + --format, -f Pretty-print machines using a Go template +``` ## Timeout @@ -31,9 +33,11 @@ the -t flag for this purpose with a numerical value in seconds. ### Example - $ docker-machine ls -t 12 - NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS - default - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 +```none +$ docker-machine ls -t 12 +NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS +default - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 +``` ## Filtering @@ -50,25 +54,27 @@ The currently supported filters are: ### Examples - $ docker-machine ls - NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS - dev - virtualbox Stopped - foo0 - virtualbox Running tcp://192.168.99.105:2376 v1.9.1 - foo1 - virtualbox Running tcp://192.168.99.106:2376 v1.9.1 - foo2 * virtualbox Running tcp://192.168.99.107:2376 v1.9.1 +```none +$ docker-machine ls +NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS +dev - virtualbox Stopped +foo0 - virtualbox Running tcp://192.168.99.105:2376 v1.9.1 +foo1 - virtualbox Running tcp://192.168.99.106:2376 v1.9.1 +foo2 * virtualbox Running tcp://192.168.99.107:2376 v1.9.1 - $ docker-machine ls --filter name=foo0 - NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS - foo0 - virtualbox Running tcp://192.168.99.105:2376 v1.9.1 +$ docker-machine ls --filter name=foo0 +NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS +foo0 - virtualbox Running tcp://192.168.99.105:2376 v1.9.1 - $ docker-machine ls --filter driver=virtualbox --filter state=Stopped - NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS - dev - virtualbox Stopped v1.9.1 +$ docker-machine ls --filter driver=virtualbox --filter state=Stopped +NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS +dev - virtualbox Stopped v1.9.1 - $ docker-machine ls --filter label=com.class.app=foo1 --filter label=com.class.app=foo2 - NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS - foo1 - virtualbox Running tcp://192.168.99.105:2376 v1.9.1 - foo2 * virtualbox Running tcp://192.168.99.107:2376 v1.9.1 +$ docker-machine ls --filter label=com.class.app=foo1 --filter label=com.class.app=foo2 +NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS +foo1 - virtualbox Running tcp://192.168.99.105:2376 v1.9.1 +foo2 * virtualbox Running tcp://192.168.99.107:2376 v1.9.1 +``` ## Formatting @@ -77,7 +83,7 @@ The formatting option (`--format`) will pretty-print 
machines using a Go templat Valid placeholders for the Go template are listed below: | Placeholder | Description | -| -------------- | ---------------------------------------- | +|:---------------|:-----------------------------------------| | .Name | Machine name | | .Active | Is the machine active? | | .ActiveHost | Is the machine an active non-swarm host? | @@ -96,17 +102,17 @@ when using the table directive, will include column headers as well. The following example uses a template without headers and outputs the `Name` and `Driver` entries separated by a colon for all running machines: - {% raw %} - $ docker-machine ls --format "{{.Name}}: {{.DriverName}}" - default: virtualbox - ec2: amazonec2 - {% endraw %} +```none +$ docker-machine ls --format "{{.Name}}: {{.DriverName}}" +default: virtualbox +ec2: amazonec2 +``` To list all machine names with their driver in a table format you can use: - {% raw %} - $ docker-machine ls --format "table {{.Name}} {{.DriverName}}" - NAME DRIVER - default virtualbox - ec2 amazonec2 - {% endraw %} +```none +$ docker-machine ls --format "table {{.Name}} {{.DriverName}}" +NAME DRIVER +default virtualbox +ec2 amazonec2 +``` diff --git a/machine/reference/provision.md b/machine/reference/provision.md index c137fab92e7..63dec475f97 100644 --- a/machine/reference/provision.md +++ b/machine/reference/provision.md @@ -19,10 +19,13 @@ originally specified Swarm or Engine configuration). Usage is `docker-machine provision [name]`. Multiple names may be specified. - $ docker-machine provision foo bar - Copying certs to the local machine directory... - Copying certs to the remote machine... - Setting Docker configuration on the remote daemon... +```none +$ docker-machine provision foo bar + +Copying certs to the local machine directory... +Copying certs to the remote machine... +Setting Docker configuration on the remote daemon... +``` The Machine provisioning process will: diff --git a/machine/reference/regenerate-certs.md b/machine/reference/regenerate-certs.md index deb4f32cd3d..94105636ef0 100644 --- a/machine/reference/regenerate-certs.md +++ b/machine/reference/regenerate-certs.md @@ -10,21 +10,26 @@ title: regenerate-certs # regenerate-certs - Usage: docker-machine regenerate-certs [OPTIONS] [arg...] +```none +Usage: docker-machine regenerate-certs [OPTIONS] [arg...] - Regenerate TLS Certificates for a machine +Regenerate TLS Certificates for a machine - Description: - Argument(s) are one or more machine names. +Description: + Argument(s) are one or more machine names. - Options: +Options: - --force, -f Force rebuild and do not prompt + --force, -f Force rebuild and do not prompt +``` Regenerate TLS certificates and update the machine with new certs. -For example: +For example: - $ docker-machine regenerate-certs dev - Regenerate TLS machine certs? Warning: this is irreversible. (y/n): y - Regenerating TLS certificates +```none +$ docker-machine regenerate-certs dev + +Regenerate TLS machine certs? Warning: this is irreversible. (y/n): y +Regenerating TLS certificates +``` diff --git a/machine/reference/restart.md b/machine/reference/restart.md index 5b685245c55..a1276cead45 100644 --- a/machine/reference/restart.md +++ b/machine/reference/restart.md @@ -11,16 +11,20 @@ title: restart # restart - Usage: docker-machine restart [arg...] +```none +Usage: docker-machine restart [arg...] - Restart a machine +Restart a machine + +Description: + Argument(s) are one or more machine names. +``` - Description: - Argument(s) are one or more machine names. 
- Restart a machine. Oftentimes this is equivalent to `docker-machine stop; docker-machine start`. But some cloud driver try to implement a clever restart which keeps the same ip address. - $ docker-machine restart dev - Waiting for VM to start... +``` +$ docker-machine restart dev +Waiting for VM to start... +``` diff --git a/machine/reference/rm.md b/machine/reference/rm.md index 25226d9174f..7a1c5c4492a 100644 --- a/machine/reference/rm.md +++ b/machine/reference/rm.md @@ -14,54 +14,58 @@ title: rm Remove a machine. This will remove the local reference as well as delete it on the cloud provider or virtualization management platform. - $ docker-machine rm --help +```none +$ docker-machine rm --help - Usage: docker-machine rm [OPTIONS] [arg...] +Usage: docker-machine rm [OPTIONS] [arg...] - Remove a machine +Remove a machine - Description: - Argument(s) are one or more machine names. +Description: + Argument(s) are one or more machine names. - Options: +Options: - --force, -f Remove local configuration even if machine cannot be removed, also implies an automatic yes (`-y`) - -y Assumes automatic yes to proceed with remove, without prompting further user confirmation + --force, -f Remove local configuration even if machine cannot be removed, also implies an automatic yes (`-y`) + -y Assumes automatic yes to proceed with remove, without prompting further user confirmation +``` ## Examples - $ docker-machine ls - NAME ACTIVE URL STATE URL SWARM DOCKER ERRORS - bar - virtualbox Running tcp://192.168.99.101:2376 v1.9.1 - baz - virtualbox Running tcp://192.168.99.103:2376 v1.9.1 - foo - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 - qix - virtualbox Running tcp://192.168.99.102:2376 v1.9.1 +```none +$ docker-machine ls +NAME ACTIVE URL STATE URL SWARM DOCKER ERRORS +bar - virtualbox Running tcp://192.168.99.101:2376 v1.9.1 +baz - virtualbox Running tcp://192.168.99.103:2376 v1.9.1 +foo - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 +qix - virtualbox Running tcp://192.168.99.102:2376 v1.9.1 - $ docker-machine rm baz - About to remove baz - Are you sure? (y/n): y - Successfully removed baz +$ docker-machine rm baz +About to remove baz +Are you sure? (y/n): y +Successfully removed baz - $ docker-machine ls - NAME ACTIVE URL STATE URL SWARM DOCKER ERRORS - bar - virtualbox Running tcp://192.168.99.101:2376 v1.9.1 - foo - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 - qix - virtualbox Running tcp://192.168.99.102:2376 v1.9.1 +$ docker-machine ls +NAME ACTIVE URL STATE URL SWARM DOCKER ERRORS +bar - virtualbox Running tcp://192.168.99.101:2376 v1.9.1 +foo - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 +qix - virtualbox Running tcp://192.168.99.102:2376 v1.9.1 - $ docker-machine rm bar qix - About to remove bar, qix - Are you sure? (y/n): y - Successfully removed bar - Successfully removed qix +$ docker-machine rm bar qix +About to remove bar, qix +Are you sure? 
(y/n): y +Successfully removed bar +Successfully removed qix - $ docker-machine ls - NAME ACTIVE URL STATE URL SWARM DOCKER ERRORS - foo - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 +$ docker-machine ls +NAME ACTIVE URL STATE URL SWARM DOCKER ERRORS +foo - virtualbox Running tcp://192.168.99.100:2376 v1.9.1 - $ docker-machine rm -y foo - About to remove foo - Successfully removed foo +$ docker-machine rm -y foo +About to remove foo +Successfully removed foo +``` diff --git a/machine/reference/scp.md b/machine/reference/scp.md index eee9f895931..635892841aa 100644 --- a/machine/reference/scp.md +++ b/machine/reference/scp.md @@ -18,15 +18,17 @@ machine's case, you don't have to specify the name, just the path. Consider the following example: - $ cat foo.txt - cat: foo.txt: No such file or directory - $ docker-machine ssh dev pwd - /home/docker - $ docker-machine ssh dev 'echo A file created remotely! >foo.txt' - $ docker-machine scp dev:/home/docker/foo.txt . - foo.txt 100% 28 0.0KB/s 00:00 - $ cat foo.txt - A file created remotely! +```none +$ cat foo.txt +cat: foo.txt: No such file or directory +$ docker-machine ssh dev pwd +/home/docker +$ docker-machine ssh dev 'echo A file created remotely! >foo.txt' +$ docker-machine scp dev:/home/docker/foo.txt . +foo.txt 100% 28 0.0KB/s 00:00 +$ cat foo.txt +A file created remotely! +``` Just like how `scp` has a `-r` flag for copying files recursively, `docker-machine` has a `-r` flag for this feature. diff --git a/machine/reference/ssh.md b/machine/reference/ssh.md index fb160fc58ee..10c24569c6f 100644 --- a/machine/reference/ssh.md +++ b/machine/reference/ssh.md @@ -14,45 +14,54 @@ Log into or run a command on a machine using SSH. To login, just run `docker-machine ssh machinename`: - $ docker-machine ssh dev - ## . - ## ## ## == - ## ## ## ## === - /""""""""""""""""\___/ === - ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ - \______ o __/ - \ \ __/ - \____\______/ - _ _ ____ _ _ - | |__ ___ ___ | |_|___ \ __| | ___ ___| | _____ _ __ - | '_ \ / _ \ / _ \| __| __) / _` |/ _ \ / __| |/ / _ \ '__| - | |_) | (_) | (_) | |_ / __/ (_| | (_) | (__| < __/ | - |_.__/ \___/ \___/ \__|_____\__,_|\___/ \___|_|\_\___|_| - Boot2Docker version 1.4.0, build master : 69cf398 - Fri Dec 12 01:39:42 UTC 2014 - docker@boot2docker:~$ ls / - Users/ dev/ home/ lib/ mnt/ proc/ run/ sys/ usr/ - bin/ etc/ init linuxrc opt/ root/ sbin/ tmp var/ +```none +$ docker-machine ssh dev + ## . 
+ ## ## ## == + ## ## ## ## === + /""""""""""""""""\___/ === + ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ + \______ o __/ + \ \ __/ + \____\______/ + _ _ ____ _ _ +| |__ ___ ___ | |_|___ \ __| | ___ ___| | _____ _ __ +| '_ \ / _ \ / _ \| __| __) / _` |/ _ \ / __| |/ / _ \ '__| +| |_) | (_) | (_) | |_ / __/ (_| | (_) | (__| < __/ | +|_.__/ \___/ \___/ \__|_____\__,_|\___/ \___|_|\_\___|_| +Boot2Docker version 1.4.0, build master : 69cf398 - Fri Dec 12 01:39:42 UTC 2014 +docker@boot2docker:~$ ls / +Users/ dev/ home/ lib/ mnt/ proc/ run/ sys/ usr/ +bin/ etc/ init linuxrc opt/ root/ sbin/ tmp var/ +``` + You can also specify commands to run remotely by appending them directly to the `docker-machine ssh` command, much like the regular `ssh` program works: - $ docker-machine ssh dev free - total used free shared buffers - Mem: 1023556 183136 840420 0 30920 - -/+ buffers: 152216 871340 - Swap: 1212036 0 1212036 +```none +$ docker-machine ssh dev free + +total used free shared buffers +Mem: 1023556 183136 840420 0 30920 +-/+ buffers: 152216 871340 +Swap: 1212036 0 1212036 +``` Commands with flags will work as well: - $ docker-machine ssh dev df -h - Filesystem Size Used Available Use% Mounted on - rootfs 899.6M 85.9M 813.7M 10% / - tmpfs 899.6M 85.9M 813.7M 10% / - tmpfs 499.8M 0 499.8M 0% /dev/shm - /dev/sda1 18.2G 58.2M 17.2G 0% /mnt/sda1 - cgroup 499.8M 0 499.8M 0% /sys/fs/cgroup - /dev/sda1 18.2G 58.2M 17.2G 0% - /mnt/sda1/var/lib/docker/aufs +```none +$ docker-machine ssh dev df -h + +Filesystem Size Used Available Use% Mounted on +rootfs 899.6M 85.9M 813.7M 10% / +tmpfs 899.6M 85.9M 813.7M 10% / +tmpfs 499.8M 0 499.8M 0% /dev/shm +/dev/sda1 18.2G 58.2M 17.2G 0% /mnt/sda1 +cgroup 499.8M 0 499.8M 0% /sys/fs/cgroup +/dev/sda1 18.2G 58.2M 17.2G 0% +/mnt/sda1/var/lib/docker/aufs +``` If you are using the "external" SSH type as detailed in the next section, you can include additional arguments to pass through to the `ssh` binary in the @@ -61,7 +70,9 @@ the command generated by Docker Machine). For instance, the following command will forward port 8080 from the `default` machine to `localhost` on your host computer: - $ docker-machine ssh default -L 8080:localhost:8080 +```bash +$ docker-machine ssh default -L 8080:localhost:8080 +``` ## Different types of SSH @@ -80,7 +91,9 @@ and Docker Machine will act sensibly out of the box. However, if you deliberately want to use the Go native version, you can do so with a global command line flag / environment variable like so: - $ docker-machine --native-ssh ssh dev +```bash +$ docker-machine --native-ssh ssh dev +``` There are some variations in behavior between the two methods, so please report any issues or inconsistencies if you come across them. diff --git a/machine/reference/start.md b/machine/reference/start.md index 0f3787f9ef9..9080a703aa6 100644 --- a/machine/reference/start.md +++ b/machine/reference/start.md @@ -11,14 +11,19 @@ title: start # start - Usage: docker-machine start [arg...] +```none +Usage: docker-machine start [arg...] - Start a machine +Start a machine + +Description: + Argument(s) are one or more machine names. +``` - Description: - Argument(s) are one or more machine names. For example: - $ docker-machine start dev - Starting VM... +```none +$ docker-machine start dev +Starting VM... 
+``` diff --git a/machine/reference/status.md b/machine/reference/status.md index 2fd048f235d..d9160fb2313 100644 --- a/machine/reference/status.md +++ b/machine/reference/status.md @@ -10,14 +10,18 @@ title: status # status - Usage: docker-machine status [arg...] +```none +Usage: docker-machine status [arg...] - Get the status of a machine +Get the status of a machine - Description: - Argument is a machine name. +Description: + Argument is a machine name. +``` For example: - $ docker-machine status dev - Running +``` +$ docker-machine status dev +Running +``` diff --git a/machine/reference/stop.md b/machine/reference/stop.md index e427b4c7495..bfdde98abe7 100644 --- a/machine/reference/stop.md +++ b/machine/reference/stop.md @@ -11,19 +11,26 @@ title: stop # stop - Usage: docker-machine stop [arg...] +```none +Usage: docker-machine stop [arg...] - Gracefully Stop a machine +Gracefully Stop a machine - Description: - Argument(s) are one or more machine names. +Description: + Argument(s) are one or more machine names. +``` For example: - $ docker-machine ls - NAME ACTIVE DRIVER STATE URL - dev * virtualbox Running tcp://192.168.99.104:2376 - $ docker-machine stop dev - $ docker-machine ls - NAME ACTIVE DRIVER STATE URL - dev * virtualbox Stopped +```none +$ docker-machine ls + +NAME ACTIVE DRIVER STATE URL +dev * virtualbox Running tcp://192.168.99.104:2376 + +$ docker-machine stop dev +$ docker-machine ls + +NAME ACTIVE DRIVER STATE URL +dev * virtualbox Stopped +``` diff --git a/machine/reference/upgrade.md b/machine/reference/upgrade.md index e911d5435c7..0e87dc18fc2 100644 --- a/machine/reference/upgrade.md +++ b/machine/reference/upgrade.md @@ -20,12 +20,15 @@ example, if the machine uses boot2docker for its OS, this command will download the latest boot2docker ISO and replace the machine's existing ISO with the latest. - $ docker-machine upgrade default - Stopping machine to do the upgrade... - Upgrading machine default... - Downloading latest boot2docker release to /home/username/.docker/machine/cache/boot2docker.iso... - Starting machine back up... - Waiting for VM to start... +```none +$ docker-machine upgrade default + +Stopping machine to do the upgrade... +Upgrading machine default... +Downloading latest boot2docker release to /home/username/.docker/machine/cache/boot2docker.iso... +Starting machine back up... +Waiting for VM to start... +``` > **Note**: If you are using a custom boot2docker ISO specified using > `--virtualbox-boot2docker-url` or an equivalent flag, running an upgrade on diff --git a/machine/reference/url.md b/machine/reference/url.md index 1c2cae972c6..0e1ee9b97ca 100644 --- a/machine/reference/url.md +++ b/machine/reference/url.md @@ -12,5 +12,7 @@ title: url Get the URL of a host - $ docker-machine url dev - tcp://192.168.99.109:2376 +```none +$ docker-machine url dev +tcp://192.168.99.109:2376 +``` diff --git a/notary/advanced_usage.md b/notary/advanced_usage.md index 19065290bad..d0a6059dc21 100644 --- a/notary/advanced_usage.md +++ b/notary/advanced_usage.md @@ -16,7 +16,7 @@ This page explains advanced uses of Notary client for users who are running their own Notary service. Make sure you have first read and understood how to [run your own Notary service](running_a_service.md) before continuing. -#### An important note about the examples +## An important note about the examples This document's command examples omit the `-s` and `-d` flags. 
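Spelled out in full, a command from this page would look something like the sketch below; the server URL and trust directory are placeholders for your own setup:

```none
$ notary -s https://notaryserver:4443 -d ~/.docker/trust list example.com/collection
```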
If you do not know what these options do, please read the [Getting @@ -57,6 +57,7 @@ algorithm: ## Add and remove Targets It's simple to add targets to a trusted collection with notary CLI: + ``` $ notary add example.com/collection v1 my_file.txt ``` @@ -67,6 +68,7 @@ file path and one or more checksums of the contents. Note that this is an offline command, and we must run a `notary publish example.com/collection` for the add to take effect. To remove targets, we use the `notary remove` command, specifying the GUN and target name. + ``` $ notary remove example.com/collection v1 ``` @@ -247,7 +249,7 @@ To use the `targets/releases` role for pushing and pulling images with content t follow the steps above to add and publish the delegation role with notary. When adding the delegation, the `--all-paths` flag should be used to allow signing all tags. -# Files and state on disk +## Files and state on disk Notary stores state in its `trust_dir` directory, which is `~/.notary` by default or usually `~/.docker/trust` when enabling docker content trust. Within this diff --git a/notary/getting_started.md b/notary/getting_started.md index c71f3a94a4c..f3beba5dd77 100644 --- a/notary/getting_started.md +++ b/notary/getting_started.md @@ -32,7 +32,7 @@ freshness of your content. ## Install Notary -You can download precompiled notary binary for 64 bit Linux or Mac OS X from the +You can download precompiled notary binary for 64 bit Linux or macOS from the Notary repository's releases page on GitHub. Windows is not officially diff --git a/notary/reference/signer-config.md b/notary/reference/signer-config.md index 85509a40b52..4f7af85fa91 100644 --- a/notary/reference/signer-config.md +++ b/notary/reference/signer-config.md @@ -191,7 +191,9 @@ For example, the configuration above specifies the default password alias to be If this configuration is used, then you must: -`export NOTARY_SIGNER_PASSWORDALIAS1=mypassword` +``` +export NOTARY_SIGNER_PASSWORDALIAS1=mypassword +``` so that that Notary signer knows to encrypt all keys with the passphrase `mypassword`, and to decrypt any private key stored with password alias diff --git a/opensource/FAQ.md b/opensource/FAQ.md index 4baeb4c8fba..c0333351ac3 100644 --- a/opensource/FAQ.md +++ b/opensource/FAQ.md @@ -23,31 +23,39 @@ troubleshooting problems in your code contribution. ## How do I set my signature {#how-do-i-set-my-signature} -1. Change to the root of your `docker-fork` repository. +1. Change to the root of your `docker-fork` repository. - $ cd docker-fork + ``` + $ cd docker-fork + ``` -2. Set your `user.name` for the repository. +2. Set your `user.name` for the repository. - $ git config --local user.name "FirstName LastName" + ``` + $ git config --local user.name "FirstName LastName" + ``` -3. Set your `user.email` for the repository. +3. Set your `user.email` for the repository. - $ git config --local user.email "emailname@mycompany.com" + ``` + $ git config --local user.email "emailname@mycompany.com" + ``` ## How do I track changes from the docker repo upstream Set your local repo to track changes upstream, on the `docker` repository. -1. Change to the root of your Docker repository. +1. Change to the root of your Docker repository. - $ cd docker-fork - -2. Add a remote called `upstream` that points to `docker/docker` - - $ git remote add upstream https://github.com/docker/docker.git + ``` + $ cd docker-fork + ``` +2. 
Add a remote called `upstream` that points to `docker/docker` + ``` + $ git remote add upstream https://github.com/docker/docker.git + ``` ## How do I format my Go code @@ -89,37 +97,49 @@ leave a blank line to separate paragraphs. Always rebase and squash your commits before making a pull request. -1. Fetch any of the last minute changes from `docker/docker`. +1. Fetch any of the last minute changes from `docker/docker`. - $ git fetch upstream master + ``` + $ git fetch upstream master + ``` -3. Start an interactive rebase. +2. Start an interactive rebase. - $ git rebase -i upstream/master + ``` + $ git rebase -i upstream/master + ``` -4. Rebase opens an editor with a list of commits. +3. Rebase opens an editor with a list of commits. - pick 1a79f55 Tweak some of images - pick 3ce07bb Add a new line + ``` + pick 1a79f55 Tweak some of images + pick 3ce07bb Add a new line + ``` - If you run into trouble, `git --rebase abort` removes any changes and gets you + If you run into trouble, `git rebase --abort` removes any changes and gets you back to where you started. -4. Squash the `pick` keyword with `squash` on all but the first commit. +4. Squash the `pick` keyword with `squash` on all but the first commit. - pick 1a79f55 Tweak some of images - squash 3ce07bb Add a new line + ``` + pick 1a79f55 Tweak some of images + squash 3ce07bb Add a new line + ``` - After closing the file, `git` opens your editor again to edit the commit - message. + After closing the file, `git` opens your editor again to edit the commit + message. -5. Edit and save your commit message. +5. Edit and save your commit message. - $ git commit -s + ``` + $ git commit -s + ``` - Make sure your message includes your signature. + Make sure your message includes your signature. -8. Push any changes to your fork on GitHub, using the `-f` option to +6. Push any changes to your fork on GitHub, using the `-f` option to force the previous change to be overwritten. - $ git push -f origin my-keen-feature + ``` + $ git push -f origin my-keen-feature + ``` diff --git a/opensource/code.md b/opensource/code.md index 82ccb574f33..56eb1564207 100644 --- a/opensource/code.md +++ b/opensource/code.md @@ -28,36 +28,36 @@ If you are an experienced open source contributor you may be familiar with this workflow. If you are new or just need reminders, the steps below link to more detailed documentation in Docker's project contributors guide. -1. Get the software you need. This explains how to install a couple of tools used in our development environment. What you need (or don't need) might surprise you. -2. Configure Git and fork the repo. Your Git configuration can make it easier for you to contribute. - Configuration is especially key if are new to contributing or to Docker. + Configuration is especially key if you are new to contributing or to Docker. -3. Learn to work with the Docker development container. Docker developers run `docker` in `docker`. If you are a geek, this is a pretty cool experience. -4. Claim an issue to work on. We created a filter listing all open and unclaimed issues for Docker. 5. Work on the +href="http://docs.docker.com/opensource/workflow/work-issue/" target="_blank">Work on the issue. If you change or add code or docs to a project, you should test your changes as you work. This page explains how to + href="http://docs.docker.com/opensource/project/test-and-docs/" target="_blank">how to test in our development environment. Also, remember to always **sign your commits** as you work!
To sign your @@ -68,7 +68,7 @@ issue. If you don't sign Gordon will get you! -6. Create a +6. Create a pull request. If you make a change to fix an issue, add reference to the issue in the pull @@ -77,10 +77,10 @@ pull request. ![Sign commits and issues](images/bonus.png) - We have also have checklist that describes [what each pull request + We also have checklist that describes [what each pull request needs](code.md#what-is-the-pre-pull-request-checklist). -7. Participate in the pull request review till a successful merge. diff --git a/opensource/get-help.md b/opensource/get-help.md index ff8a50bca1e..a0f5cc993ec 100644 --- a/opensource/get-help.md +++ b/opensource/get-help.md @@ -33,7 +33,7 @@ community members and developers. - Internet Relay Chat (IRC) + Internet Relay Chat (IRC)

      IRC is a direct line to our most knowledgeable Docker users. @@ -76,11 +76,11 @@ platforms. Using Webchat from Freenode.net is a quick and easy way to get chatting. To register: -1. In your browser open https://webchat.freenode.net +1. In your browser open https://webchat.freenode.net ![Login to webchat screen](images/irc_connect.png) -2. Fill out the form. +2. Fill out the form. @@ -97,7 +97,7 @@ register:
      -3. Click on the "Connect" button. +3. Click on the "Connect" button. The browser connects you to Webchat. You'll see a lot of text. At the bottom of the Webchat web page is a command line bar. Just above the command line bar @@ -105,10 +105,12 @@ register: ![Registration needed screen](images/irc_after_login.png) -4. Register your nickname by entering the following command in the +4. Register your nickname by entering the following command in the command line bar: - /msg NickServ REGISTER yourpassword youremail@example.com + ``` + /msg NickServ REGISTER yourpassword youremail@example.com + ``` ![Registering screen](images/register_nic.png) @@ -120,14 +122,15 @@ command line bar: that you provided. This email will contain instructions for completing your registration. -5. Open your email client and look for the email. +5. Open your email client and look for the email. ![Login screen](images/register_email.png) -6. Back in the browser, complete the registration according to the email -by entering the following command into the webchat command line bar: +6. Back in the browser, complete the registration according to the email by entering the following command into the webchat command line bar: - /msg NickServ VERIFY REGISTER yournickname somecode + ``` + /msg NickServ VERIFY REGISTER yournickname somecode + ``` Your nickname is now registered to chat on freenode.net. @@ -139,24 +142,24 @@ IRCCloud is a web-based IRC client service that is hosted in the cloud. This is a Freemium product, meaning the free version is limited and you can pay for more features. To use IRCCloud: -1. Select the following link: +1. Select the following link: Join the #docker channel on chat.freenode.net The following web page is displayed in your browser: ![IRCCloud Register screen](images/irccloud-join.png) -2. If this is your first time using IRCCloud enter a valid email address in the +2. If this is your first time using IRCCloud enter a valid email address in the form. People who have already registered with IRCCloud can select the "sign in here" link. Additionally, people who are already registered with IRCCloud may have a cookie stored on their web browser that enables a quick start "let's go" link to be shown instead of the above form. In this case just select the "let's go" link and [jump ahead to start chatting](get-help.md#start-chatting) -3. After entering your email address in the form, check your email for an invite +3. After entering your email address in the form, check your email for an invite from IRCCloud and follow the instructions provided in the email. -4. After following the instructions in your email you should have an IRCCloud +4. After following the instructions in your email you should have an IRCCloud Client web page in your browser: ![IRCCloud](images/irccloud-register-nick.png) @@ -164,20 +167,24 @@ Client web page in your browser: The message shown above may appear indicating that you need to register your nickname. -5. To register your nickname enter the following message into the command line bar +5. To register your nickname enter the following message into the command line bar at the bottom of the IRCCloud Client: - /msg NickServ REGISTER yourpassword youremail@example.com + ``` + /msg NickServ REGISTER yourpassword youremail@example.com + ``` This command line bar is for chatting and entering in IRC commands. -6. Check your email for an invite to freenode.net: +6. Check your email for an invite to freenode.net: ![Login screen](images/register_email.png) -7. 
Back in the browser, complete the registration according to the email. +7. Back in the browser, complete the registration according to the email. - /msg NickServ VERIFY REGISTER yournickname somecode + ``` + /msg NickServ VERIFY REGISTER yournickname somecode + ``` ## Tips @@ -188,7 +195,7 @@ The procedures in this section apply to both IRC clients. Next time you return to log into chat, you may need to re-enter your password on the command line using this command: - /msg NickServ identify + /msg NickServ identify With Webchat if you forget or lose your password you'll need to join the `#freenode` channel and request them to reset it for you. @@ -198,17 +205,17 @@ With Webchat if you forget or lose your password you'll need to join the Join the `#docker` group using the following command in the command line bar of your IRC Client: - /j #docker + /j #docker You can also join the `#docker-dev` group: - /j #docker-dev + /j #docker-dev ### Start chatting To ask questions to the group just type messages in the command line bar: - ![Web Chat Screen](images/irc_chat.png) +![Web Chat Screen](images/irc_chat.png) ## Learning more about IRC diff --git a/opensource/kitematic/index.md b/opensource/kitematic/index.md index 85ee1690a93..19fadafbbc8 100644 --- a/opensource/kitematic/index.md +++ b/opensource/kitematic/index.md @@ -11,9 +11,9 @@ title: Contribute to Kitematic --- # Contribute to Kitematic -* [Get started] (get_started.md) +* [Get started](get_started.md) * [Find an issue on GitHub](find_issue.md) * [Set up for Kitematic development](set_up_dev.md) * [Develop in Kitematic (work on an issue)](work_issue.md) -* [Review your branch and create a pull request (PR)] (create_pr.md) +* [Review your branch and create a pull request (PR)](create_pr.md) * [Where to learn more](next_steps.md) diff --git a/opensource/kitematic/set_up_dev.md b/opensource/kitematic/set_up_dev.md index 5dfb1e226c6..bdabcc80c66 100644 --- a/opensource/kitematic/set_up_dev.md +++ b/opensource/kitematic/set_up_dev.md @@ -24,56 +24,65 @@ Kitematic is built on top of: To get started, you will need to install Node.js v4.2.1. Using Node Version Manager (NVM) makes the Node.js install easy. ### Windows: -1. Download latest release -2. Follow the installer steps to get NVM installed. Please note, you need to +2. Follow the installer steps to get NVM installed. Please note, you need to uninstall any existing versions of node.js before installing NVM for Windows; the above installer link will have an uninstaller available. - ![windows installer](images/nvm_install.jpeg) + ![windows installer](images/nvm_install.jpeg) -### Mac OSX/Linux: +### macOS/Linux: -1. Open a terminal window +1. Open a terminal window -2. Copy and paste the following install script: +2. Copy and paste the following install script: - curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.29.0/install.sh | bash + ``` + curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.29.0/install.sh | bash + ``` -3. To activate nvm, close the terminal window and re-open a new one. +3. To activate nvm, close the terminal window and re-open a new one. (Alternatively, you can source nvm from your current shell with the command `. ~/.nvm/nvm.sh` ) -(To learn more about working with NVM, see Mac OSX/Linux official nvm repo, Windows official nvm repo, and How To Install Node.js with NVM ON A VPS) +(To learn more about working with NVM, see macOS/Linux official nvm repo, Windows official nvm repo, and How To Install Node.js with NVM ON A VPS) ## Install Node.js -1. 
Install the proper Node.js version. +1. Install the proper Node.js version. - $ nvm install v4.2.1 + ``` + $ nvm install v4.2.1 + ``` -2. Make this version the default. +2. Make this version the default. - $ nvm alias default v4.2.1 + ``` + $ nvm alias default v4.2.1 + ``` -3. Run `node --version` to confirm that you are now using the proper Node.js version. +3. Run `node --version` to confirm that you are now using the proper Node.js version. - $ node --version + ``` + $ node --version + ``` ## Fork the master branch of Kitematic To fork the master branch of Kitematic: -1. Go to the docker/kitematic repository . +1. Go to the docker/kitematic repository . -2. Click **Fork** in the upper right corner of the GitHub interface. +2. Click **Fork** in the upper right corner of the GitHub interface. GitHub forks the repository to your GitHub account. The original `docker/kitematic` repository becomes a new fork `YOUR_ACCOUNT/kitematic` under your account. -3. Copy your fork's clone URL from GitHub. +3. Copy your fork's clone URL from GitHub. GitHub allows you to use HTTPS or SSH protocols for clones. You can use the `git` command line or clients like Subversion to clone a repository. This guide assume you are using the HTTPS protocol and the `git` command line. If you are comfortable with SSH and some other tool, feel free to use that instead. You'll need to convert what you see in the guide to what is appropriate to your tool. @@ -81,43 +90,57 @@ To fork the master branch of Kitematic: To clone your repository and create a branch for the issue: -1. Open a terminal window on your local host and change to your home directory. +1. Open a terminal window on your local host and change to your home directory. - $ cd ~ + ``` + $ cd ~ + ``` In Windows, you'll work in your Docker Quickstart Terminal window instead of Powershell or a `cmd` window. -2. Create a directory for your new repository and change into that directory. +2. Create a directory for your new repository and change into that directory. -3. From the root of your repository, clone the fork to your local host. +3. From the root of your repository, clone the fork to your local host. - $ git clone https://github.com/YOUR_USERNAME/kitematic.git + ``` + $ git clone https://github.com/YOUR_USERNAME/kitematic.git + ``` 4. Create and checkout a branch for the issue you will be working on. - $ git checkout -b 1191-branch + ``` + $ git checkout -b 1191-branch + ``` As previously mentioned, issue #1191 is set up as an example to use for this exercise. ## Set up your signature and upstream remote + You can set your signature globally or locally. -1. Set your `user.name` for the repository. +1. Set your `user.name` for the repository. - $ git config --local user.name "FirstName LastName" + ``` + $ git config --local user.name "FirstName LastName" + ``` -2. Set your `user.email` for the repository. +2. Set your `user.email` for the repository. - $ git config --local user.email "emailname@mycompany.com" + ``` + $ git config --local user.email "emailname@mycompany.com" + ``` -3. Check the result in your `git` configuration. +3. Check the result in your `git` configuration. - $ git config --local --list + ``` + $ git config --local --list + ``` -4. Set your local repository to track changes upstream, on the `kitematic` -repository. +4. Set your local repository to track changes upstream, on the `kitematic` repository. 
- $ git remote add upstream https://github.com/docker/kitematic.git + ``` + $ git remote add upstream https://github.com/docker/kitematic.git + ``` (To learn more, see Set up your signature and an upstream remote.) @@ -125,20 +148,27 @@ href="http://docs.docker.com/opensource/project/set-up-git/#set-your-signature-a ## Install dependencies, start Kitematic, and verify your setup Your Node.js install includes npm for package management. You'll use it to install supporting packages and start the app. -1. Verify that the package manager is running and check the version (at the time of this writing, v2.14.7). +1. Verify that the package manager is running and check the version (at the time of this writing, v2.14.7). - $ npm --version + ``` + $ npm --version + ``` -2. Install the package dependencies. +2. Install the package dependencies. - $ npm install + ``` + $ npm install + ``` -3. From the root of your kitematic repository, use the package manager to start Kitematic and confirm everything went well. +3. From the root of your kitematic repository, use the package manager to start Kitematic and confirm everything went well. - $ npm start + ``` + $ npm start + ``` All of the core files in Kitematic are in the `src` folder, which then follows the AltJS structure of: + ``` kitematic/ |--src/ @@ -149,8 +179,10 @@ kitematic/ |--components/ | |--MyComponent.react.js ``` + The `components` folder is where the layout files are, the `stores` represent the application logic and `actions` are the dispatcher for actions taken within the `components`. ## Where to go next + You are ready to start working on the issue. Go to [Develop in Kitematic (work on an issue)](work_issue.md). diff --git a/opensource/kitematic/work_issue.md b/opensource/kitematic/work_issue.md index 493ad9e3c1a..e347eadb386 100644 --- a/opensource/kitematic/work_issue.md +++ b/opensource/kitematic/work_issue.md @@ -15,18 +15,18 @@ For this tutorial, we will work on issue Atom as it's a full featured editor with great ES lint support to keep your code organized. +1. Open the project in your favorite editor - We recommend using Atom as it's a full featured editor with great ES lint support to keep your code organized. -2. Open the `ContainerSettingsGeneral.react.js` file which is found under the `src/components/` folder and look for the following piece of code, which is in fact the layout (like HTML in the browser): +2. Open the `ContainerSettingsGeneral.react.js` file which is found under the `src/components/` folder and look for the following piece of code, which is in fact the layout (like HTML in the browser) circa line ~200: ``` - return ( -

      - ... + return ( +
      + ... ``` - (line ~200) + -3. Above this code we will create a javascript variable that will allow us to display our container id: +3. Above this code we will create a javascript variable that will allow us to display our container id: ``` let shortId = ( @@ -41,39 +41,47 @@ To do this, edit the container `General Settings` layout. This snippet has been saved as a GitHub gist. -4. We then need to add the variable to the rendered view, by adding it below the `{rename}` tag. The new render code should look something like: +4. We then need to add the variable to the rendered view, by adding it below the `{rename}` tag. The new render code should look something like: - ``` - return ( -
      - {rename} - {shortId} ``` - At this point, the updated code should look similar to this: + return ( +
      + {rename} + {shortId} + ``` + + At this point, the updated code should look similar to this: ![Javascript to display container id in kitematic](images/settings-code-example.png) -5. Save the code changes, re-start Kitematic. +5. Save the code changes, re-start Kitematic. - $ npm start + ``` + $ npm start + ``` - Now, the container ID should show on the General Settings tab, along with the container name. + Now, the container ID should show on the General Settings tab, along with the container name. - ![Container ID](images/kitematic_gui_container_id.png) + ![Container ID](images/kitematic_gui_container_id.png) - *Note that the container ID in Kitematic matches the ID shown as output to the `docker ps` command.* + *Note that the container ID in Kitematic matches the ID shown as output to the `docker ps` command.* -6. You can close the Kitematic application now, and kill the running process in the terminal via CTRL+c. +6. You can close the Kitematic application now, and kill the running process in the terminal via CTRL+c. -7. Stage your changes by adding them. +7. Stage your changes by adding them. - $ git add src/components/ContainerSettingsGeneral.react.js + ``` + $ git add src/components/ContainerSettingsGeneral.react.js + ``` -8. Commit your code changes with a comment that explains what this fixes or closes. +8. Commit your code changes with a comment that explains what this fixes or closes. - $ git commit -s -m "added container ID to show on settings tab, fixes issue #1191" + ``` + $ git commit -s -m "added container ID to show on settings tab, fixes issue #1191" + ``` (See Coding Style in the general guidelines on Contributing to Docker.) ## Where to go next + At this point, you are ready to [Review your branch and create a pull request](create_pr.md) to merge your new feature into Kitematic. diff --git a/opensource/project/set-up-dev-env.md b/opensource/project/set-up-dev-env.md index fa895ad18ba..94c2bf48c22 100644 --- a/opensource/project/set-up-dev-env.md +++ b/opensource/project/set-up-dev-env.md @@ -30,7 +30,7 @@ you continue working with your fork on this branch. ## Task 1. Remove images and containers Docker developers run the latest stable release of the Docker software (with -Docker Machine if their machine is Mac OS X). They clean their local hosts of +Docker Machine if their machine is macOS). They clean their local hosts of unnecessary Docker artifacts such as stopped containers or unused images. Cleaning unnecessary artifacts isn't strictly necessary, but it is good practice, so it is included here. @@ -39,55 +39,55 @@ To remove unnecessary artifacts: 1. Verify that you have no unnecessary containers running on your host. - ```bash - $ docker ps -a - ``` + ```none + $ docker ps -a + ``` - You should see something similar to the following: + You should see something similar to the following: - ```bash - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - ``` + ```none + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + ``` - There are no running or stopped containers on this host. A fast way to - remove old containers is the following: + There are no running or stopped containers on this host. A fast way to + remove old containers is the following: - ```bash - $ docker rm $(docker ps -a -q) - ``` + ```none + $ docker rm $(docker ps -a -q) + ``` - This command uses `docker ps` to list all containers (`-a` flag) by numeric - IDs (`-q` flag). Then, the `docker rm` command removes the resulting list. 
- If you have running but unused containers, stop and then remove them with - the `docker stop` and `docker rm` commands. + This command uses `docker ps` to list all containers (`-a` flag) by numeric + IDs (`-q` flag). Then, the `docker rm` command removes the resulting list. + If you have running but unused containers, stop and then remove them with + the `docker stop` and `docker rm` commands. 2. Verify that your host has no dangling images. - ```bash - $ docker images - ``` + ```none + $ docker images + ``` - You should see something similar to the following: + You should see something similar to the following: - ```bash - REPOSITORY TAG IMAGE ID CREATED SIZE - ``` + ```none + REPOSITORY TAG IMAGE ID CREATED SIZE + ``` - This host has no images. You may have one or more _dangling_ images. A - dangling image is not used by a running container and is not an ancestor of - another image on your system. A fast way to remove dangling image is - the following: + This host has no images. You may have one or more _dangling_ images. A + dangling image is not used by a running container and is not an ancestor of + another image on your system. A fast way to remove dangling image is + the following: - ```bash - $ docker rmi -f $(docker images -q -a -f dangling=true) - ``` + ```none + $ docker rmi -f $(docker images -q -a -f dangling=true) + ``` - This command uses `docker images` to list all images (`-a` flag) by numeric - IDs (`-q` flag) and filter them to find dangling images (`-f dangling=true`). - Then, the `docker rmi` command forcibly (`-f` flag) removes - the resulting list. If you get a "docker: "rmi" requires a minimum of 1 argument." - message, that means there were no dangling images. To remove just one image, use the - `docker rmi ID` command. + This command uses `docker images` to list all images (`-a` flag) by numeric + IDs (`-q` flag) and filter them to find dangling images (`-f dangling=true`). + Then, the `docker rmi` command forcibly (`-f` flag) removes + the resulting list. If you get a "docker: "rmi" requires a minimum of 1 argument." + message, that means there were no dangling images. To remove just one image, use the + `docker rmi ID` command. ## Task 2. Start a development container @@ -99,126 +99,128 @@ can take over 15 minutes to complete. 1. Open a terminal. - Mac users, use `docker-machine status your_vm_name` to make sure your VM is running. You - may need to run `eval "$(docker-machine env your_vm_name)"` to initialize your - shell environment. + For Mac users, use `docker-machine status your_vm_name` to make sure your VM is running. You + may need to run `eval "$(docker-machine env your_vm_name)"` to initialize your + shell environment. 2. Change into the root of the `docker-fork` repository. - ```bash - $ cd ~/repos/docker-fork - ``` + ```none + $ cd ~/repos/docker-fork + ``` - If you are following along with this guide, you created a `dry-run-test` - branch when you set up Git for - contributing. + If you are following along with this guide, you created a `dry-run-test` + branch when you + set up Git for contributing. 3. Ensure you are on your `dry-run-test` branch. - ```bash - $ git checkout dry-run-test - ``` + ```none + $ git checkout dry-run-test + ``` - If you get a message that the branch doesn't exist, add the `-b` flag (`git checkout -b dry-run-test`) so the - command both creates the branch and checks it out. 
+ If you get a message that the branch doesn't exist, add the `-b` flag (`git checkout -b dry-run-test`) so the + command both creates the branch and checks it out. 4. Use `make` to build a development environment image and run it in a container. - ```bash - $ make shell - ``` + ```none + $ make BIND_DIR=. shell + ``` - The command returns informational messages as it runs. The first build may - take a few minutes to create an image. Using the instructions in the - `Dockerfile`, the build may need to download source and other images. A - successful build returns a final message and opens a Bash shell into the - container. + The command returns informational messages as it runs. The first build may + take a few minutes to create an image. Using the instructions in the + `Dockerfile`, the build may need to download source and other images. A + successful build returns a final message and opens a Bash shell into the + container. - ```bash - Successfully built 3d872560918e - docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/docker/docker/bundles" -t "docker-dev:dry-run-test" bash - root@f31fa223770f:/go/src/github.com/docker/docker# - ``` + ```none + Successfully built 3d872560918e + docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/docker/docker/bundles" -t "docker-dev:dry-run-test" bash + root@f31fa223770f:/go/src/github.com/docker/docker# + ``` - At this point, your prompt reflects the container's BASH shell. + At this point, your prompt reflects the container's BASH shell. 5. List the contents of the current directory (`/go/src/github.com/docker/docker`). - You should see the image's source from the `/go/src/github.com/docker/docker` - directory. + You should see the image's source from the `/go/src/github.com/docker/docker` + directory. - ![List example](images/list_example.png) + ![List example](images/list_example.png) 6. Make a `docker` binary. - ```bash - root@a8b2885ab900:/go/src/github.com/docker/docker# hack/make.sh binary - ...output snipped... - bundles/1.12.0-dev already exists. Removing. + ```none + root@a8b2885ab900:/go/src/github.com/docker/docker# hack/make.sh binary + ...output snipped... + bundles/1.12.0-dev already exists. Removing. - ---> Making bundle: binary (in bundles/1.12.0-dev/binary) - Building: bundles/1.12.0-dev/binary/docker-1.12.0-dev - Created binary: bundles/1.12.0-dev/binary/docker-1.12.0-dev - Copying nested executables into bundles/1.12.0-dev/binary - ``` + ---> Making bundle: binary (in bundles/1.12.0-dev/binary) + Building: bundles/1.12.0-dev/binary/docker-1.12.0-dev + Created binary: bundles/1.12.0-dev/binary/docker-1.12.0-dev + Copying nested executables into bundles/1.12.0-dev/binary + ``` -7. Copy the binary to the container's `/usr/bin` directory. +7. Copy the binary to the container's `**/usr/bin/**` directory. 
- ```bash - root@a8b2885ab900:/go/src/github.com/docker/docker# cp bundles/1.12.0-dev/binary-client/docker* /usr/bin - root@a8b2885ab900:/go/src/github.com/docker/docker# cp bundles/1.12.0-dev/binary-daemon/docker* /usr/bin - ``` + ```none + root@a8b2885ab900:/go/src/github.com/docker/docker# cp bundles/1.12.0-dev/binary-client/docker* /usr/bin/ + root@a8b2885ab900:/go/src/github.com/docker/docker# cp bundles/1.12.0-dev/binary-daemon/docker* /usr/bin/ + ``` 8. Start the Engine daemon running in the background. - ```bash - root@a8b2885ab900:/go/src/github.com/docker/docker# docker daemon -D& - ...output snipped... - DEBU[0001] Registering POST, /networks/{id:.*}/connect - DEBU[0001] Registering POST, /networks/{id:.*}/disconnect - DEBU[0001] Registering DELETE, /networks/{id:.*} - INFO[0001] API listen on /var/run/docker.sock - DEBU[0003] containerd connection state change: READY - ``` + ```none + root@a8b2885ab900:/go/src/github.com/docker/docker# docker daemon -D& + ...output snipped... + DEBU[0001] Registering POST, /networks/{id:.*}/connect + DEBU[0001] Registering POST, /networks/{id:.*}/disconnect + DEBU[0001] Registering DELETE, /networks/{id:.*} + INFO[0001] API listen on /var/run/docker.sock + DEBU[0003] containerd connection state change: READY + ``` - The `-D` flag starts the daemon in debug mode. The `&` starts it as a - background process. You'll find these options useful when debugging code - development. + The `-D` flag starts the daemon in debug mode. The `&` starts it as a + background process. You'll find these options useful when debugging code + development. 9. Inside your container, check your Docker version. - ```bash - root@5f8630b873fe:/go/src/github.com/docker/docker# docker --version - Docker version 1.12.0-dev, build 6e728fb - ``` + ```none + root@5f8630b873fe:/go/src/github.com/docker/docker# docker --version + Docker version 1.12.0-dev, build 6e728fb + ``` - Inside the container you are running a development version. This is the version - on the current branch. It reflects the value of the `VERSION` file at the - root of your `docker-fork` repository. + Inside the container you are running a development version. This is the version + on the current branch. It reflects the value of the `VERSION` file at the + root of your `docker-fork` repository. 10. Run the `hello-world` image. - ```bash + ```none root@5f8630b873fe:/go/src/github.com/docker/docker# docker run hello-world ``` 11. List the image you just downloaded. - ```bash + ```none root@5f8630b873fe:/go/src/github.com/docker/docker# docker images + REPOSITORY TAG IMAGE ID CREATED SIZE + hello-world latest c54a2cc56cbb 3 months ago 1.85 kB ``` 12. Open another terminal on your local host. 13. List the container running your development container. - ```bash + ```none ubuntu@ubuntu1404:~$ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a8b2885ab900 docker-dev:dry-run-test "hack/dind bash" 43 minutes ago Up 43 minutes hungry_payne ``` - Notice that the tag on the container is marked with the `dry-run-branch` name. + Notice that the tag on the container is marked with the `dry-run-test` branch name. ## Task 3. Make a code change @@ -235,7 +237,7 @@ you have: your development container Running the `make shell` command mounted your local Docker repository source into -your Docker container. When you start to developing code though, you'll +your Docker container. When you start to develop code though, you'll want to iterate code changes and builds inside the container. 
If you have followed this guide exactly, you have a BASH shell running a development container. @@ -247,52 +249,57 @@ example, you'll edit the help for the `attach` subcommand. 2. Make sure you are in your `docker-fork` repository. - ```bash - $ pwd - /Users/mary/go/src/github.com/moxiegirl/docker-fork - ``` + ```none + $ pwd + /Users/mary/go/src/github.com/moxiegirl/docker-fork + ``` - Your location should be different because, at least, your username is - different. + Your location should be different because, at least, your username is + different. -3. Open the `api/client/attach.go` file. +3. Open the `cli/command/container/attach.go` file. 4. Edit the command's help message. - For example, you can edit this line: + For example, you can edit this line: - ```go - noStdin := cmd.Bool([]string{"-no-stdin"}, false, "Do not attach STDIN") - ``` + ```go + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") + ``` - And change it to this: + And change it to this: - ```go - noStdin := cmd.Bool([]string{"-no-stdin"}, false, "Do not attach STDIN (standard in)") - ``` + ```go + flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN (standard in)") + ``` -5. Save and close the `api/client/attach.go` file. +5. Save and close the `cli/command/container/attach.go` file. -6. Go to your running development container. +6. Go to your running docker development container shell. -7. Remake the binary and copy it to `usr/bin` +7. Rebuild the binary by using the command `hack/make.sh binary` in the docker development container shell. -8. Restart the Docker daemon with the new binary. +8. Copy the binaries to **/usr/bin** by entering the following commands in the docker development container shell. -9. View your change by display the `attach` help. + ``` + cp bundles/1.12.0-dev/binary-client/docker* /usr/bin/ + cp bundles/1.12.0-dev/binary-daemon/docker* /usr/bin/ + ``` - ```bash - root@b0cb4f22715d:/go/src/github.com/docker/docker# docker attach --help +9. To view your change, run the `docker attach --help` command in the docker development container shell. - Usage: docker attach [OPTIONS] CONTAINER + ```bash + root@b0cb4f22715d:/go/src/github.com/docker/docker# docker attach --help - Attach to a running container + Usage: docker attach [OPTIONS] CONTAINER - --detach-keys Override the key sequence for detaching a container - --help Print usage - --no-stdin Do not attach to STDIN (standard in) - --sig-proxy=true Proxy all received signals to the process - ``` + Attach to a running container + + --detach-keys Override the key sequence for detaching a container + --help Print usage + --no-stdin Do not attach to STDIN (standard in) + --sig-proxy=true Proxy all received signals to the process + ``` You've just done the basic workflow for changing the Engine code base. You made your code changes in your feature branch. Then, you updated the binary in your diff --git a/opensource/project/set-up-git.md b/opensource/project/set-up-git.md index 05a36794fae..4a35fbfda80 100644 --- a/opensource/project/set-up-git.md +++ b/opensource/project/set-up-git.md @@ -55,39 +55,39 @@ target="_blank">docker/docker repository. 5. Open a terminal window on your local host and change to your home directory. - ```bash - $ cd ~ - ``` + ```bash + $ cd ~ + ``` In Windows, you'll work in your Docker Quickstart Terminal window instead of Powershell or a `cmd` window. 6. Create a `repos` directory. - ```bash - $ mkdir repos - ``` + ```bash + $ mkdir repos + ``` 7. Change into your `repos` directory. 
- ```bash - $ cd repos - ``` + ```bash + $ cd repos + ``` 8. Clone the fork to your local host into a repository called `docker-fork`. - ```bash - $ git clone https://github.com/moxiegirl/docker.git docker-fork - ``` + ```bash + $ git clone https://github.com/moxiegirl/docker.git docker-fork + ``` Naming your local repo `docker-fork` should help make these instructions easier to follow; experienced coders don't typically change the name. 9. Change directory into your new `docker-fork` directory. - ```bash - $ cd docker-fork - ``` + ```bash + $ cd docker-fork + ``` Take a moment to familiarize yourself with the repository's contents. List the contents. @@ -117,55 +117,55 @@ To configure your username, email, and add a remote: 1. Change to the root of your `docker-fork` repository. - ```bash - $ cd docker-fork - ``` + ```bash + $ cd docker-fork + ``` 2. Set your `user.name` for the repository. - ```bash - $ git config --local user.name "FirstName LastName" - ``` + ```bash + $ git config --local user.name "FirstName LastName" + ``` 3. Set your `user.email` for the repository. - ```bash - $ git config --local user.email "emailname@mycompany.com" - ``` + ```bash + $ git config --local user.email "emailname@mycompany.com" + ``` 4. Set your local repo to track changes upstream, on the `docker` repository. - ```bash - $ git remote add upstream https://github.com/docker/docker.git - ``` + ```bash + $ git remote add upstream https://github.com/docker/docker.git + ``` 5. Check the result in your `git` configuration. - ```bash - $ git config --local -l - core.repositoryformatversion=0 - core.filemode=true - core.bare=false - core.logallrefupdates=true - remote.origin.url=https://github.com/moxiegirl/docker.git - remote.origin.fetch=+refs/heads/*:refs/remotes/origin/* - branch.master.remote=origin - branch.master.merge=refs/heads/master - user.name=Mary Anthony - user.email=mary@docker.com - remote.upstream.url=https://github.com/docker/docker.git - remote.upstream.fetch=+refs/heads/*:refs/remotes/upstream/* - ``` + ```bash + $ git config --local -l + core.repositoryformatversion=0 + core.filemode=true + core.bare=false + core.logallrefupdates=true + remote.origin.url=https://github.com/moxiegirl/docker.git + remote.origin.fetch=+refs/heads/*:refs/remotes/origin/* + branch.master.remote=origin + branch.master.merge=refs/heads/master + user.name=Mary Anthony + user.email=mary@docker.com + remote.upstream.url=https://github.com/docker/docker.git + remote.upstream.fetch=+refs/heads/*:refs/remotes/upstream/* + ``` To list just the remotes use: - ```bash - $ git remote -v - origin https://github.com/moxiegirl/docker.git (fetch) - origin https://github.com/moxiegirl/docker.git (push) - upstream https://github.com/docker/docker.git (fetch) - upstream https://github.com/docker/docker.git (push) - ``` + ```bash + $ git remote -v + origin https://github.com/moxiegirl/docker.git (fetch) + origin https://github.com/moxiegirl/docker.git (push) + upstream https://github.com/docker/docker.git (fetch) + upstream https://github.com/docker/docker.git (push) + ``` ## Task 3. Create and push a branch @@ -179,32 +179,34 @@ the branch to your fork on GitHub: 1. Open a terminal and go to the root of your `docker-fork`. - ```bash - $ cd docker-fork - ``` + ```bash + $ cd docker-fork + ``` 2. Create a `dry-run-test` branch. - ```bash - $ git checkout -b dry-run-test - ``` + ```bash + $ git checkout -b dry-run-test + ``` This command creates the branch and switches the repository to it. 3. Verify you are in your new branch. 
- $ git branch - * dry-run-test - master + ```bash + $ git branch + * dry-run-test + master + ``` The current branch has an * (asterisk) marker. So, these results shows you are on the right branch. 4. Create a `TEST.md` file in the repository's root. - ```bash - $ touch TEST.md - ``` + ```bash + $ touch TEST.md + ``` 5. Edit the file and add your email and location. @@ -216,43 +218,43 @@ the branch to your fork on GitHub: 7. Check the status of your branch. - ```bash - $ git status - On branch dry-run-test - Untracked files: - (use "git add ..." to include in what will be committed) - - TEST.md - - nothing added to commit but untracked files present (use "git add" to track) - ``` + ```bash + $ git status + On branch dry-run-test + Untracked files: + (use "git add ..." to include in what will be committed) + + TEST.md + + nothing added to commit but untracked files present (use "git add" to track) + ``` You've only changed the one file. It is untracked so far by git. 8. Add your file. - ```bash - $ git add TEST.md - ``` + ```bash + $ git add TEST.md + ``` That is the only _staged_ file. Stage is fancy word for work that Git is tracking. 9. Sign and commit your change. - ``` - $ git commit -s -m "Making a dry run test." - [dry-run-test 6e728fb] Making a dry run test - 1 file changed, 1 insertion(+) - create mode 100644 TEST.md - ``` + ```bash + $ git commit -s -m "Making a dry run test." + [dry-run-test 6e728fb] Making a dry run test + 1 file changed, 1 insertion(+) + create mode 100644 TEST.md + ``` Commit messages should have a short summary sentence of no more than 50 characters. Optionally, you can also include a more detailed explanation after the summary. Separate the summary from any explanation with an empty line. -8. Push your changes to GitHub. +10. Push your changes to GitHub. ```bash $ git push --set-upstream origin dry-run-test @@ -263,19 +265,21 @@ the branch to your fork on GitHub: Git prompts you for your GitHub username and password. Then, the command returns a result. - Counting objects: 13, done. - Compressing objects: 100% (2/2), done. - Writing objects: 100% (3/3), 320 bytes | 0 bytes/s, done. - Total 3 (delta 1), reused 0 (delta 0) - To https://github.com/moxiegirl/docker.git - * [new branch] dry-run-test -> dry-run-test - Branch dry-run-test set up to track remote branch dry-run-test from origin. + ```bash + Counting objects: 13, done. + Compressing objects: 100% (2/2), done. + Writing objects: 100% (3/3), 320 bytes | 0 bytes/s, done. + Total 3 (delta 1), reused 0 (delta 0) + To https://github.com/moxiegirl/docker.git + * [new branch] dry-run-test -> dry-run-test + Branch dry-run-test set up to track remote branch dry-run-test from origin. + ``` -9. Open your browser to GitHub. +11. Open your browser to GitHub. -10. Navigate to your Docker fork. +12. Navigate to your Docker fork. -11. Make sure the `dry-run-test` branch exists, that it has your commit, and the +13. Make sure the `dry-run-test` branch exists, that it has your commit, and the commit is signed. 
![Branch Signature](images/branch-sig.png) diff --git a/opensource/project/software-req-win.md b/opensource/project/software-req-win.md index 86bdcf74e10..e10bdaa7099 100644 --- a/opensource/project/software-req-win.md +++ b/opensource/project/software-req-win.md @@ -11,7 +11,7 @@ title: Set up for development on Windows # Get the required software for Windows -This page explains how to get the software you need to use a a Windows Server +This page explains how to get the software you need to use a Windows Server 2012 or Windows 8 machine for Docker development. Before you begin contributing you must have: diff --git a/opensource/project/software-required.md b/opensource/project/software-required.md index 6d1cc12ac42..a72609cc967 100644 --- a/opensource/project/software-required.md +++ b/opensource/project/software-required.md @@ -9,9 +9,9 @@ menu: title: Get the required software --- -# Get the required software for Linux or OS X +# Get the required software for Linux or macOS -This page explains how to get the software you need to use a Linux or OS X +This page explains how to get the software you need to use a Linux or macOS machine for Docker development. Before you begin contributing you must have: * a GitHub account @@ -70,9 +70,9 @@ docker --version Docker version 1.11.0, build 4dc5990 ``` -On Mac OS X or Windows, you should have installed Docker Toolbox which includes +On macOS or Windows, you should have installed Docker Toolbox which includes Docker. You'll need to verify both Docker Machine and Docker. This -documentation was written on OS X using the following versions. +documentation was written on macOS using the following versions. ```bash $ docker-machine --version diff --git a/opensource/project/test-and-docs.md b/opensource/project/test-and-docs.md index cc6fb4a5110..4119f9b0b66 100644 --- a/opensource/project/test-and-docs.md +++ b/opensource/project/test-and-docs.md @@ -55,54 +55,35 @@ change an existing one. ## Run tests on your local host Before submitting a pull request with a code change, you should run the entire -Docker Engine test suite. The `Makefile` contains a target for the entire test suite. -The target's name is simply `test`. The `Makefile` contains several targets for +Docker Engine test suite. The `Makefile` contains a target for the entire test +suite, named `test`. Also, it contains several targets for testing: - - - - - - - - - - - - - - - - - - - - - - -
      Target                   What this target does
      test                     Run the unit, integration and docker-py tests.
      test-unit                Run just the unit tests.
      test-integration-cli     Run the test for the integration command line interface.
      test-docker-py           Run the tests for Docker API client.
      - -Running the entire test suite on your current repository can take a half an hour -or more. To run the test suite, do the following: - -1. Open a terminal on your local host. - -2. Change to the root your Docker repository. +| Target | What this target does | +| ---------------------- | ---------------------------------------------- | +| `test` | Run the unit, integration, and docker-py tests | +| `test-unit` | Run just the unit tests | +| `test-integration-cli` | Run the integration tests for the CLI | +| `test-docker-py` | Run the tests for the Docker API client | + +Running the entire test suite on your current repository can take over half an +hour. To run the test suite, do the following: + +1. Open a terminal on your local host. + +2. Change to the root your Docker repository. ```bash $ cd docker-fork ``` -3. Make sure you are in your development branch. +3. Make sure you are in your development branch. ```bash $ git checkout dry-run-test ``` -4. Run the `make test` command. +4. Run the `make test` command. ```bash $ make test @@ -121,43 +102,11 @@ or more. To run the test suite, do the following: value on the basis of your host performance. When they complete successfully, you see the output concludes with something like this: - ```bash - PASS: docker_cli_pull_test.go:133: DockerHubPullSuite.TestPullClientDisconnect 1.127s - PASS: docker_cli_pull_test.go:16: DockerHubPullSuite.TestPullFromCentralRegistry 1.049s - PASS: docker_cli_pull_test.go:65: DockerHubPullSuite.TestPullFromCentralRegistryImplicitRefParts 9.795s - PASS: docker_cli_pull_test.go:42: DockerHubPullSuite.TestPullNonExistingImage 2.158s - PASS: docker_cli_pull_test.go:92: DockerHubPullSuite.TestPullScratchNotAllowed 0.044s - OK: 918 passed, 13 skipped - PASS - coverage: 72.9% of statements - ok github.com/docker/docker/integration-cli 1638.553s - ---> Making bundle: .integration-daemon-stop (in bundles/1.9.0-dev/test-integration-cli) - ++++ cat bundles/1.9.0-dev/test-integration-cli/docker.pid - +++ kill 9453 - +++ /etc/init.d/apparmor stop - * Clearing AppArmor profiles cache - ...done. - All profile caches have been cleared, but no profiles have been unloaded. - Unloading profiles will leave already running processes permanently - unconfined, which can lead to unexpected situations. - - To set a process to complain mode, use the command line tool - 'aa-complain'. To really tear down all profiles, run the init script - with the 'teardown' option." - - ---> Making bundle: test-docker-py (in bundles/1.9.0-dev/test-docker-py) - ---> Making bundle: .integration-daemon-start (in bundles/1.9.0-dev/test-docker-py) - +++ /etc/init.d/apparmor start - * Starting AppArmor profiles - Skipping profile in /etc/apparmor.d/disable: usr.sbin.rsyslogd - ...done. - +++ exec docker daemon --debug --host unix:///go/src/github.com/docker/docker/bundles/1.9.0-dev/test-docker-py/docker.sock --storage-driver overlay --exec-driver native --pidfile bundles/1.9.0-dev/test-docker-py/docker.pid --userland-proxy=true - ..............s..............s...................................... - ---------------------------------------------------------------------- + ```no-highlight Ran 68 tests in 79.135s ``` -## Run targets inside a development container +## Run targets inside a development container If you are working inside a Docker development container, you use the `hack/make.sh` script to run tests. The `hack/make.sh` script doesn't @@ -166,9 +115,9 @@ command line with multiple targets that does the same thing. Try this now. -1. 
Open a terminal and change to the `docker-fork` root. +1. Open a terminal and change to the `docker-fork` root. -2. Start a Docker development image. +2. Start a Docker development image. If you are following along with this guide, you should have a `dry-run-test` image. @@ -177,7 +126,7 @@ Try this now. $ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/docker/docker dry-run-test /bin/bash ``` -3. Run the tests using the `hack/make.sh` script. +3. Run the tests using the `hack/make.sh` script. ```bash root@5f8630b873fe:/go/src/github.com/docker/docker# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py @@ -204,18 +153,24 @@ package or [gocheck](https://labix.org/gocheck) for our unit tests. You can use the `TESTDIRS` environment variable to run unit tests for a single package. - $ TESTDIRS='opts' make test-unit +```bash +$ TESTDIRS='opts' make test-unit +``` You can also use the `TESTFLAGS` environment variable to run a single test. The flag's value is passed as arguments to the `go test` command. For example, from your local host you can run the `TestBuild` test with this command: - $ TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit +```bash +$ TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit +``` On unit tests, it's better to use `TESTFLAGS` in combination with `TESTDIRS` to make it quicker to run a specific test. - $ TESTDIRS='opts' TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit +```bash +$ TESTDIRS='opts' TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit +``` ## Run integration tests @@ -224,11 +179,15 @@ You can use the `TESTFLAGS` environment variable to run a single test. The flag's value is passed as arguments to the `go test` command. For example, from your local host you can run the `TestBuild` test with this command: - $ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli +```bash +$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli +``` To run the same test inside your Docker development container, you do this: - root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli +```bash +root@5f8630b873fe:/go/src/github.com/docker/docker# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli +``` ## Test the Windows binary against a Linux daemon @@ -240,98 +199,168 @@ run a Bash terminal on Windows. 1. If you don't have one open already, start a Git Bash terminal. - ![Git Bash](images/git_bash.png) + ![Git Bash](images/git_bash.png) -2. Change to the `docker` source directory. +2. Change to the `docker` source directory. - $ cd /c/gopath/src/github.com/docker/docker - -3. Set `DOCKER_REMOTE_DAEMON` as follows: + ```bash + $ cd /c/gopath/src/github.com/docker/docker + ``` - $ export DOCKER_REMOTE_DAEMON=1 +3. Set `DOCKER_REMOTE_DAEMON` as follows: -4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your -Linux machines actual IP address. For example: + ```bash + $ export DOCKER_REMOTE_DAEMON=1 + ``` - $ export DOCKER_TEST_HOST=tcp://213.124.23.200:2376 +4. Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your + Linux machines actual IP address. For example: -5. Make the binary and run the tests: + ```bash + $ export DOCKER_TEST_HOST=tcp://213.124.23.200:2376 + ``` - $ hack/make.sh binary test-integration-cli +5. Make the binary and run the tests: - Some tests are skipped on Windows for various reasons. 
You can see which - tests were skipped by re-running the make and passing in the + ```bash + $ hack/make.sh binary test-integration-cli + ``` + Some tests are skipped on Windows for various reasons. You can see which + tests were skipped by re-running the make and passing in the `TESTFLAGS='-test.v'` value. For example - $ TESTFLAGS='-test.v' hack/make.sh binary test-integration-cli + ```bash + $ TESTFLAGS='-test.v' hack/make.sh binary test-integration-cli + ``` - Should you wish to run a single test such as one with the name - 'TestExample', you can pass in `TESTFLAGS='-check.f TestExample'`. For - example + Should you wish to run a single test such as one with the name + 'TestExample', you can pass in `TESTFLAGS='-check.f TestExample'`. For + example - $TESTFLAGS='-check.f TestExample' hack/make.sh binary test-integration-cli + ```bash + $ TESTFLAGS='-check.f TestExample' hack/make.sh binary test-integration-cli + ``` You can now choose to make changes to the Docker source or the tests. If you -make any changes just run these commands again. +make any changes, just run these commands again. ## Build and test the documentation -The Docker documentation source files are under `docs`. The content is -written using extended Markdown. We use the static generator MkDocs to build Docker's -documentation. Of course, you don't need to install this generator -to build the documentation, it is included with container. +The Docker documentation source files are in a centralized repository at +[https://github.com/docker/docker.github.io](https://github.com/docker/docker.github.io). The content is +written using extended Markdown, which you can edit in a plain text editor such +Atom or Notepad. The docs are built using [Jekyll](https://jekyllrb.com/). + +Most documentation is developed in the centralized repository. The exceptions are +a project's API and CLI references and man pages, which are developed alongside +the project's code and periodically imported into the documentation repository. -You should always check your documentation for grammar and spelling. The best -way to do this is with an online grammar checker. +Always check your documentation for grammar and spelling. You can use +an online grammar checker such as [Hemingway Editor](http://www.hemingwayapp.com/) or a spelling or +grammar checker built into your text editor. If you spot spelling or grammar errors, +fixing them is one of the easiest ways to get started contributing to opensource +projects. When you change a documentation source file, you should test your change locally to make sure your content is there and any links work correctly. You -can build the documentation from the local host. The build starts a container -and loads the documentation into a server. As long as this container runs, you -can browse the docs. +can build the documentation from the local host. + +### Building the docs for docs.docker.com + +You can build the docs using a Docker container or by using Jekyll directly. +Using the Docker container requires no set-up but is slower. Using Jekyll +directly requires you to install some prerequisites, but is faster on each build. + +#### Using Docker Compose + +The easiest way to build the docs locally on macOS, Windows, or Linux is to use +`docker-compose`. If you have not yet installed `docker-compose`, +[follow these installation instructions](https://docs.docker.com/compose/install/). 
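+
+(Optional) As a quick sanity check before building, you can confirm that
+Compose and Docker are available on your `PATH`; any reasonably recent
+versions should work for building the docs:
+
+```bash
+# Illustrative check only: prints the Compose and Docker client versions
+$ docker-compose version
+$ docker --version
+```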
+ +In the root of the repository, issue the following command: + +```bash +$ docker-compose up +``` + +This command will create and start service `docs` defined in `docker-compose.yml`, +which will build an image named `docs/docstage` and launch a container with Jekyll and all its dependencies configured +correctly. The container uses Jekyll to incrementally build and serve the site using the +files in the local repository. + +Go to [http://localhost:4000/](http://localhost:4000/) in your web browser to view the documentation. + +The container runs in the foreground. It will continue to run and incrementally build the site when changes are +detected, even if you change branches. + +To stop the container, use `CTRL+C`. + +To start the container again, use the following command: + +```bash +$ docker-compose start docs +``` + +#### Using Jekyll directly + +If for some reason you are unable to use Docker Compose, you can use Jekyll directly. -1. In a terminal, change to the root of your `docker-fork` repository. +**Prerequisites:** - $ cd ~/repos/docker-fork +- You need a recent version of Ruby installed. If you are on macOS, install Ruby + and Bundle using homebrew. -2. Make sure you are in your feature branch. + ```bash + brew install ruby + brew install bundle + ``` - $ git status - On branch dry-run-test - Your branch is up-to-date with 'origin/dry-run-test'. - nothing to commit, working directory clean +- Use `bundle` to install Jekyll and its dependencies from the bundle in the + centralized documentation repository. Within your clone of the + [https://github.com/docker/docker.github.io](https://github.com/docker/docker.github.io) + repository, issue the following command: -3. Build the documentation. + ```bash + bundle install + ``` - $ make docs +**To build the website locally:** - When the build completes, you'll see a final output message similar to the - following: + 1. Issue the `jekyll serve` command. This will build + the static website and serve it in a small web server on port 4000. If it + fails, examine and fix the errors and run the command again. - Successfully built ee7fe7553123 - docker run --rm -it -e AWS_S3_BUCKET -e NOCACHE -p 8000:8000 "docker-docs:dry-run-test" mkdocs serve - Running at: http://0.0.0.0:8000/ - Live reload enabled. - Hold ctrl+c to quit. + 2. You can keep making changes to the Markdown files in another terminal + window, and Jekyll will detect the changes and rebuild the relevant HTML + pages automatically. -4. Enter the URL in your browser. + 3. To stop the web server, issue `CTRL+C`. - If you are using Docker Machine, replace the default localhost address - (0.0.0.0) with your DOCKERHOST value. You can get this value at any time by - entering `docker-machine ip ` at the command line. +To serve the website using Github Pages on your fork, first +[enable Github Pages in your fork](https://pages.github.com/) or rename your fork +to `.github.io`, then push your +feature branch to your fork's Github Pages branch, which is `gh-pages` or `master`, +depending on whether you manually enabled Github Pages or renamed your fork. +Within a few minutes, the site will be available on your Github Pages URL. -5. Once in the documentation, look for the red notice to verify you are seeing the correct build. +Review your documentation changes on the local or Github Pages site. When you +are satisfied with your changes, submit your pull request. - ![Beta documentation](images/red_notice.png) -6. Navigate to your new or changed document. 
+### Reviewing the reference docs for your project -7. Review both the content and the links. +Some projects, such as Docker Engine, maintain reference documents, such as man +pages, CLI command references, and API endpoint references. These files are +maintained within each project and periodically imported into the centralized +documentation repository. If you change the behavior of a command or endpoint, +including changing the help text, be sure that the associated reference +documentation is updated as part of your pull request. -8. Return to your terminal and exit out of the running documentation container. +These reference documents are usually under the `docs/reference/` directory or +the `man/` directory. The best way to review them is to push the changes to +your fork and view the Markdown files in Github. The style will not match with +docs.docker.com, but you will be able to preview the changes. ## Where to go next diff --git a/opensource/project/who-written-for.md b/opensource/project/who-written-for.md index 03c2376b785..ed633c0fc93 100644 --- a/opensource/project/who-written-for.md +++ b/opensource/project/who-written-for.md @@ -1,4 +1,6 @@ --- +aliases: +- /project/who-written-for/ description: Introduction to project contribution at Docker keywords: - Gordon, introduction, turtle, machine, libcontainer, how to @@ -61,5 +63,5 @@ Please feel free to skim past information you find obvious or boring. ## How to get started Start by getting the software you require. If you are on Mac or Linux, go to -[get the required software for Linux or OS X](software-required.md). If you are +[get the required software for Linux or macOS](software-required.md). If you are on Windows, see [get the required software for Windows](software-req-win.md). diff --git a/opensource/ways/issues.md b/opensource/ways/issues.md index 3d96d914790..10fce4bfd6c 100644 --- a/opensource/ways/issues.md +++ b/opensource/ways/issues.md @@ -20,7 +20,7 @@ think are important. Triage is a great choice if you have an interest or experience in software product management or project management. -# What kind of issues can I triage? +## What kind of issues can I triage? Docker users and contributors create new issues if they want to: @@ -28,7 +28,7 @@ Docker users and contributors create new issues if they want to: * request a new feature * ask a question -# How do I triage? +## How do I triage? Follow these steps: @@ -36,56 +36,30 @@ Follow these steps: 2. Visit a Docker repository and press the **Watch** button. -This tells GitHub to notify you of new issues. Depending on your settings, -notification go to your GitHub or email inbox. Some of repositories you can watch are: - - - - - - - - - - - - - - - - - - - - - - - -
| docker/docker | Docker the open-source application container engine |
| docker/machine | Software for the easy and quick creation of Docker hosts on your computer, on cloud providers, and inside your own data center. |
| kitematic/kitematic | Kitematic is a simple application for managing Docker containers on Mac OS X and Windows. |
| docker/swarm | Native clustering for Docker; manage several Docker hosts as a single, virtual host. |
| docker/compose | Define and run complex applications using one or many interlinked containers. |
-See the complete list of
-Docker repositories on GitHub.
-
-3. Choose an issue from the list of untriaged issues.
-
-4. Follow the the triage process to triage the issue.
-
-The triage process asks you to add both a `kind/` and a `exp/` label to each
-issue. Because you are not a Docker maintainer, you add these through comments.
-Simply add a `+label` keyword to an issue comment:
-
-![Example](../images/triage-label.png)
-
-For example, the `+exp/beginner` and `+kind/writing` labels would triage an issue as
-beginner writing task. For descriptions of valid labels, see the the triage process
+   This tells GitHub to notify you of new issues. Depending on your settings,
+   notifications go to your GitHub or email inbox. Some of the repositories you can watch are:
+
+   | Repository | Description |
+   |------------|-------------|
+   | [docker/docker](https://github.com/docker/docker) | Docker the open-source application container engine |
+   | [docker/machine](https://github.com/docker/machine) | Software for the easy and quick creation of Docker hosts on your computer, on cloud providers, and inside your own data center. |
+   | [kitematic/kitematic](https://github.com/kitematic/kitematic) | Kitematic is a simple application for managing Docker containers on macOS and Windows. |
+   | [docker/swarm](https://github.com/docker/swarm) | Native clustering for Docker; manage several Docker hosts as a single, virtual host. |
+   | [docker/compose](https://github.com/docker/compose) | Define and run complex applications using one or many interlinked containers. |
+
+   See the complete list of Docker repositories on GitHub.
+
+3. Choose an issue from the [list of untriaged issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+-label%3Akind%2Fproposal+-label%3Akind%2Fenhancement+-label%3Akind%2Fbug+-label%3Akind%2Fcleanup+-label%3Akind%2Fgraphics+-label%3Akind%2Fwriting+-label%3Akind%2Fsecurity+-label%3Akind%2Fquestion+-label%3Akind%2Fimprovement+-label%3Akind%2Ffeature).
+
+4. Follow the triage process to triage the issue.
+
+   The triage process asks you to add both a `kind/` and an `exp/` label to each
+   issue. Because you are not a Docker maintainer, you add these through comments.
+   Simply add a `+label` keyword to an issue comment:
+
+   ![Example](../images/triage-label.png)
+
+   For example, the `+exp/beginner` and `+kind/writing` labels would triage an issue as
+   a beginner writing task. For descriptions of valid labels, see the triage process.

5. Triage another issue.

diff --git a/opensource/workflow/find-an-issue.md b/opensource/workflow/find-an-issue.md
index 6fc27d625e0..0c82c7d3d09 100644
--- a/opensource/workflow/find-an-issue.md
+++ b/opensource/workflow/find-an-issue.md
@@ -56,7 +56,6 @@ experience, and priority. You can filter using one or more labels. The kind and
experience labels are useful for new contributors.

The following table describes the kind labels.

-kind/bug

@@ -110,7 +109,6 @@ The following table describes the kind labels.

The following table describes the experience level guidelines.
      - @@ -235,9 +233,6 @@ To sync your repository: From github.com:docker/docker * branch master -> FETCH_HEAD - This command says get all the changes from the `master` branch belonging to - the `upstream` remote. - 7. Rebase your local master with the `upstream/master`. $ git rebase upstream/master diff --git a/opensource/workflow/work-issue.md b/opensource/workflow/work-issue.md index 18c6f1bce81..9ca29017a44 100644 --- a/opensource/workflow/work-issue.md +++ b/opensource/workflow/work-issue.md @@ -11,7 +11,7 @@ title: Work on your issue # Work on your issue -The work you do for your issue depends on the specific issue you picked. +The work you do depends on the specific issue you picked. This section gives you a step-by-step workflow. Where appropriate, it provides command examples. @@ -25,7 +25,7 @@ Follow this workflow as you work: 1. Review the appropriate style guide. - If you are changing code, review the coding style guide. Changing documentation? Review the documentation style guide. diff --git a/registry/architecture.md b/registry/architecture.md index 91b704f8c41..c2aaa9f2d7a 100644 --- a/registry/architecture.md +++ b/registry/architecture.md @@ -1,5 +1,5 @@ --- -draft: true +published: false --- # Architecture diff --git a/registry/configuration.md b/registry/configuration.md index fa4d625af82..72c9597242c 100644 --- a/registry/configuration.md +++ b/registry/configuration.md @@ -748,7 +748,8 @@ interpretation of the options. no
      Exp Label - Specify a `duration` by providing an integer and a unit. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `3000s` is a valid duration; there should be no space between the integer and unit. If you do not specify a `duration` or specify an integer without a time unit, this defaults to 20 minutes. + {% capture text %}Specify a `duration` by providing an integer and a unit. Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For example, `3000s` is a valid duration; there should be no space between the integer and unit. If you do not specify a `duration` or specify an integer without a time unit, this defaults to 20 minutes.{% endcapture %} + {{ text | markdownify }}
      @@ -1831,7 +1832,7 @@ conjunction with the S3 storage driver. The storage middleware name. Currently cloudfront is an accepted value. - disabled + disabled Set to false to easily disable the middleware. @@ -1860,7 +1861,6 @@ The following example illustrates these values: keypairid: asecret duration: 60 - >**Note**: Cloudfront keys exist separately to other AWS keys. See >[the documentation on AWS credentials](http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) >for more information. diff --git a/registry/glossary.md b/registry/glossary.md index 61c8d1dc3fd..2eb1626a281 100644 --- a/registry/glossary.md +++ b/registry/glossary.md @@ -1,5 +1,5 @@ --- -draft: true +published: false --- # Glossary diff --git a/registry/menu.md b/registry/menu.md deleted file mode 100644 index def2cd5c9c2..00000000000 --- a/registry/menu.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -description: High-level overview of the Registry -keywords: -- registry, on-prem, images, tags, repository, distribution -menu: - main: - identifier: smn_registry - parent: mn_components -title: Docker Registry -type: menu ---- - -# Overview of Docker Registry Documentation - -The Docker Registry documentation includes the following topics: - -* [Docker Registry Introduction](index.md) -* [Understanding the Registry](introduction.md) -* [Deploying a registry server](deploying.md) -* [Registry Configuration Reference](configuration.md) -* [Notifications](notifications.md) -* [Recipes](recipes/index.md) -* [Getting help](help.md) diff --git a/registry/migration.md b/registry/migration.md index 167c5a680aa..e46441cb0a9 100644 --- a/registry/migration.md +++ b/registry/migration.md @@ -1,5 +1,5 @@ --- -draft: true +published: false --- # Migrating a 1.0 registry to 2.0 diff --git a/registry/recipes/index.md b/registry/recipes/index.md index 495370798ee..482a48943a8 100644 --- a/registry/recipes/index.md +++ b/registry/recipes/index.md @@ -33,5 +33,5 @@ At this point, it's assumed that: * [using Apache as an authenticating proxy](apache.md) * [using Nginx as an authenticating proxy](nginx.md) - * [running a Registry on OS X](osx-setup-guide.md) + * [running a Registry on macOS](osx-setup-guide.md) * [mirror the Docker Hub](mirror.md) diff --git a/registry/recipes/menu.md b/registry/recipes/menu.md deleted file mode 100644 index 1755009e269..00000000000 --- a/registry/recipes/menu.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -description: Registry Recipes -keywords: -- registry, on-prem, images, tags, repository, distribution -menu: - main: - identifier: smn_recipes - parent: smn_registry - weight: 6 -title: Recipes -type: menu ---- - -# Recipes - -## The List - - * [using Apache as an authenticating proxy](apache.md) - * [using Nginx as an authenticating proxy](nginx.md) - * [running a Registry on OS X](osx-setup-guide.md) - * [mirror the Docker Hub](mirror.md) diff --git a/registry/recipes/mirror.md b/registry/recipes/mirror.md index 75ea964f8a4..6e66f73a005 100644 --- a/registry/recipes/mirror.md +++ b/registry/recipes/mirror.md @@ -62,6 +62,8 @@ In order to access private images on the Docker Hub, a username and password can > :warn: if you specify a username and password, it's very important to understand that private resources that this user has access to on the Hub will be made available on your mirror. It's thus paramount that you secure your mirror by implementing authentication if you expect these resources to stay private! 
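For context, the username and password this recipe refers to are supplied in the `proxy` section of the registry's own configuration file. A minimal sketch, assuming the standard format described in the [Registry Configuration Reference](../configuration.md), looks roughly like this:

```yaml
# Sketch of a pull-through cache (mirror) configuration; the Registry
# Configuration Reference remains the authoritative source for these fields.
proxy:
  remoteurl: https://registry-1.docker.io
  username: [username]   # optional; exposes this user's private Hub images through the mirror
  password: [password]
```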
+> :warn: in order for the scheduler to clean up old entries, delete must be enabled in the registry configuration. See the [Registry Configuration Reference](../configuration.md) for more details.
+
 ### Configuring the Docker daemon

 You will need to pass the `--registry-mirror` option to your Docker daemon on startup:

diff --git a/registry/recipes/osx-setup-guide.md b/registry/recipes/osx-setup-guide.md
index 0d0c443d58d..f926f8c9e66 100644
--- a/registry/recipes/osx-setup-guide.md
+++ b/registry/recipes/osx-setup-guide.md
@@ -1,32 +1,32 @@
---
-description: Explains how to run a registry on OS X
+description: Explains how to run a registry on macOS
keywords:
-- registry, on-prem, images, tags, repository, distribution, OS X, recipe, advanced
+- registry, on-prem, images, tags, repository, distribution, macOS, recipe, advanced
menu:
  main:
    parent: smn_recipes
-title: Running on OS X
+title: Running on macOS
---

-# OS X Setup Guide
+# macOS Setup Guide

## Use-case

-This is useful if you intend to run a registry server natively on OS X.
+This is useful if you intend to run a registry server natively on macOS.

### Alternatives

-You can start a VM on OS X, and deploy your registry normally as a container using Docker inside that VM.
+You can start a VM on macOS, and deploy your registry normally as a container using Docker inside that VM.

The simplest road to get there is traditionally to use the [docker Toolbox](https://www.docker.com/toolbox), or [docker-machine](/machine/index.md), which usually relies on the [boot2docker](http://boot2docker.io/) iso inside a VirtualBox VM.

### Solution

-Using the method described here, you install and compile your own from the git repository and run it as an OS X agent.
+Using the method described here, you install and compile your own registry from the git repository and run it as a macOS agent.

### Gotchas

-Production services operation on OS X is out of scope of this document. Be sure you understand well these aspects before considering going to production with this.
+Running production services on macOS is out of scope of this document. Be sure you understand these aspects well before considering going to production with this setup.

## Setup golang on your machine

diff --git a/registry/spec/api.md.tmpl b/registry/spec/api.md.tmpl
deleted file mode 100644
index c44418f4d36..00000000000
--- a/registry/spec/api.md.tmpl
+++ /dev/null
@@ -1,1219 +0,0 @@
----
-description: Specification for the Registry API.
-keywords:
-- registry, on-prem, images, tags, repository, distribution, api, advanced
-menu:
-  main:
-    parent: smn_registry_ref
-title: HTTP API V2
----
-
-# Docker Registry HTTP API V2
-
-## Introduction
-
-The _Docker Registry HTTP API_ is the protocol to facilitate distribution of
-images to the docker engine. It interacts with instances of the docker
-registry, which is a service to manage information about docker images and
-enable their distribution. The specification covers the operation of version 2
-of this API, known as _Docker Registry HTTP API V2_.
-
-While the V1 registry protocol is usable, there are several problems with the
-architecture that have led to this new version. The main driver of this
-specification is a set of changes to the docker image format, covered in
-[docker/docker#8093](https://github.com/docker/docker/issues/8093).
-The new, self-contained image manifest simplifies image definition and improves
-security.
This specification will build on that work, leveraging new properties -of the manifest format to improve performance, reduce bandwidth usage and -decrease the likelihood of backend corruption. - -For relevant details and history leading up to this specification, please see -the following issues: - -- [docker/docker#8093](https://github.com/docker/docker/issues/8093) -- [docker/docker#9015](https://github.com/docker/docker/issues/9015) -- [docker/docker-registry#612](https://github.com/docker/docker-registry/issues/612) - -### Scope - -This specification covers the URL layout and protocols of the interaction -between docker registry and docker core. This will affect the docker core -registry API and the rewrite of docker-registry. Docker registry -implementations may implement other API endpoints, but they are not covered by -this specification. - -This includes the following features: - -- Namespace-oriented URI Layout -- PUSH/PULL registry server for V2 image manifest format -- Resumable layer PUSH support -- V2 Client library implementation - -While authentication and authorization support will influence this -specification, details of the protocol will be left to a future specification. -Relevant header definitions and error codes are present to provide an -indication of what a client may encounter. - -#### Future - -There are features that have been discussed during the process of cutting this -specification. The following is an incomplete list: - -- Immutable image references -- Multiple architecture support -- Migration from v2compatibility representation - -These may represent features that are either out of the scope of this -specification, the purview of another specification or have been deferred to a -future version. - -### Use Cases - -For the most part, the use cases of the former registry API apply to the new -version. Differentiating use cases are covered below. - -#### Image Verification - -A docker engine instance would like to run verified image named -"library/ubuntu", with the tag "latest". The engine contacts the registry, -requesting the manifest for "library/ubuntu:latest". An untrusted registry -returns a manifest. Before proceeding to download the individual layers, the -engine verifies the manifest's signature, ensuring that the content was -produced from a trusted source and no tampering has occurred. After each layer -is downloaded, the engine verifies the digest of the layer, ensuring that the -content matches that specified by the manifest. - -#### Resumable Push - -Company X's build servers lose connectivity to docker registry before -completing an image layer transfer. After connectivity returns, the build -server attempts to re-upload the image. The registry notifies the build server -that the upload has already been partially attempted. The build server -responds by only sending the remaining data to complete the image file. - -#### Resumable Pull - -Company X is having more connectivity problems but this time in their -deployment datacenter. When downloading an image, the connection is -interrupted before completion. The client keeps the partial data and uses http -`Range` requests to avoid downloading repeated data. - -#### Layer Upload De-duplication - -Company Y's build system creates two identical docker layers from build -processes A and B. Build process A completes uploading the layer before B. -When process B attempts to upload the layer, the registry indicates that its -not necessary because the layer is already known. 
- -If process A and B upload the same layer at the same time, both operations -will proceed and the first to complete will be stored in the registry (Note: -we may modify this to prevent dogpile with some locking mechanism). - -### Changes - -The V2 specification has been written to work as a living document, specifying -only what is certain and leaving what is not specified open or to future -changes. Only non-conflicting additions should be made to the API and accepted -changes should avoid preventing future changes from happening. - -This section should be updated when changes are made to the specification, -indicating what is different. Optionally, we may start marking parts of the -specification to correspond with the versions enumerated here. - -Each set of changes is given a letter corresponding to a set of modifications -that were applied to the baseline specification. These are merely for -reference and shouldn't be used outside the specification other than to -identify a set of modifications. - -
| Letter | Changes |
|--------|---------|
| l | Document TOOMANYREQUESTS error code. |
| k | Document use of Accept and Content-Type headers in manifests endpoint. |
| j | Add ability to mount blobs across repositories. |
| i | Clarified expected behavior response to manifest HEAD request. |
| h | All mention of tarsum removed. |
| g | Clarify behavior of pagination behavior with unspecified parameters. |
| f | Specify the delete API for layers and manifests. |
| e | Added support for listing registry contents. Added pagination to tags API. Added common approach to support pagination. |
| d | Allow repository name components to be one character. Clarified that single component names are allowed. |
| c | Added section covering digest format. Added more clarification that manifest cannot be deleted by tag. |
| b | Added capability of doing streaming upload to PATCH blob upload. Updated PUT blob upload to no longer take final chunk, now requires entire data or no data. Removed `416 Requested Range Not Satisfiable` response status from PUT blob upload. |
| a | Added support for immutable manifest references in manifest endpoints. Deleting a manifest by tag has been deprecated. Specified `Docker-Content-Digest` header for appropriate entities. Added error code for unsupported operations. |
      - -## Overview - -This section covers client flows and details of the API endpoints. The URI -layout of the new API is structured to support a rich authentication and -authorization model by leveraging namespaces. All endpoints will be prefixed -by the API version and the repository name: - - /v2// - -For example, an API endpoint that will work with the `library/ubuntu` -repository, the URI prefix will be: - - /v2/library/ubuntu/ - -This scheme provides rich access control over various operations and methods -using the URI prefix and http methods that can be controlled in variety of -ways. - -Classically, repository names have always been two path components where each -path component is less than 30 characters. The V2 registry API does not -enforce this. The rules for a repository name are as follows: - -1. A repository name is broken up into _path components_. A component of a - repository name must be at least one lowercase, alpha-numeric characters, - optionally separated by periods, dashes or underscores. More strictly, it - must match the regular expression `[a-z0-9]+(?:[._-][a-z0-9]+)*`. -2. If a repository name has two or more path components, they must be - separated by a forward slash ("/"). -3. The total length of a repository name, including slashes, must be less the - 256 characters. - -These name requirements _only_ apply to the registry API and should accept a -superset of what is supported by other docker ecosystem components. - -All endpoints should support aggressive http caching, compression and range -headers, where appropriate. The new API attempts to leverage HTTP semantics -where possible but may break from standards to implement targeted features. - -For detail on individual endpoints, please see the [_Detail_](#detail) -section. - -### Errors - -Actionable failure conditions, covered in detail in their relevant sections, -are reported as part of 4xx responses, in a json response body. One or more -errors will be returned in the following format: - - { - "errors:" [{ - "code": , - "message": , - "detail": - }, - ... - ] - } - -The `code` field will be a unique identifier, all caps with underscores by -convention. The `message` field will be a human readable string. The optional -`detail` field may contain arbitrary json data providing information the -client can use to resolve the issue. - -While the client can take action on certain error codes, the registry may add -new error codes over time. All client implementations should treat unknown -error codes as `UNKNOWN`, allowing future error codes to be added without -breaking API compatibility. For the purposes of the specification error codes -will only be added and never removed. - -For a complete account of all error codes, please see the [_Errors_](#errors-2) -section. - -### API Version Check - -A minimal endpoint, mounted at `/v2/` will provide version support information -based on its response statuses. The request format is as follows: - - GET /v2/ - -If a `200 OK` response is returned, the registry implements the V2(.1) -registry API and the client may proceed safely with other V2 operations. -Optionally, the response may contain information about the supported paths in -the response body. The client should be prepared to ignore this data. - -If a `401 Unauthorized` response is returned, the client should take action -based on the contents of the "WWW-Authenticate" header and try the endpoint -again. 
Depending on access control setup, the client may still have to -authenticate against different resources, even if this check succeeds. - -If `404 Not Found` response status, or other unexpected status, is returned, -the client should proceed with the assumption that the registry does not -implement V2 of the API. - -When a `200 OK` or `401 Unauthorized` response is returned, the -"Docker-Distribution-API-Version" header should be set to "registry/2.0". -Clients may require this header value to determine if the endpoint serves this -API. When this header is omitted, clients may fallback to an older API version. - -### Content Digests - -This API design is driven heavily by [content addressability](http://en.wikipedia.org/wiki/Content-addressable_storage). -The core of this design is the concept of a content addressable identifier. It -uniquely identifies content by taking a collision-resistant hash of the bytes. -Such an identifier can be independently calculated and verified by selection -of a common _algorithm_. If such an identifier can be communicated in a secure -manner, one can retrieve the content from an insecure source, calculate it -independently and be certain that the correct content was obtained. Put simply, -the identifier is a property of the content. - -To disambiguate from other concepts, we call this identifier a _digest_. A -_digest_ is a serialized hash result, consisting of a _algorithm_ and _hex_ -portion. The _algorithm_ identifies the methodology used to calculate the -digest. The _hex_ portion is the hex-encoded result of the hash. - -We define a _digest_ string to match the following grammar: -``` -digest := algorithm ":" hex -algorithm := /[A-Fa-f0-9_+.-]+/ -hex := /[A-Fa-f0-9]+/ -``` - -Some examples of _digests_ include the following: - -digest | description | -----------------------------------------------------------------------------------|------------------------------------------------ -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b | Common sha256 based digest | - -While the _algorithm_ does allow one to implement a wide variety of -algorithms, compliant implementations should use sha256. Heavy processing of -input before calculating a hash is discouraged to avoid degrading the -uniqueness of the _digest_ but some canonicalization may be performed to -ensure consistent identifiers. - -Let's use a simple example in pseudo-code to demonstrate a digest calculation: -``` -let C = 'a small string' -let B = sha256(C) -let D = 'sha256:' + EncodeHex(B) -let ID(C) = D -``` - -Above, we have bytestring `C` passed into a function, `SHA256`, that returns a -bytestring `B`, which is the hash of `C`. `D` gets the algorithm concatenated -with the hex encoding of `B`. We then define the identifier of `C` to `ID(C)` -as equal to `D`. A digest can be verified by independently calculating `D` and -comparing it with identifier `ID(C)`. - -#### Digest Header - -To provide verification of http content, any response may include a -`Docker-Content-Digest` header. This will include the digest of the target -entity returned in the response. For blobs, this is the entire blob content. For -manifests, this is the manifest body without the signature content, also known -as the JWS payload. Note that the commonly used canonicalization for digest -calculation may be dependent on the mediatype of the content, such as with -manifests. - -The client may choose to ignore the header or may verify it to ensure content -integrity and transport security. 
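As a concrete illustration of the digest scheme above (a sketch, not part of the specification itself), a client can recompute and compare the serialized digest of a downloaded blob with ordinary shell tools:

```bash
# Illustration only: "downloaded-blob" is a placeholder for a blob that was
# fetched by digest; the expected value is the digest used to fetch it.
expected="sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b"
actual="sha256:$(sha256sum downloaded-blob | cut -d' ' -f1)"
if [ "$actual" = "$expected" ]; then
  echo "digest verified"
else
  echo "digest mismatch" >&2
fi
```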
This is most important when fetching by a -digest. To ensure security, the content should be verified against the digest -used to fetch the content. At times, the returned digest may differ from that -used to initiate a request. Such digests are considered to be from different -_domains_, meaning they have different values for _algorithm_. In such a case, -the client may choose to verify the digests in both domains or ignore the -server's digest. To maintain security, the client _must_ always verify the -content against the _digest_ used to fetch the content. - -> __IMPORTANT:__ If a _digest_ is used to fetch content, the client should use -> the same digest used to fetch the content to verify it. The header -> `Docker-Content-Digest` should not be trusted over the "local" digest. - -### Pulling An Image - -An "image" is a combination of a JSON manifest and individual layer files. The -process of pulling an image centers around retrieving these two components. - -The first step in pulling an image is to retrieve the manifest. For reference, -the relevant manifest fields for the registry are the following: - - field | description | -----------|------------------------------------------------| -name | The name of the image. | -tag | The tag for this version of the image. | -fsLayers | A list of layer descriptors (including digest) | -signature | A JWS used to verify the manifest content | - -For more information about the manifest format, please see -[docker/docker#8093](https://github.com/docker/docker/issues/8093). - -When the manifest is in hand, the client must verify the signature to ensure -the names and layers are valid. Once confirmed, the client will then use the -digests to download the individual layers. Layers are stored in as blobs in -the V2 registry API, keyed by their digest. - -#### Pulling an Image Manifest - -The image manifest can be fetched with the following url: - -``` -GET /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -The client should include an Accept header indicating which manifest content -types it supports. For more details on the manifest formats and their content -types, see [manifest-v2-1.md](manifest-v2-1.md) and -[manifest-v2-2.md](manifest-v2-2.md). In a successful response, the Content-Type -header will indicate which manifest type is being returned. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful, the image -manifest will be returned, with the following format (see -[docker/docker#8093](https://github.com/docker/docker/issues/8093) for details): - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": - } - -The client should verify the returned manifest signature for authenticity -before fetching layers. - -##### Existing Manifests - -The image manifest can be checked for existence with the following url: - -``` -HEAD /v2//manifests/ -``` - -The `name` and `reference` parameter identify the image and are required. The -reference may include a tag or digest. - -A `404 Not Found` response will be returned if the image is unknown to the -registry. If the image exists and the response is successful the response will -be as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - - -#### Pulling a Layer - -Layers are stored in the blob portion of the registry, keyed by digest. 
-Pulling a layer is carried out by a standard http request. The URL is as -follows: - - GET /v2//blobs/ - -Access to a layer will be gated by the `name` of the repository but is -identified uniquely in the registry by `digest`. - -This endpoint may issue a 307 (302 for /blobs/uploads/ -``` - -The parameters of this request are the image namespace under which the layer -will be linked. Responses to this request are covered below. - -##### Existing Layers - -The existence of a layer can be checked via a `HEAD` request to the blob store -API. The request should be formatted as follows: - -``` -HEAD /v2//blobs/ -``` - -If the layer with the digest specified in `digest` is available, a 200 OK -response will be received, with no actual body content (this is according to -http specification). The response will look as follows: - -``` -200 OK -Content-Length: -Docker-Content-Digest: -``` - -When this response is received, the client can assume that the layer is -already available in the registry under the given name and should take no -further action to upload the layer. Note that the binary digests may differ -for the existing registry layer, but the digests will be guaranteed to match. - -##### Uploading the Layer - -If the POST request is successful, a `202 Accepted` response will be returned -with the upload URL in the `Location` header: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -The rest of the upload process can be carried out with the returned url, -called the "Upload URL" from the `Location` header. All responses to the -upload url, whether sending data or getting status, will be in this format. -Though the URI format (`/v2//blobs/uploads/`) for the `Location` -header is specified, clients should treat it as an opaque url and should never -try to assemble it. While the `uuid` parameter may be an actual UUID, this -proposal imposes no constraints on the format and clients should never impose -any. - -If clients need to correlate local upload state with remote upload state, the -contents of the `Docker-Upload-UUID` header should be used. Such an id can be -used to key the last used location header when implementing resumable uploads. - -##### Upload Progress - -The progress and chunk coordination of the upload process will be coordinated -through the `Range` header. While this is a non-standard use of the `Range` -header, there are examples of [similar approaches](https://developers.google.com/youtube/v3/guides/using_resumable_upload_protocol) in APIs with heavy use. -For an upload that just started, for an example with a 1000 byte layer file, -the `Range` header would be as follows: - -``` -Range: bytes=0-0 -``` - -To get the status of an upload, issue a GET request to the upload URL: - -``` -GET /v2//blobs/uploads/ -Host: -``` - -The response will be similar to the above, except will return 204 status: - -``` -204 No Content -Location: /v2//blobs/uploads/ -Range: bytes=0- -Docker-Upload-UUID: -``` - -Note that the HTTP `Range` header byte ranges are inclusive and that will be -honored, even in non-standard use cases. - -##### Monolithic Upload - -A monolithic upload is simply a chunked upload with a single chunk and may be -favored by clients that would like to avoided the complexity of chunking. 
To -carry out a "monolithic" upload, one can simply put the entire content blob to -the provided URL: - -``` -PUT /v2//blobs/uploads/?digest= -Content-Length: -Content-Type: application/octet-stream - - -``` - -The "digest" parameter must be included with the PUT request. Please see the -[_Completed Upload_](#completed-upload) section for details on the parameters -and expected responses. - -##### Chunked Upload - -To carry out an upload of a chunk, the client can specify a range header and -only include that part of the layer file: - -``` -PATCH /v2//blobs/uploads/ -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -There is no enforcement on layer chunk splits other than that the server must -receive them in order. The server may enforce a minimum chunk size. If the -server cannot accept the chunk, a `416 Requested Range Not Satisfiable` -response will be returned and will include a `Range` header indicating the -current status: - -``` -416 Requested Range Not Satisfiable -Location: /v2//blobs/uploads/ -Range: 0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -If this response is received, the client should resume from the "last valid -range" and upload the subsequent chunk. A 416 will be returned under the -following conditions: - -- Invalid Content-Range header format -- Out of order chunk: the range of the next chunk must start immediately after - the "last valid range" from the previous response. - -When a chunk is accepted as part of the upload, a `202 Accepted` response will -be returned, including a `Range` header with the current upload status: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -##### Completed Upload - -For an upload to be considered complete, the client must submit a `PUT` -request on the upload endpoint with a digest parameter. If it is not provided, -the upload will not be considered complete. The format for the final chunk -will be as follows: - -``` -PUT /v2//blob/uploads/?digest= -Content-Length: -Content-Range: - -Content-Type: application/octet-stream - - -``` - -Optionally, if all chunks have already been uploaded, a `PUT` request with a -`digest` parameter and zero-length body may be sent to complete and validated -the upload. Multiple "digest" parameters may be provided with different -digests. The server may verify none or all of them but _must_ notify the -client if the content is rejected. - -When the last chunk is received and the layer has been validated, the client -will receive a `201 Created` response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data. - -###### Digest Parameter - -The "digest" parameter is designed as an opaque parameter to support -verification of a successful transfer. For example, an HTTP URI parameter -might be as follows: - -``` -sha256:6c3c624b58dbbcd3c0dd82b4c53f04194d1247c6eebdaab7c610cf7d66709b3b -``` - -Given this parameter, the registry will verify that the provided content does -match this digest. - -##### Canceling an Upload - -An upload can be cancelled by issuing a DELETE request to the upload endpoint. 
-The format will be as follows: - -``` -DELETE /v2//blobs/uploads/ -``` - -After this request is issued, the upload uuid will no longer be valid and the -registry server will dump all intermediate data. While uploads will time out -if not completed, clients should issue this request if they encounter a fatal -error but still have the ability to issue an http request. - -##### Cross Repository Blob Mount - -A blob may be mounted from another repository that the client has read access -to, removing the need to upload a blob already known to the registry. To issue -a blob mount instead of an upload, a POST request should be issued in the -following format: - -``` -POST /v2//blobs/uploads/?mount=&from= -Content-Length: 0 -``` - -If the blob is successfully mounted, the client will receive a `201 Created` -response: - -``` -201 Created -Location: /v2//blobs/ -Content-Length: 0 -Docker-Content-Digest: -``` - -The `Location` header will contain the registry URL to access the accepted -layer file. The `Docker-Content-Digest` header returns the canonical digest of -the uploaded blob which may differ from the provided digest. Most clients may -ignore the value but if it is used, the client should verify the value against -the uploaded blob data. - -If a mount fails due to invalid repository or digest arguments, the registry -will fall back to the standard upload behavior and return a `202 Accepted` with -the upload URL in the `Location` header: - -``` -202 Accepted -Location: /v2//blobs/uploads/ -Range: bytes=0- -Content-Length: 0 -Docker-Upload-UUID: -``` - -This behavior is consistent with older versions of the registry, which do not -recognize the repository mount query parameters. - -Note: a client may issue a HEAD request to check existence of a blob in a source -repository to distinguish between the registry not supporting blob mounts and -the blob not existing in the expected repository. - -##### Errors - -If an 502, 503 or 504 error is received, the client should assume that the -download can proceed due to a temporary condition, honoring the appropriate -retry mechanism. Other 5xx errors should be treated as terminal. - -If there is a problem with the upload, a 4xx error will be returned indicating -the problem. After receiving a 4xx response (except 416, as called out above), -the upload will be considered failed and the client should take appropriate -action. - -Note that the upload url will not be available forever. If the upload uuid is -unknown to the registry, a `404 Not Found` response will be returned and the -client must restart the upload process. - -### Deleting a Layer - -A layer may be deleted from the registry via its `name` and `digest`. A -delete may be issued with the following request format: - - DELETE /v2//blobs/ - -If the blob exists and has been successfully deleted, the following response -will be issued: - - 202 Accepted - Content-Length: None - -If the blob had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -If a layer is deleted which is referenced by a manifest in the registry, -then the complete images will not be resolvable. - -#### Pushing an Image Manifest - -Once all of the layers for an image are uploaded, the client can upload the -image manifest. An image can be pushed using the following request format: - - PUT /v2//manifests/ - Content-Type: - - { - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": - }, - ... - ] - ], - "history": , - "signature": , - ... 
- } - -The `name` and `reference` fields of the response body must match those -specified in the URL. The `reference` field may be a "tag" or a "digest". The -content type should match the type of the manifest being uploaded, as specified -in [manifest-v2-1.md](manifest-v2-1.md) and [manifest-v2-2.md](manifest-v2-2.md). - -If there is a problem with pushing the manifest, a relevant 4xx response will -be returned with a JSON error message. Please see the -[_PUT Manifest_](#put-manifest) section for details on possible error codes that -may be returned. - -If one or more layers are unknown to the registry, `BLOB_UNKNOWN` errors are -returned. The `detail` field of the error response will have a `digest` field -identifying the missing blob. An error is returned for each unknown blob. The -response format is as follows: - - { - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": - } - }, - ... - ] - } - -### Listing Repositories - -Images are stored in collections, known as a _repository_, which is keyed by a -`name`, as seen throughout the API specification. A registry instance may -contain several repositories. The list of available repositories is made -available through the _catalog_. - -The catalog for a given registry can be retrieved with the following request: - -``` -GET /v2/_catalog -``` - -The response will be in the following format: - -``` -200 OK -Content-Type: application/json - -{ - "repositories": [ - , - ... - ] -} -``` - -Note that the contents of the response are specific to the registry -implementation. Some registries may opt to provide a full catalog output, -limit it based on the user's access level or omit upstream results, if -providing mirroring functionality. Subsequently, the presence of a repository -in the catalog listing only means that the registry *may* provide access to -the repository at the time of the request. Conversely, a missing entry does -*not* mean that the registry does not have the repository. More succinctly, -the presence of a repository only guarantees that it is there but not that it -is _not_ there. - -For registries with a large number of repositories, this response may be quite -large. If such a response is expected, one should use pagination. A registry -may also limit the amount of responses returned even if pagination was not -explicitly requested. In this case the `Link` header will be returned along -with the results, and subsequent results can be obtained by following the link -as if pagination had been initially requested. - -For details of the `Link` header, please see the [_Pagination_](#pagination) -section. - -#### Pagination - -Paginated catalog results can be retrieved by adding an `n` parameter to the -request URL, declaring that the response should be limited to `n` results. -Starting a paginated flow begins as follows: - -``` -GET /v2/_catalog?n= -``` - -The above specifies that a catalog response should be returned, from the start of -the result set, ordered lexically, limiting the number of results to `n`. The -response to such a request would look as follows: - -``` -200 OK -Content-Type: application/json -Link: <?n=&last=>; rel="next" - -{ - "repositories": [ - , - ... - ] -} -``` - -The above includes the _first_ `n` entries from the result set. To get the -_next_ `n` entries, one can create a URL where the argument `last` has the -value from `repositories[len(repositories)-1]`. 
If there are indeed more -results, the URL for the next block is encoded in an -[RFC5988](https://tools.ietf.org/html/rfc5988) `Link` header, as a "next" -relation. The presence of the `Link` header communicates to the client that -the entire result set has not been returned and another request must be -issued. If the header is not present, the client can assume that all results -have been received. - -> __NOTE:__ In the request template above, note that the brackets -> are required. For example, if the url is -> `http://example.com/v2/_catalog?n=20&last=b`, the value of the header would -> be `; rel="next"`. Please see -> [RFC5988](https://tools.ietf.org/html/rfc5988) for details. - -Compliant client implementations should always use the `Link` header -value when proceeding through results linearly. The client may construct URLs -to skip forward in the catalog. - -To get the next result set, a client would issue the request as follows, using -the URL encoded in the described `Link` header: - -``` -GET /v2/_catalog?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set. - -The catalog result set is represented abstractly as a lexically sorted list, -where the position in that list can be specified by the query term `last`. The -entries in the response start _after_ the term specified by `last`, up to `n` -entries. - -The behavior of `last` is quite simple when demonstrated with an example. Let -us say the registry has the following repositories: - -``` -a -b -c -d -``` - -If the value of `n` is 2, _a_ and _b_ will be returned on the first response. -The `Link` header returned on the response will have `n` set to 2 and last set -to _b_: - -``` -Link: <?n=2&last=b>; rel="next" -``` - -The client can then issue the request with the above value from the `Link` -header, receiving the values _c_ and _d_. Note that `n` may change on the second -to last response or be fully omitted, depending on the server implementation. - -### Listing Image Tags - -It may be necessary to list all of the tags under a given repository. The tags -for an image repository can be retrieved with the following request: - - GET /v2//tags/list - -The response will be in the following format: - - 200 OK - Content-Type: application/json - - { - "name": , - "tags": [ - , - ... - ] - } - -For repositories with a large number of tags, this response may be quite -large. If such a response is expected, one should use the pagination. - -#### Pagination - -Paginated tag results can be retrieved by adding the appropriate parameters to -the request URL described above. The behavior of tag pagination is identical -to that specified for catalog pagination. We cover a simple flow to highlight -any differences. - -Starting a paginated flow may begin as follows: - -``` -GET /v2//tags/list?n= -``` - -The above specifies that a tags response should be returned, from the start of -the result set, ordered lexically, limiting the number of results to `n`. The -response to such a request would look as follows: - -``` -200 OK -Content-Type: application/json -Link: <?n=&last=>; rel="next" - -{ - "name": , - "tags": [ - , - ... - ] -} -``` - -To get the next result set, a client would issue the request as follows, using -the value encoded in the [RFC5988](https://tools.ietf.org/html/rfc5988) `Link` -header: - -``` -GET /v2//tags/list?n=&last= -``` - -The above process should then be repeated until the `Link` header is no longer -set in the response. 
The behavior of the `last` parameter, the provided -response result, lexical ordering and encoding of the `Link` header are -identical to that of catalog pagination. - -### Deleting an Image - -An image may be deleted from the registry via its `name` and `reference`. A -delete may be issued with the following request format: - - DELETE /v2//manifests/ - -For deletes, `reference` *must* be a digest or the delete will fail. If the -image exists and has been successfully deleted, the following response will be -issued: - - 202 Accepted - Content-Length: None - -If the image had already been deleted or did not exist, a `404 Not Found` -response will be issued instead. - -> **Note** When deleting a manifest from a registry version 2.3 or later, the -> following header must be used when `HEAD` or `GET`-ing the manifest to obtain -> the correct digest to delete: - - Accept: application/vnd.docker.distribution.manifest.v2+json - -> for more details, see: [compatibility.md](../compatibility.md#content-addressable-storage-cas) - -## Detail - -> **Note**: This section is still under construction. For the purposes of -> implementation, if any details below differ from the described request flows -> above, the section below should be corrected. When they match, this note -> should be removed. - -The behavior of the endpoints are covered in detail in this section, organized -by route and entity. All aspects of the request and responses are covered, -including headers, parameters and body formats. Examples of requests and their -corresponding responses, with success and failure, are enumerated. - -> **Note**: The sections on endpoint detail are arranged with an example -> request, a description of the request, followed by information about that -> request. - -A list of methods and URIs are covered in the table below: - -|Method|Path|Entity|Description| -|------|----|------|-----------| -{{range $route := .RouteDescriptors}}{{range $method := .Methods}}| {{$method.Method}} | `{{$route.Path|prettygorilla}}` | {{$route.Entity}} | {{$method.Description}} | -{{end}}{{end}} - -The detail for each endpoint is covered in the following sections. 
- -### Errors - -The error codes encountered via the API are enumerated in the following table: - -|Code|Message|Description| -|----|-------|-----------| -{{range $err := .ErrorDescriptors}} `{{$err.Value}}` | {{$err.Message}} | {{$err.Description|removenewlines}} -{{end}} - -{{range $route := .RouteDescriptors}} -### {{.Entity}} - -{{.Description}} - -{{range $method := $route.Methods}} - -#### {{.Method}} {{$route.Entity}} - -{{.Description}} - -{{if .Requests}}{{range .Requests}}{{if .Name}} -##### {{.Name}}{{end}} - -``` -{{$method.Method}} {{$route.Path|prettygorilla}}{{range $i, $param := .QueryParameters}}{{if eq $i 0}}?{{else}}&{{end}}{{$param.Name}}={{$param.Format}}{{end}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} - -{{if or .Headers .PathParameters .QueryParameters}} -The following parameters should be specified on the request: - -|Name|Kind|Description| -|----|----|-----------| -{{range .Headers}}|`{{.Name}}`|header|{{.Description}}| -{{end}}{{range .PathParameters}}|`{{.Name}}`|path|{{.Description}}| -{{end}}{{range .QueryParameters}}|`{{.Name}}`|query|{{.Description}}| -{{end}}{{end}} - -{{if .Successes}} -{{range .Successes}} -###### On Success: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Fields}}The following fields may be returned in the response body: - -|Name|Description| -|----|-----------| -{{range .Fields}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{if .Headers}} -The following headers will be returned with the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}}{{end}}{{end}} - -{{if .Failures}} -{{range .Failures}} -###### On Failure: {{if .Name}}{{.Name}}{{else}}{{.StatusCode | statustext}}{{end}} - -``` -{{.StatusCode}} {{.StatusCode | statustext}}{{range .Headers}} -{{.Name}}: {{.Format}}{{end}}{{if .Body.ContentType}} -Content-Type: {{.Body.ContentType}}{{end}}{{if .Body.Format}} - -{{.Body.Format}}{{end}} -``` - -{{.Description}} -{{if .Headers}} -The following headers will be returned on the response: - -|Name|Description| -|----|-----------| -{{range .Headers}}|`{{.Name}}`|{{.Description}}| -{{end}}{{end}} - -{{if .ErrorCodes}} -The error codes that may be included in the response body are enumerated below: - -|Code|Message|Description| -|----|-------|-----------| -{{range $err := .ErrorCodes}}| `{{$err.Descriptor.Value}}` | {{$err.Descriptor.Message}} | {{$err.Descriptor.Description|removenewlines}} | -{{end}} - -{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} - -{{end}} diff --git a/registry/spec/implementations.md b/registry/spec/implementations.md index a365db6c4a7..3474653502f 100644 --- a/registry/spec/implementations.md +++ b/registry/spec/implementations.md @@ -1,5 +1,5 @@ --- -draft: true +published: false --- # Distribution API Implementations diff --git a/registry/spec/json.md b/registry/spec/json.md index 8e149a34d41..e5d0d304e81 100644 --- a/registry/spec/json.md +++ b/registry/spec/json.md @@ -1,6 +1,6 @@ --- description: Explains registry JSON objects -draft: true +published: false keywords: - registry, service, images, repository, json menu: diff --git 
a/registry/spec/menu.md b/registry/spec/menu.md deleted file mode 100644 index 0e39f6b7a25..00000000000 --- a/registry/spec/menu.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: Explains registry JSON objects -keywords: -- registry, service, images, repository, json -menu: - main: - identifier: smn_registry_ref - parent: smn_registry - weight: 7 -title: Reference -type: menu ---- - - - diff --git a/registry/storage-drivers/menu.md b/registry/storage-drivers/menu.md deleted file mode 100644 index c58f57de47a..00000000000 --- a/registry/storage-drivers/menu.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: Storage Drivers -keywords: -- registry, on-prem, images, tags, repository, distribution -menu: - main: - identifier: smn_storagedrivers - parent: smn_registry - weight: 7 -title: Storage Drivers -type: menu ---- - - - diff --git a/robots.txt b/robots.txt index f4165dfbdc8..1944517166b 100644 --- a/robots.txt +++ b/robots.txt @@ -1,3 +1,9 @@ - User-agent: * -Disallow: / +Disallow: /v1.4/ +Disallow: /v1.5/ +Disallow: /v1.6/ +Disallow: /v1.7/ +Disallow: /v1.8/ +Disallow: /v1.9/ +Disallow: /v1.10/ +Disallow: /v1.11/ diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000000..901d1190115 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,19 @@ +--- +layout: null +--- + + + + + https://docs.docker.com/ + {{ site.time | date_to_xmlschema }} + +{% for page in site.pages %} + https://docs.docker.com{{ page.url }} + {% if page.date %}{{ page.date | date_to_xmlschema }}{% else %}{{ site.time | date_to_xmlschema }}{% endif %} +{% endfor %} + diff --git a/swarm/install-w-machine.md b/swarm/install-w-machine.md index 62cba5faee2..6cadb472fc2 100644 --- a/swarm/install-w-machine.md +++ b/swarm/install-w-machine.md @@ -15,7 +15,7 @@ on your local machine using Docker Machine and VirtualBox. ## Prerequisites -Make sure your local system has VirtualBox installed. If you are using Mac OS X +Make sure your local system has VirtualBox installed. If you are using macOS or Windows and have installed Docker, you should have VirtualBox already installed. @@ -35,61 +35,68 @@ This discovery service associates a token with instances of the Docker Daemon running on each node. Other discovery service backends such as `etcd`, `consul`, and `zookeeper` are [available](discovery.md). -1. List the machines on your system. +1. List the machines on your system. - $ docker-machine ls - NAME ACTIVE DRIVER STATE URL SWARM - docker-vm * virtualbox Running tcp://192.168.99.100:2376 + ```bash + $ docker-machine ls + NAME ACTIVE DRIVER STATE URL SWARM + docker-vm * virtualbox Running tcp://192.168.99.100:2376 + ``` - This example was run a Mac OSX system with Docker Toolbox installed. So, the - `docker-vm` virtual machine is in the list. + This example was run a macOS system with Docker Toolbox installed. So, the `docker-vm` virtual machine is in the list. -2. Create a VirtualBox machine called `local` on your system. +2. Create a VirtualBox machine called `local` on your system. - $ docker-machine create -d virtualbox local - INFO[0000] Creating SSH key... - INFO[0000] Creating VirtualBox VM... - INFO[0005] Starting VirtualBox VM... - INFO[0005] Waiting for VM to start... - INFO[0050] "local" has been created and is now the active machine. - INFO[0050] To point your Docker client at it, run this in your shell: eval "$(docker-machine env local)" + ```bash + $ docker-machine create -d virtualbox local + INFO[0000] Creating SSH key... + INFO[0000] Creating VirtualBox VM... + INFO[0005] Starting VirtualBox VM... 
+ INFO[0005] Waiting for VM to start... + INFO[0050] "local" has been created and is now the active machine. + INFO[0050] To point your Docker client at it, run this in your shell: eval "$(docker-machine env local)" + ``` -3. Load the `local` machine configuration into your shell. +3. Load the `local` machine configuration into your shell. - $ eval "$(docker-machine env local)" + ``` + $ eval "$(docker-machine env local)" + ``` -4. Generate a discovery token using the Docker Swarm image. +4. Generate a discovery token using the Docker Swarm image. - The command below runs the `swarm create` command in a container. If you - haven't got the `swarm:latest` image on your local machine, Docker pulls it - for you. + The command below runs the `swarm create` command in a container. If you + haven't got the `swarm:latest` image on your local machine, Docker pulls it + for you. - $ docker run swarm create - Unable to find image 'swarm:latest' locally - latest: Pulling from swarm - de939d6ed512: Pull complete - 79195899a8a4: Pull complete - 79ad4f2cc8e0: Pull complete - 0db1696be81b: Pull complete - ae3b6728155e: Pull complete - 57ec2f5f3e06: Pull complete - 73504b2882a3: Already exists - swarm:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. - Digest: sha256:aaaf6c18b8be01a75099cc554b4fb372b8ec677ae81764dcdf85470279a61d6f - Status: Downloaded newer image for swarm:latest - fe0cc96a72cf04dba8c1c4aa79536ec3 + ```none + $ docker run swarm create + Unable to find image 'swarm:latest' locally + latest: Pulling from swarm + de939d6ed512: Pull complete + 79195899a8a4: Pull complete + 79ad4f2cc8e0: Pull complete + 0db1696be81b: Pull complete + ae3b6728155e: Pull complete + 57ec2f5f3e06: Pull complete + 73504b2882a3: Already exists + swarm:latest: The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security. + Digest: sha256:aaaf6c18b8be01a75099cc554b4fb372b8ec677ae81764dcdf85470279a61d6f + Status: Downloaded newer image for swarm:latest + fe0cc96a72cf04dba8c1c4aa79536ec3 + ``` - The `swarm create` command returned the `fe0cc96a72cf04dba8c1c4aa79536ec3` - token. + The `swarm create` command returned the `fe0cc96a72cf04dba8c1c4aa79536ec3` + token. - **Note**: This command relies on Docker Swarm's hosted discovery service. If - this service is having issues, this command may fail. In this case, see - information on using other types of [discovery backends](discovery.md). Check - the [status page](http://status.docker.com/) for service availability. + **Note**: This command relies on Docker Swarm's hosted discovery service. If + this service is having issues, this command may fail. In this case, see + information on using other types of [discovery backends](discovery.md). Check + the [status page](http://status.docker.com/) for service availability. -5. Save the token in a safe place. +5. Save the token in a safe place. - You'll use this token in the next step to create a Docker Swarm. + You'll use this token in the next step to create a Docker Swarm. ## Launch the Swarm manager @@ -103,53 +110,63 @@ daemons and you can communicate with them using the Docker remote API. In this section, you create a swarm manager and two nodes. -1. Create a swarm manager under VirtualBox. +1. Create a swarm manager under VirtualBox. 
- docker-machine create \ - -d virtualbox \ - --swarm \ - --swarm-master \ - --swarm-discovery token:// \ - swarm-master + ``` + docker-machine create \ + -d virtualbox \ + --swarm \ + --swarm-master \ + --swarm-discovery token:// \ + swarm-master + ``` - For example: + For example: - $ docker-machine create -d virtualbox --swarm --swarm-master --swarm-discovery token://fe0cc96a72cf04dba8c1c4aa79536ec3 swarm-master - INFO[0000] Creating SSH key... - INFO[0000] Creating VirtualBox VM... - INFO[0005] Starting VirtualBox VM... - INFO[0005] Waiting for VM to start... - INFO[0060] "swarm-master" has been created and is now the active machine. - INFO[0060] To point your Docker client at it, run this in your shell: eval "$(docker-machine env swarm-master)" + ```none + $ docker-machine create -d virtualbox --swarm --swarm-master --swarm-discovery token://fe0cc96a72cf04dba8c1c4aa79536ec3 swarm-master + INFO[0000] Creating SSH key... + INFO[0000] Creating VirtualBox VM... + INFO[0005] Starting VirtualBox VM... + INFO[0005] Waiting for VM to start... + INFO[0060] "swarm-master" has been created and is now the active machine. + INFO[0060] To point your Docker client at it, run this in your shell: eval "$(docker-machine env swarm-master)" + ``` -2. Open your VirtualBox Manager, it should contain the `local` machine and the -new `swarm-master` machine. +2. Open your VirtualBox Manager, it should contain the `local` machine and the + new `swarm-master` machine. - ![VirtualBox](images/virtual-box.png) + ![VirtualBox](images/virtual-box.png) -3. Create a swarm node. +3. Create a swarm node. - docker-machine create \ - -d virtualbox \ - --swarm \ - --swarm-discovery token:// \ - swarm-agent-00 + ```bash + docker-machine create \ + -d virtualbox \ + --swarm \ + --swarm-discovery token:// \ + swarm-agent-00 + ``` - For example: + For example: - $ docker-machine create -d virtualbox --swarm --swarm-discovery token://fe0cc96a72cf04dba8c1c4aa79536ec3 swarm-agent-00 - INFO[0000] Creating SSH key... - INFO[0000] Creating VirtualBox VM... - INFO[0005] Starting VirtualBox VM... - INFO[0006] Waiting for VM to start... - INFO[0066] "swarm-agent-00" has been created and is now the active machine. - INFO[0066] To point your Docker client at it, run this in your shell: eval "$(docker-machine env swarm-agent-00)" + ```bash + $ docker-machine create -d virtualbox --swarm --swarm-discovery token://fe0cc96a72cf04dba8c1c4aa79536ec3 swarm-agent-00 + INFO[0000] Creating SSH key... + INFO[0000] Creating VirtualBox VM... + INFO[0005] Starting VirtualBox VM... + INFO[0006] Waiting for VM to start... + INFO[0066] "swarm-agent-00" has been created and is now the active machine. + INFO[0066] To point your Docker client at it, run this in your shell: eval "$(docker-machine env swarm-agent-00)" + ``` -3. Add another agent called `swarm-agent-01`. +3. Add another agent called `swarm-agent-01`. - $ docker-machine create -d virtualbox --swarm --swarm-discovery token://fe0cc96a72cf04dba8c1c4aa79536ec3 swarm-agent-01 + ```bash + $ docker-machine create -d virtualbox --swarm --swarm-discovery token://fe0cc96a72cf04dba8c1c4aa79536ec3 swarm-agent-01 + ``` - You should see the two agents in your VirtualBox Manager. + You should see the two agents in your VirtualBox Manager. ## Direct your swarm @@ -157,75 +174,85 @@ In this step, you connect to the swarm machine, display information related to your swarm, and start an image on your swarm. -1. Point your Docker environment to the machine running the swarm master. 
- - $ eval $(docker-machine env --swarm swarm-master) - - -2. Get information on your new swarm using the `docker` command. - - $ docker info - Containers: 4 - Strategy: spread - Filters: affinity, health, constraint, port, dependency - Nodes: 3 - swarm-agent-00: 192.168.99.105:2376 - └ Containers: 1 - └ Reserved CPUs: 0 / 8 - └ Reserved Memory: 0 B / 1.023 GiB - swarm-agent-01: 192.168.99.106:2376 - └ Containers: 1 - └ Reserved CPUs: 0 / 8 - └ Reserved Memory: 0 B / 1.023 GiB - swarm-master: 192.168.99.104:2376 - └ Containers: 2 - └ Reserved CPUs: 0 / 8 - - You can see that each agent and the master all have port `2376` exposed. When you create a swarm, you can use any port you like and even different ports on different nodes. Each swarm node runs the swarm agent container. - - The master is running both the swarm manager and a swarm agent container. This isn't recommended in a production environment because it can cause problems with agent failover. However, it is perfectly fine to do this in a learning environment like this one. - -3. Check the images currently running on your swarm. - - $ docker ps -a - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 78be991b58d1 swarm:latest "/swarm join --addr 3 minutes ago Up 2 minutes 2375/tcp swarm-agent-01/swarm-agent - da5127e4f0f9 swarm:latest "/swarm join --addr 6 minutes ago Up 6 minutes 2375/tcp swarm-agent-00/swarm-agent - ef395f316c59 swarm:latest "/swarm join --addr 16 minutes ago Up 16 minutes 2375/tcp swarm-master/swarm-agent - 45821ca5208e swarm:latest "/swarm manage --tls 16 minutes ago Up 16 minutes 2375/tcp, 192.168.99.104:3376->3376/tcp swarm-master/swarm-agent-master - -4. Run the Docker `hello-world` test image on your swarm. - - - $ docker run hello-world - Hello from Docker. - This message shows that your installation appears to be working correctly. - - To generate this message, Docker took the following steps: - 1. The Docker client contacted the Docker daemon. - 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. - (Assuming it was not already locally available.) - 3. The Docker daemon created a new container from that image which runs the - executable that produces the output you are currently reading. - 4. The Docker daemon streamed that output to the Docker client, which sent it - to your terminal. - - To try something more ambitious, you can run an Ubuntu container with: - $ docker run -it ubuntu bash - - For more examples and ideas, visit: - http://docs.docker.com/userguide/ - -5. Use the `docker ps` command to find out which node the container ran on. - - $ docker ps -a - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - 54a8690043dd hello-world:latest "/hello" 22 seconds ago Exited (0) 3 seconds ago swarm-agent-00/modest_goodall - 78be991b58d1 swarm:latest "/swarm join --addr 5 minutes ago Up 4 minutes 2375/tcp swarm-agent-01/swarm-agent - da5127e4f0f9 swarm:latest "/swarm join --addr 8 minutes ago Up 8 minutes 2375/tcp swarm-agent-00/swarm-agent - ef395f316c59 swarm:latest "/swarm join --addr 18 minutes ago Up 18 minutes 2375/tcp swarm-master/swarm-agent - 45821ca5208e swarm:latest "/swarm manage --tls 18 minutes ago Up 18 minutes 2375/tcp, 192.168.99.104:3376->3376/tcp swarm-master/swarm-agent-master - +1. Point your Docker environment to the machine running the swarm master. + + ```bash + $ eval $(docker-machine env --swarm swarm-master) + ``` + + +2. Get information on your new swarm using the `docker` command. 
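    Before running it, you can optionally confirm that your client is talking to the Swarm manager rather than a single Engine. This quick check is an addition to the original steps; in this walkthrough the variable should point at the `swarm-master` machine's address on the Swarm port:

    ```bash
    # Optional sanity check: the previous `eval` should have set this to the
    # swarm-master address, for example tcp://192.168.99.104:3376
    echo $DOCKER_HOST
    ```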
+ + ```bash + $ docker info + Containers: 4 + Strategy: spread + Filters: affinity, health, constraint, port, dependency + Nodes: 3 + swarm-agent-00: 192.168.99.105:2376 + └ Containers: 1 + └ Reserved CPUs: 0 / 8 + └ Reserved Memory: 0 B / 1.023 GiB + swarm-agent-01: 192.168.99.106:2376 + └ Containers: 1 + └ Reserved CPUs: 0 / 8 + └ Reserved Memory: 0 B / 1.023 GiB + swarm-master: 192.168.99.104:2376 + └ Containers: 2 + └ Reserved CPUs: 0 / 8 + ``` + + You can see that each agent and the master all have port `2376` exposed. When you create a swarm, you can use any port you like and even different ports on different nodes. Each swarm node runs the swarm agent container. + + The master is running both the swarm manager and a swarm agent container. This isn't recommended in a production environment because it can cause problems with agent failover. However, it is perfectly fine to do this in a learning environment like this one. + +3. Check the images currently running on your swarm. + + ```none + $ docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 78be991b58d1 swarm:latest "/swarm join --addr 3 minutes ago Up 2 minutes 2375/tcp swarm-agent-01/swarm-agent + da5127e4f0f9 swarm:latest "/swarm join --addr 6 minutes ago Up 6 minutes 2375/tcp swarm-agent-00/swarm-agent + ef395f316c59 swarm:latest "/swarm join --addr 16 minutes ago Up 16 minutes 2375/tcp swarm-master/swarm-agent + 45821ca5208e swarm:latest "/swarm manage --tls 16 minutes ago Up 16 minutes 2375/tcp, 192.168.99.104:3376->3376/tcp swarm-master/swarm-agent-master + ``` + +4. Run the Docker `hello-world` test image on your swarm. + + ```bash + $ docker run hello-world + Hello from Docker. + This message shows that your installation appears to be working correctly. + + To generate this message, Docker took the following steps: + 1. The Docker client contacted the Docker daemon. + 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. + (Assuming it was not already locally available.) + 3. The Docker daemon created a new container from that image which runs the + executable that produces the output you are currently reading. + 4. The Docker daemon streamed that output to the Docker client, which sent it + to your terminal. + ``` + + To try something more ambitious, you can run an Ubuntu container with: + + ```bash + $ docker run -it ubuntu bash + ``` + + For more examples and ideas, visit the [User Guide](http://docs.docker.com/userguide/). + +5. Use the `docker ps` command to find out which node the container ran on. + + ```bash + $ docker ps -a + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + 54a8690043dd hello-world:latest "/hello" 22 seconds ago Exited (0) 3 seconds ago swarm-agent-00/modest_goodall + 78be991b58d1 swarm:latest "/swarm join --addr 5 minutes ago Up 4 minutes 2375/tcp swarm-agent-01/swarm-agent + da5127e4f0f9 swarm:latest "/swarm join --addr 8 minutes ago Up 8 minutes 2375/tcp swarm-agent-00/swarm-agent + ef395f316c59 swarm:latest "/swarm join --addr 18 minutes ago Up 18 minutes 2375/tcp swarm-master/swarm-agent + 45821ca5208e swarm:latest "/swarm manage --tls 18 minutes ago Up 18 minutes 2375/tcp, 192.168.99.104:3376->3376/tcp swarm-master/swarm-agent-master + ``` ## Where to go next diff --git a/swarm/overview.md b/swarm/overview.md index c59fe9a7532..39e0a352f3c 100644 --- a/swarm/overview.md +++ b/swarm/overview.md @@ -65,7 +65,7 @@ documents. 
The [Docker Swarm API](swarm-api.md) is compatible with the [Docker remote -API](http://docs.docker.com/reference/api/docker_remote_api/), and extends it +API](/engine/reference/api/docker_remote_api/), and extends it with some new endpoints. ## Getting help diff --git a/swarm/plan-for-production.md b/swarm/plan-for-production.md index b96d9a93aa0..b49a5f02d0d 100644 --- a/swarm/plan-for-production.md +++ b/swarm/plan-for-production.md @@ -1,14 +1,14 @@ ---- -description: Plan for Swarm in production -keywords: -- docker, swarm, scale, voting, application, plan -menu: - main: - parent: workw_swarm - weight: -45 -title: Plan for Swarm in production ---- - +--- +description: Plan for Swarm in production +keywords: +- docker, swarm, scale, voting, application, plan +menu: + main: + parent: workw_swarm + weight: -45 +title: Plan for Swarm in production +--- + # Plan for Swarm in production This article provides guidance to help you plan, deploy, and manage Docker @@ -75,7 +75,7 @@ configure your firewalls and other network access control lists. - **Custom, cross-host container networks**: - **Inbound 7946/tcp** Allows for discovering other container networks. - **Inbound 7946/udp** Allows for discovering other container networks. - - **Inbound /tcp** Network key-value store service port. + - **Inbound ``/tcp** Network key-value store service port. - **4789/udp** For the container overlay network. diff --git a/swarm/provision-with-machine.md b/swarm/provision-with-machine.md index ec9177f5b93..ca773608c0e 100644 --- a/swarm/provision-with-machine.md +++ b/swarm/provision-with-machine.md @@ -32,9 +32,9 @@ continuing](https://docs.docker.com/machine). ## What you need -If you are using Mac OS X or Windows and have installed with Docker Toolbox, you +If you are using macOS or Windows and have installed with Docker Toolbox, you should already have Machine installed. If you need to install, see the -instructions for [Mac OS X](https://docs.docker.com/engine/installation/mac/) or +instructions for [macOS](https://docs.docker.com/engine/installation/mac/) or [Windows](https://docs.docker.com/engine/installation/mac/). Machine supports installing on AWS, Digital Ocean, Google Cloud Platform, IBM @@ -47,7 +47,7 @@ The Toolbox installation gives you VirtualBox and the `boot2docker.iso` image you need. It also gives you the ability provision on all the systems Machine supports. -**Note**:These examples assume you are using Mac OS X or Windows, if you like you can also [install Docker Machine directly on a Linux +**Note**:These examples assume you are using macOS or Windows, if you like you can also [install Docker Machine directly on a Linux system](http://docs.docker.com/machine/install-machine). ## Provision a host to generate a Swarm token diff --git a/swarm/scheduler/rescheduling.md b/swarm/scheduler/rescheduling.md index 2f474977f58..9569ebdbf8d 100644 --- a/swarm/scheduler/rescheduling.md +++ b/swarm/scheduler/rescheduling.md @@ -2,16 +2,12 @@ description: Swarm rescheduling keywords: - docker, swarm, clustering, rescheduling -menu: - main: - parent: swarm_sched - weight: 6 title: Rescheduling --- # Swarm Rescheduling -You can set recheduling policies with Docker Swarm. A rescheduling policy +You can set rescheduling policies with Docker Swarm. A rescheduling policy determines what the Swarm scheduler does for containers when the nodes they are running on fail. @@ -26,7 +22,7 @@ container when a node fails. 
To set the `on-node-failure` policy with a `reschedule` environment variable: ```bash -$ docker run -d -e reschedule:on-node-failure redis +$ docker run -d -e "reschedule=on-node-failure" redis ``` To set the same policy with a `com.docker.swarm.reschedule-policies` label: @@ -40,14 +36,14 @@ $ docker run -d -l 'com.docker.swarm.reschedule-policies=["on-node-failure"]' re You can use the `docker logs` command to review the rescheduled container actions. To do this, use the following command syntax: -``` +```bash docker logs SWARM_MANAGER_CONTAINER_ID ``` When a container is successfully rescheduled, it generates a message similar to the following: -``` +```no-highlight Rescheduled container 2536adb23 from node-1 to node-2 as 2362901cb213da321 Container 2536adb23 was running, starting container 2362901cb213da321 ``` @@ -55,7 +51,7 @@ Container 2536adb23 was running, starting container 2362901cb213da321 If for some reason, the new container fails to start on the new node, the log contains: -``` +```no-highlight Failed to start rescheduled container 2362901cb213da321 ``` diff --git a/swarm/swarm_at_scale/deploy-app.md b/swarm/swarm_at_scale/deploy-app.md index 70a94e01e35..be22cf83c74 100644 --- a/swarm/swarm_at_scale/deploy-app.md +++ b/swarm/swarm_at_scale/deploy-app.md @@ -36,41 +36,41 @@ on the Swarm `manager` instance. 1. Direct your local environment to the Swarm manager host. - ```bash - $ eval $(docker-machine env manager) - ``` + ```bash + $ eval $(docker-machine env manager) + ``` You can create the network on a cluster node at the network is visible on them all. 2. Create the `voteapp` container network. - ```bash - $ docker network create -d overlay voteapp - ``` + ```bash + $ docker network create -d overlay voteapp + ``` 3. Switch to the db store. - ```bash - $ eval $(docker-machine env dbstore) - ``` + ```bash + $ eval $(docker-machine env dbstore) + ``` 4. Verify you can see the new network from the dbstore node. - ```bash - $ docker network ls - NETWORK ID NAME DRIVER - e952814f610a voteapp overlay - 1f12c5e7bcc4 bridge bridge - 3ca38e887cd8 none null - 3da57c44586b host host - ``` + ```bash + $ docker network ls + NETWORK ID NAME DRIVER + e952814f610a voteapp overlay + 1f12c5e7bcc4 bridge bridge + 3ca38e887cd8 none null + 3da57c44586b host host + ``` 3. Create a container volume called `db-data`. - ```bash - $ docker volume create --name db-data - ``` + ```bash + $ docker volume create --name db-data + ``` ## Task 2. Start the containerized microservices @@ -98,71 +98,71 @@ command below, look for the value constraint. 1. Start a Postgres database container. - ```bash - $ docker -H $(docker-machine ip manager):3376 run -t -d \ - -v db-data:/var/lib/postgresql/data \ - -e constraint:com.function==dbstore \ - --net="voteapp" \ - --name db postgres:9.4 - ``` + ```none + $ docker -H $(docker-machine ip manager):3376 run -t -d \ + -v db-data:/var/lib/postgresql/data \ + -e constraint:com.function==dbstore \ + --net="voteapp" \ + --name db postgres:9.4 + ``` 6. Start the Redis container. - ```bash - $ docker -H $(docker-machine ip manager):3376 run -t -d \ - -p 6379:6379 \ - -e constraint:com.function==dbstore \ - --net="voteapp" \ - --name redis redis - ``` + ```none + $ docker -H $(docker-machine ip manager):3376 run -t -d \ + -p 6379:6379 \ + -e constraint:com.function==dbstore \ + --net="voteapp" \ + --name redis redis + ``` The `redis` name is important so don't change it. 7. 
Start the worker application - ```bash - $ docker -H $(docker-machine ip manager):3376 run -t -d \ - -e constraint:com.function==worker01 \ - --net="voteapp" \ - --net-alias=workers \ - --name worker01 docker/example-voting-app-worker - ``` + ```none + $ docker -H $(docker-machine ip manager):3376 run -t -d \ + -e constraint:com.function==worker01 \ + --net="voteapp" \ + --net-alias=workers \ + --name worker01 docker/example-voting-app-worker + ``` 6. Start the results application. - ```bash - $ docker -H $(docker-machine ip manager):3376 run -t -d \ - -p 80:80 \ - --label=interlock.hostname=results \ - --label=interlock.domain=myenterprise.com \ - -e constraint:com.function==dbstore \ - --net="voteapp" \ - --name results-app docker/example-voting-app-result-app - ``` + ```none + $ docker -H $(docker-machine ip manager):3376 run -t -d \ + -p 80:80 \ + --label=interlock.hostname=results \ + --label=interlock.domain=myenterprise.example.com \ + -e constraint:com.function==dbstore \ + --net="voteapp" \ + --name results-app docker/example-voting-app-result-app + ``` 7. Start the voting application twice; once on each frontend node. - ```bash - $ docker -H $(docker-machine ip manager):3376 run -t -d \ - -p 80:80 \ - --label=interlock.hostname=vote \ - --label=interlock.domain=myenterprise.com \ - -e constraint:com.function==frontend01 \ - --net="voteapp" \ - --name voting-app01 docker/example-voting-app-voting-app - ``` + ```none + $ docker -H $(docker-machine ip manager):3376 run -t -d \ + -p 80:80 \ + --label=interlock.hostname=vote \ + --label=interlock.domain=myenterprise.example.com \ + -e constraint:com.function==frontend01 \ + --net="voteapp" \ + --name voting-app01 docker/example-voting-app-voting-app + ``` And again on the other frontend node. - ```bash - $ docker -H $(docker-machine ip manager):3376 run -t -d \ - -p 80:80 \ - --label=interlock.hostname=vote \ - --label=interlock.domain=myenterprise.com \ - -e constraint:com.function==frontend02 \ - --net="voteapp" \ - --name voting-app02 docker/example-voting-app-voting-app - ``` + ```none + $ docker -H $(docker-machine ip manager):3376 run -t -d \ + -p 80:80 \ + --label=interlock.hostname=vote \ + --label=interlock.domain=myenterprise.example.com \ + -e constraint:com.function==frontend02 \ + --net="voteapp" \ + --name voting-app02 docker/example-voting-app-voting-app + ``` ## Task 3. Check your work and update /etc/hosts @@ -173,81 +173,81 @@ allow you to take advantage of the loadbalancer. 1. Change to the `loadbalancer` node. - ```bash - $ eval $(docker-machine env loadbalancer) - ``` + ```bash + $ eval $(docker-machine env loadbalancer) + ``` 2. Check your work by reviewing the configuration of nginx. - ```html - $ docker exec interlock cat /etc/conf/nginx.conf - ... output snipped ... + ```html + $ docker exec interlock cat /etc/conf/nginx.conf + ... output snipped ... 
- upstream results.myenterprise.com { - zone results.myenterprise.com_backend 64k; + upstream results.myenterprise.example.com { + zone results.myenterprise.example.com_backend 64k; - server 192.168.99.111:80; + server 192.168.99.111:80; - } - server { - listen 80; + } + server { + listen 80; - server_name results.myenterprise.com; + server_name results.myenterprise.example.com; - location / { - proxy_pass http://results.myenterprise.com; - } - } - upstream vote.myenterprise.com { - zone vote.myenterprise.com_backend 64k; + location / { + proxy_pass http://results.myenterprise.example.com; + } + } + upstream vote.myenterprise.example.com { + zone vote.myenterprise.example.com_backend 64k; - server 192.168.99.109:80; - server 192.168.99.108:80; + server 192.168.99.109:80; + server 192.168.99.108:80; - } - server { - listen 80; + } + server { + listen 80; - server_name vote.myenterprise.com; + server_name vote.myenterprise.example.com; - location / { - proxy_pass http://vote.myenterprise.com; - } - } + location / { + proxy_pass http://vote.myenterprise.example.com; + } + } - include /etc/conf/conf.d/*.conf; - } - ``` + include /etc/conf/conf.d/*.conf; + } + ``` - The `http://vote.myenterprise.com` site configuration should point to either - frontend node. Requests to `http://results.myenterprise.com` go just to the - single `dbstore` node where the `example-voting-app-result-app` is running. + The `http://vote.myenterprise.example.com` site configuration should point to either + frontend node. Requests to `http://results.myenterprise.example.com` go just to the + single `dbstore` node where the `example-voting-app-result-app` is running. -8. On your local host, edit `/etc/hosts` file add the resolution for both these +3. On your local host, edit `/etc/hosts` file add the resolution for both these sites. -9. Save and close the `/etc/hosts` file. +4. Save and close the `/etc/hosts` file. -10. Restart the `nginx` container. +5. Restart the `nginx` container. Manual restart is required because the current Interlock server is not forcing an Nginx configuration reload. - ```bash - $ docker restart nginx - ``` + ```bash + $ docker restart nginx + ``` ## Task 4. Test the application Now, you can test your application. -1. Open a browser and navigate to the `http://vote.myenterprise.com` site. +1. Open a browser and navigate to the `http://vote.myenterprise.example.com` site. You should see something similar to the following: ![](../images/vote-app-test.png) 2. Click on one of the two voting options. -3. Navigate to the `http://results.myenterprise.com` site to see the results. +3. Navigate to the `http://results.myenterprise.example.com` site to see the results. 4. Try changing your vote. You'll see both sides change as you switch your vote. @@ -282,24 +282,24 @@ the containers at once. This extra credit in the `docker-compose.yml` file. For example, this command: - ```bash - $ docker -H $(docker-machine ip manager):3376 run -t -d \ - -e constraint:com.function==worker01 \ - --net="voteapp" \ - --net-alias=workers \ - --name worker01 docker/example-voting-app-worker - ``` + ```none + $ docker -H $(docker-machine ip manager):3376 run -t -d \ + -e constraint:com.function==worker01 \ + --net="voteapp" \ + --net-alias=workers \ + --name worker01 docker/example-voting-app-worker + ``` Becomes this in a Compose file. 
- ``` - worker: - image: docker/example-voting-app-worker - networks: - voteapp: - aliases: - - workers - ``` + ```bash + worker: + image: docker/example-voting-app-worker + networks: + voteapp: + aliases: + - workers + ``` In general, Compose starts services in reverse order they appear in the file. So, if you want a service to start before all the others, make it the last @@ -313,108 +313,108 @@ result file 5. Set `DOCKER_HOST` to the Swarm manager. - ```bash - $ DOCKER_HOST=$(docker-machine ip manager):3376 - ``` + ```bash + $ DOCKER_HOST=$(docker-machine ip manager):3376 + ``` 6. In the same directory as your `docker-compose.yml` file, start the services. - ```bash - $ docker-compose up -d - Creating network "scale_voteapp" with the default driver - Creating volume "scale_db-data" with default driver - Pulling db (postgres:9.4)... - worker01: Pulling postgres:9.4... : downloaded - dbstore: Pulling postgres:9.4... : downloaded - frontend01: Pulling postgres:9.4... : downloaded - frontend02: Pulling postgres:9.4... : downloaded - Creating db - Pulling redis (redis:latest)... - dbstore: Pulling redis:latest... : downloaded - frontend01: Pulling redis:latest... : downloaded - frontend02: Pulling redis:latest... : downloaded - worker01: Pulling redis:latest... : downloaded - Creating redis - Pulling worker (docker/example-voting-app-worker:latest)... - dbstore: Pulling docker/example-voting-app-worker:latest... : downloaded - frontend01: Pulling docker/example-voting-app-worker:latest... : downloaded - frontend02: Pulling docker/example-voting-app-worker:latest... : downloaded - worker01: Pulling docker/example-voting-app-worker:latest... : downloaded - Creating scale_worker_1 - Pulling voting-app (docker/example-voting-app-voting-app:latest)... - dbstore: Pulling docker/example-voting-app-voting-app:latest... : downloaded - frontend01: Pulling docker/example-voting-app-voting-app:latest... : downloaded - frontend02: Pulling docker/example-voting-app-voting-app:latest... : downloaded - worker01: Pulling docker/example-voting-app-voting-app:latest... : downloaded - Creating scale_voting-app_1 - Pulling result-app (docker/example-voting-app-result-app:latest)... - dbstore: Pulling docker/example-voting-app-result-app:latest... : downloaded - frontend01: Pulling docker/example-voting-app-result-app:latest... : downloaded - frontend02: Pulling docker/example-voting-app-result-app:latest... : downloaded - worker01: Pulling docker/example-voting-app-result-app:latest... : downloaded - Creating scale_result-app_1 - ``` - -9. Use the `docker ps` command to see the containers on the Swarm cluster. 
- - ```bash - $ docker -H $(docker-machine ip manager):3376 ps - CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES - b71555033caa docker/example-voting-app-result-app "node server.js" 6 seconds ago Up 4 seconds 192.168.99.104:32774->80/tcp frontend01/scale_result-app_1 - cf29ea21475d docker/example-voting-app-worker "/usr/lib/jvm/java-7-" 6 seconds ago Up 4 seconds worker01/scale_worker_1 - 98414cd40ab9 redis "/entrypoint.sh redis" 7 seconds ago Up 5 seconds 192.168.99.105:32774->6379/tcp frontend02/redis - 1f214acb77ae postgres:9.4 "/docker-entrypoint.s" 7 seconds ago Up 5 seconds 5432/tcp frontend01/db - 1a4b8f7ce4a9 docker/example-voting-app-voting-app "python app.py" 7 seconds ago Up 5 seconds 192.168.99.107:32772->80/tcp dbstore/scale_voting-app_1 - ``` + ```bash + $ docker-compose up -d + Creating network "scale_voteapp" with the default driver + Creating volume "scale_db-data" with default driver + Pulling db (postgres:9.4)... + worker01: Pulling postgres:9.4... : downloaded + dbstore: Pulling postgres:9.4... : downloaded + frontend01: Pulling postgres:9.4... : downloaded + frontend02: Pulling postgres:9.4... : downloaded + Creating db + Pulling redis (redis:latest)... + dbstore: Pulling redis:latest... : downloaded + frontend01: Pulling redis:latest... : downloaded + frontend02: Pulling redis:latest... : downloaded + worker01: Pulling redis:latest... : downloaded + Creating redis + Pulling worker (docker/example-voting-app-worker:latest)... + dbstore: Pulling docker/example-voting-app-worker:latest... : downloaded + frontend01: Pulling docker/example-voting-app-worker:latest... : downloaded + frontend02: Pulling docker/example-voting-app-worker:latest... : downloaded + worker01: Pulling docker/example-voting-app-worker:latest... : downloaded + Creating scale_worker_1 + Pulling voting-app (docker/example-voting-app-voting-app:latest)... + dbstore: Pulling docker/example-voting-app-voting-app:latest... : downloaded + frontend01: Pulling docker/example-voting-app-voting-app:latest... : downloaded + frontend02: Pulling docker/example-voting-app-voting-app:latest... : downloaded + worker01: Pulling docker/example-voting-app-voting-app:latest... : downloaded + Creating scale_voting-app_1 + Pulling result-app (docker/example-voting-app-result-app:latest)... + dbstore: Pulling docker/example-voting-app-result-app:latest... : downloaded + frontend01: Pulling docker/example-voting-app-result-app:latest... : downloaded + frontend02: Pulling docker/example-voting-app-result-app:latest... : downloaded + worker01: Pulling docker/example-voting-app-result-app:latest... : downloaded + Creating scale_result-app_1 + ``` + +7. Use the `docker ps` command to see the containers on the Swarm cluster. 
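    As an optional aside (not part of the original steps), if you only want the containers that belong to this Compose project rather than everything on the cluster, `docker-compose ps` gives a narrower view than the cluster-wide listing shown next:

    ```bash
    # Optional: list only the containers created by this Compose project
    # (run from the directory that holds the docker-compose.yml file)
    docker-compose ps
    ```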
+ + ```bash + $ docker -H $(docker-machine ip manager):3376 ps + CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES + b71555033caa docker/example-voting-app-result-app "node server.js" 6 seconds ago Up 4 seconds 192.168.99.104:32774->80/tcp frontend01/scale_result-app_1 + cf29ea21475d docker/example-voting-app-worker "/usr/lib/jvm/java-7-" 6 seconds ago Up 4 seconds worker01/scale_worker_1 + 98414cd40ab9 redis "/entrypoint.sh redis" 7 seconds ago Up 5 seconds 192.168.99.105:32774->6379/tcp frontend02/redis + 1f214acb77ae postgres:9.4 "/docker-entrypoint.s" 7 seconds ago Up 5 seconds 5432/tcp frontend01/db + 1a4b8f7ce4a9 docker/example-voting-app-voting-app "python app.py" 7 seconds ago Up 5 seconds 192.168.99.107:32772->80/tcp dbstore/scale_voting-app_1 + ``` When you started the services manually, you had a `voting-app` instances running on two frontend servers. How many do you have now? -10. Scale your application up by adding some `voting-app` instances. +8. Scale your application up by adding some `voting-app` instances. - ```bash - $ docker-compose scale voting-app=3 - Creating and starting 2 ... done - Creating and starting 3 ... done - ``` + ```bash + $ docker-compose scale voting-app=3 + Creating and starting 2 ... done + Creating and starting 3 ... done + ``` After you scale up, list the containers on the cluster again. -7. Change to the `loadbalancer` node. +9. Change to the `loadbalancer` node. + + ```bash + $ eval $(docker-machine env loadbalancer) + ``` + +10. Restart the Nginx server. ```bash - $ eval $(docker-machine env loadbalancer) + $ docker restart nginx ``` +11. Check your work again by visiting the `http://vote.myenterprise.example.com` and +`http://results.myenterprise.example.com` again. -7. Restart the Nginx server. +12. You can view the logs on an individual container. ```bash - $ docker restart nginx + $ docker logs scale_voting-app_1 + * Running on http://0.0.0.0:80/ (Press CTRL+C to quit) + * Restarting with stat + * Debugger is active! + * Debugger pin code: 285-809-660 + 192.168.99.103 - - [11/Apr/2016 17:15:44] "GET / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:15:44] "GET /static/stylesheets/style.css HTTP/1.0" 304 - + 192.168.99.103 - - [11/Apr/2016 17:15:45] "GET /favicon.ico HTTP/1.0" 404 - + 192.168.99.103 - - [11/Apr/2016 17:22:24] "POST / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:23:37] "POST / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:23:39] "POST / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:23:40] "POST / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:23:41] "POST / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:23:43] "POST / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:23:44] "POST / HTTP/1.0" 200 - + 192.168.99.103 - - [11/Apr/2016 17:23:46] "POST / HTTP/1.0" 200 - ``` -8. Check your work again by visiting the `http://vote.myenterprise.com` and -`http://results.myenterprise.com` again. - -9. You can view the logs on an individual container. - - ```bash - $ docker logs scale_voting-app_1 - * Running on http://0.0.0.0:80/ (Press CTRL+C to quit) - * Restarting with stat - * Debugger is active! 
- * Debugger pin code: 285-809-660 - 192.168.99.103 - - [11/Apr/2016 17:15:44] "GET / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:15:44] "GET /static/stylesheets/style.css HTTP/1.0" 304 - - 192.168.99.103 - - [11/Apr/2016 17:15:45] "GET /favicon.ico HTTP/1.0" 404 - - 192.168.99.103 - - [11/Apr/2016 17:22:24] "POST / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:23:37] "POST / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:23:39] "POST / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:23:40] "POST / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:23:41] "POST / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:23:43] "POST / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:23:44] "POST / HTTP/1.0" 200 - - 192.168.99.103 - - [11/Apr/2016 17:23:46] "POST / HTTP/1.0" 200 - - ``` This log shows the activity on one of the active voting application containers. diff --git a/swarm/swarm_at_scale/deploy-infra.md b/swarm/swarm_at_scale/deploy-infra.md index cc40054f5e6..946586371fc 100644 --- a/swarm/swarm_at_scale/deploy-infra.md +++ b/swarm/swarm_at_scale/deploy-infra.md @@ -46,7 +46,7 @@ actual value. ## Task 1. Create the keystore server To enable a Docker container network and Swarm discovery, you must -supply deploy a key-value store. As a discovery backend, the keystore +deploy (or supply) a key-value store. As a discovery backend, the key-value store maintains an up-to-date list of cluster members and shares that list with the Swarm manager. The Swarm manager uses this list to assign tasks to the nodes. @@ -54,47 +54,48 @@ An overlay network requires a key-value store. The key-value store holds information about the network state which includes discovery, networks, endpoints, IP addresses, and more. -Several different backends are supported. This example uses Consul container. 1. Create a "machine" named `keystore`. - ```bash - $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ - --engine-opt="label=com.function=consul" keystore - ``` + ```bash + $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ + --engine-opt="label=com.function=consul" keystore + ``` - You can set options for the Engine daemon with the `--engine-opt` flag. You'll - use it to label this Engine instance. + You can set options for the Engine daemon with the `--engine-opt` flag. You'll + use it to label this Engine instance. 2. Set your local shell to the `keystore` Docker host. - ```bash - $ eval $(docker-machine env keystore) - ``` + ```bash + $ eval $(docker-machine env keystore) + ``` + 3. Run the `consul` container. - ```bash - $ docker run --restart=unless-stopped -d -p 8500:8500 -h consul progrium/consul -server -bootstrap - ``` + ```bash + $ docker run --restart=unless-stopped -d -p 8500:8500 -h consul progrium/consul -server -bootstrap + ``` - The `-p` flag publishes port 8500 on the container which is where the Consul - server listens. The server also has several other ports exposed which you can - see by running `docker ps`. + The `-p` flag publishes port 8500 on the container which is where the Consul + server listens. The server also has several other ports exposed which you can + see by running `docker ps`. - ```bash - $ docker ps - CONTAINER ID IMAGE ... PORTS NAMES - 372ffcbc96ed progrium/consul ... 53/tcp, 53/udp, 8300-8302/tcp, 8400/tcp, 8301-8302/udp, 0.0.0.0:8500->8500/tcp dreamy_ptolemy - ``` + ```bash + $ docker ps + CONTAINER ID IMAGE ... PORTS NAMES + 372ffcbc96ed progrium/consul ... 
53/tcp, 53/udp, 8300-8302/tcp, 8400/tcp, 8301-8302/udp, 0.0.0.0:8500->8500/tcp dreamy_ptolemy + ``` 4. Use a `curl` command test the server by listing the nodes. - ```bash - $ curl $(docker-machine ip keystore):8500/v1/catalog/nodes - [{"Node":"consul","Address":"172.17.0.2"}] - ``` + ```bash + $ curl $(docker-machine ip keystore):8500/v1/catalog/nodes + [{"Node":"consul","Address":"172.17.0.2"}] + ``` ## Task 2. Create the Swarm manager @@ -112,62 +113,62 @@ support the container network you'll create later. 1. Create the `manager` host. - ```bash - $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ - --engine-opt="label=com.function=manager" \ - --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ - --engine-opt="cluster-advertise=eth1:2376" manager - ``` + ```bash + $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ + --engine-opt="label=com.function=manager" \ + --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ + --engine-opt="cluster-advertise=eth1:2376" manager + ``` - You also give the daemon a `manager` label. + You also give the daemon a `manager` label. 2. Set your local shell to the `manager` Docker host. - ```bash - $ eval $(docker-machine env manager) - ``` + ```bash + $ eval $(docker-machine env manager) + ``` 3. Start the Swarm manager process. - ```bash - $ docker run --restart=unless-stopped -d -p 3376:2375 \ - -v /var/lib/boot2docker:/certs:ro \ - swarm manage --tlsverify \ - --tlscacert=/certs/ca.pem \ - --tlscert=/certs/server.pem \ - --tlskey=/certs/server-key.pem \ - consul://$(docker-machine ip keystore):8500 - ``` + ```bash + $ docker run --restart=unless-stopped -d -p 3376:2375 \ + -v /var/lib/boot2docker:/certs:ro \ + swarm manage --tlsverify \ + --tlscacert=/certs/ca.pem \ + --tlscert=/certs/server.pem \ + --tlskey=/certs/server-key.pem \ + consul://$(docker-machine ip keystore):8500 + ``` - This command uses the TLS certificates created for the `boot2docker.iso` or - the manager. This is key for the manager when it connects to other machines - in the cluster. + This command uses the TLS certificates created for the `boot2docker.iso` or + the manager. This is key for the manager when it connects to other machines + in the cluster. 4. Test your work by using displaying the Docker daemon logs from the host. 
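    If you prefer not to open an interactive session, `docker-machine ssh` can also run a single command on the host and return. This optional shortcut produces the same log output as the commands shown below:

    ```bash
    # Optional one-liner: tail the daemon log on the manager host
    # without opening an interactive shell
    docker-machine ssh manager "tail /var/lib/boot2docker/docker.log"
    ```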
- ```bash - $ docker-machine ssh manager - <-- output snipped --> - docker@manager:~$ tail /var/lib/boot2docker/docker.log - time="2016-04-06T23:11:56.481947896Z" level=debug msg="Calling GET /v1.15/version" - time="2016-04-06T23:11:56.481984742Z" level=debug msg="GET /v1.15/version" - time="2016-04-06T23:12:13.070231761Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - time="2016-04-06T23:12:33.069387215Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - time="2016-04-06T23:12:53.069471308Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - time="2016-04-06T23:13:13.069512320Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - time="2016-04-06T23:13:33.070021418Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - time="2016-04-06T23:13:53.069395005Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - time="2016-04-06T23:14:13.071417551Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - time="2016-04-06T23:14:33.069843647Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul - ``` - - The output indicates that the `consul` and the `manager` are communicating correctly. + ```bash + $ docker-machine ssh manager + <-- output snipped --> + docker@manager:~$ tail /var/lib/boot2docker/docker.log + time="2016-04-06T23:11:56.481947896Z" level=debug msg="Calling GET /v1.15/version" + time="2016-04-06T23:11:56.481984742Z" level=debug msg="GET /v1.15/version" + time="2016-04-06T23:12:13.070231761Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + time="2016-04-06T23:12:33.069387215Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + time="2016-04-06T23:12:53.069471308Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + time="2016-04-06T23:13:13.069512320Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + time="2016-04-06T23:13:33.070021418Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + time="2016-04-06T23:13:53.069395005Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + time="2016-04-06T23:14:13.071417551Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + time="2016-04-06T23:14:33.069843647Z" level=debug msg="Watch triggered with 1 nodes" discovery=consul + ``` + + The output indicates that the `consul` and the `manager` are communicating correctly. 5. Exit the Docker host. - ```bash - docker@manager:~$ exit - ``` + ```bash + docker@manager:~$ exit + ``` ## Task 3. Add the load balancer @@ -177,58 +178,60 @@ loadbalancer. Before you build the load balancer host, you'll create the configuration you'll use for Nginx. 1. On your local host, create a `config` directory. + 2. Change directories to the `config` directory. - ```bash - $ cd config - ``` + ```bash + $ cd config + ``` + 3. Get the IP address of the Swarm manager host. - For example: + For example: - ```bash - $ docker-machine ip manager - 192.168.99.101 - ``` + ```bash + $ docker-machine ip manager + 192.168.99.101 + ``` 5. 
Use your favorite editor to create a `config.toml` file and add this content to the file: - ```json - ListenAddr = ":8080" - DockerURL = "tcp://SWARM_MANAGER_IP:3376" - TLSCACert = "/var/lib/boot2docker/ca.pem" - TLSCert = "/var/lib/boot2docker/server.pem" - TLSKey = "/var/lib/boot2docker/server-key.pem" - - [[Extensions]] - Name = "nginx" - ConfigPath = "/etc/conf/nginx.conf" - PidPath = "/etc/conf/nginx.pid" - MaxConn = 1024 - Port = 80 - ``` + ```json + ListenAddr = ":8080" + DockerURL = "tcp://SWARM_MANAGER_IP:3376" + TLSCACert = "/var/lib/boot2docker/ca.pem" + TLSCert = "/var/lib/boot2docker/server.pem" + TLSKey = "/var/lib/boot2docker/server-key.pem" + + [[Extensions]] + Name = "nginx" + ConfigPath = "/etc/conf/nginx.conf" + PidPath = "/etc/conf/nginx.pid" + MaxConn = 1024 + Port = 80 + ``` 6. In the configuration, replace the `SWARM_MANAGER_IP` with the `manager` IP you got in Step 4. - You use this value because the load balancer listens on the manager's event - stream. + You use this value because the load balancer listens on the manager's event + stream. 7. Save and close the `config.toml` file. 8. Create a machine for the load balancer. - ```bash - $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ - --engine-opt="label=com.function=interlock" loadbalancer - ``` + ```bash + $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ + --engine-opt="label=com.function=interlock" loadbalancer + ``` 9. Switch the environment to the `loadbalancer`. - ```bash - $ eval $(docker-machine env loadbalancer) - ``` + ```bash + $ eval $(docker-machine env loadbalancer) + ``` 10. Start an `interlock` container running. @@ -312,38 +315,38 @@ commands below, notice the label you are applying to each node. 1. Create the `frontend01` host and add it to the Swarm cluster. - ```bash - $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ - --engine-opt="label=com.function=frontend01" \ - --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ - --engine-opt="cluster-advertise=eth1:2376" frontend01 - $ eval $(docker-machine env frontend01) - $ docker run -d swarm join --addr=$(docker-machine ip frontend01):2376 consul://$(docker-machine ip keystore):8500 - ``` + ```bash + $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ + --engine-opt="label=com.function=frontend01" \ + --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ + --engine-opt="cluster-advertise=eth1:2376" frontend01 + $ eval $(docker-machine env frontend01) + $ docker run -d swarm join --addr=$(docker-machine ip frontend01):2376 consul://$(docker-machine ip keystore):8500 + ``` 2. Create the `frontend02` VM. 
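    Before repeating the pattern for this node, you can optionally confirm that the join agent from the previous step is running on `frontend01`. This spot check is an addition to the guide and assumes that machine's environment is still loaded in your shell:

    ```bash
    # Optional: the swarm join container should show as "Up" on the node you just created
    docker ps --filter ancestor=swarm
    ```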
- ```bash - $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ - --engine-opt="label=com.function=frontend02" \ - --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ - --engine-opt="cluster-advertise=eth1:2376" frontend02 - $ eval $(docker-machine env frontend02) - $ docker run -d swarm join --addr=$(docker-machine ip frontend02):2376 consul://$(docker-machine ip keystore):8500 - ``` + ```bash + $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ + --engine-opt="label=com.function=frontend02" \ + --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ + --engine-opt="cluster-advertise=eth1:2376" frontend02 + $ eval $(docker-machine env frontend02) + $ docker run -d swarm join --addr=$(docker-machine ip frontend02):2376 consul://$(docker-machine ip keystore):8500 + ``` 3. Create the `worker01` VM. - ```bash - $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ - --engine-opt="label=com.function=worker01" \ - --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ - --engine-opt="cluster-advertise=eth1:2376" worker01 - $ eval $(docker-machine env worker01) - $ docker run -d swarm join --addr=$(docker-machine ip worker01):2376 consul://$(docker-machine ip keystore):8500 - ``` + ```bash + $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ + --engine-opt="label=com.function=worker01" \ + --engine-opt="cluster-store=consul://$(docker-machine ip keystore):8500" \ + --engine-opt="cluster-advertise=eth1:2376" worker01 + $ eval $(docker-machine env worker01) + $ docker run -d swarm join --addr=$(docker-machine ip worker01):2376 consul://$(docker-machine ip keystore):8500 + ``` -4. Create the `dbstore` VM. +4. Create the `dbstore` VM. ```bash $ docker-machine create -d virtualbox --virtualbox-memory "2000" \ @@ -354,7 +357,7 @@ commands below, notice the label you are applying to each node. $ docker run -d swarm join --addr=$(docker-machine ip dbstore):2376 consul://$(docker-machine ip keystore):8500 ``` -5. Check your work. +5. Check your work. At this point, you have deployed on the infrastructure you need to run the application. Test this now by listing the running machines: @@ -371,7 +374,7 @@ commands below, notice the label you are applying to each node. worker01 * virtualbox Running tcp://192.168.99.110:2376 v1.10.3 ``` -6. Make sure the Swarm manager sees all your nodes. +6. Make sure the Swarm manager sees all your nodes. ``` $ docker -H $(docker-machine ip manager):3376 info @@ -433,5 +436,5 @@ commands below, notice the label you are applying to each node. ## Next Step -Your key-store, load balancer, and Swarm cluster infrastructure is up. You are +Your key-value store, load balancer, and Swarm cluster infrastructure are up. You are ready to [build and run the voting application](deploy-app.md) on it. 
diff --git a/swarm/swarm_at_scale/docker-compose.yml b/swarm/swarm_at_scale/docker-compose.yml index 65abb41642f..061d00c4434 100644 --- a/swarm/swarm_at_scale/docker-compose.yml +++ b/swarm/swarm_at_scale/docker-compose.yml @@ -9,7 +9,7 @@ services: - voteapp labels: interlock.hostname: "vote" - interlock.domain: "myenterprise.com" + interlock.domain: "myenterprise.example.com" result-app: image: docker/example-voting-app-result-app ports: @@ -18,7 +18,7 @@ services: - voteapp labels: interlock.hostname: "results" - interlock.domain: "myenterprise.com" + interlock.domain: "myenterprise.example.com" worker: image: docker/example-voting-app-worker networks: diff --git a/toolbox/overview.md b/toolbox/overview.md index 4fa444b7f1c..26f69db1dd5 100644 --- a/toolbox/overview.md +++ b/toolbox/overview.md @@ -37,7 +37,7 @@ Go to the [ \"es\" ] } } filter { json { source => \"message\" } }'" + +docker run -d \ + --name kibana \ + --link elasticsearch:elasticsearch \ + -p 5601:5601 \ + kibana +``` + +Once you have these containers running, configure UCP to send logs to +the IP of the Logstash container. You can then browse to port 5601 on the system +running Kibana and browse log/event entries. You should specify the "time" +field for indexing. + +When deployed in a production environment, you should secure your ELK +stack. UCP does not do this itself, but there are a number of 3rd party +options that can accomplish this (e.g. Shield plug-in for Kibana) ## Where to go next diff --git a/ucp/configuration/dtr-integration.md b/ucp/configuration/dtr-integration.md index d150a3665e7..5578116e9c3 100644 --- a/ucp/configuration/dtr-integration.md +++ b/ucp/configuration/dtr-integration.md @@ -53,7 +53,7 @@ certificate used internally by UCP: This command prints a certificate block like this: - ```markdown + ```none -----BEGIN CERTIFICATE----- MIIFJDCCAwygAwIBAgIIDAApo7wvQCIwDQYJKoZIhvcNAQENBQAwHjEcMBoGA1UE AxMTVUNQIENsdXN0ZXIgUm9vdCBDQTAeFw0xNjA2MDEyMTMzMDBaFw0yMTA1MzEy @@ -183,7 +183,7 @@ commands in the UCP cluster. When successfully pushing the image you should see a result like: - ```markdown + ```none The push refers to a repository [dtr/username/hello-world] 5f70bf18a086: Pushed 33e7801ac047: Pushed @@ -201,7 +201,7 @@ images from a UCP node to a private DTR repository. When UCP can't communicate with DTR, you'll get: -```markdown +```none $ docker push dtr/username/hello-world:1 The push refers to a repository [dtr/username/hello-world] @@ -216,7 +216,7 @@ with DTR. When one of the components is misconfigured, and doesn't trust the root CA certificate of the other components, you'll get an error like: -```markdown +```none $ docker push dtr/username/hello-world:1 The push refers to a repository [dtr/username/hello-world] diff --git a/ucp/configuration/use-externally-signed-certs.md b/ucp/configuration/use-externally-signed-certs.md index 8ffb184bd78..ff1d7724a5c 100644 --- a/ucp/configuration/use-externally-signed-certs.md +++ b/ucp/configuration/use-externally-signed-certs.md @@ -38,7 +38,7 @@ To replace the server certificates used by UCP, for each controller node: 1. Login into the node with ssh. 2. 
In the directory where you have the keys and certificates to use, run: - ```bash + ```none # Create a container that attaches to the same volume where certificates are stored $ docker create --name replace-certs -v ucp-controller-server-certs:/data busybox diff --git a/ucp/high-availability/backups-and-disaster-recovery.md b/ucp/high-availability/backups-and-disaster-recovery.md index 2ca58d9a5cd..cfcc1bda60f 100644 --- a/ucp/high-availability/backups-and-disaster-recovery.md +++ b/ucp/high-availability/backups-and-disaster-recovery.md @@ -45,7 +45,7 @@ across multiple UCP controller nodes. The example below shows how to create a backup of a UCP controller node: -```bash +```none # Create a backup, encrypt it, and store it on /tmp/backup.tar $ docker run --rm -i --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ diff --git a/ucp/high-availability/replicate-cas.md b/ucp/high-availability/replicate-cas.md index 5bc637ee2d7..df11a38990c 100644 --- a/ucp/high-availability/replicate-cas.md +++ b/ucp/high-availability/replicate-cas.md @@ -60,7 +60,7 @@ containers, so you should use it outside business peak hours. Log into the node using ssh, and run: -```bash +```none $ docker run --rm -i --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp backup --root-ca-only --interactive \ @@ -82,7 +82,7 @@ use the same certificate and private key. Log into the node using ssh, and run: -```bash +```none $ docker run --rm -i --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp restore --root-ca-only --interactive \ diff --git a/ucp/high-availability/set-up-high-availability.md b/ucp/high-availability/set-up-high-availability.md index 612760b63cf..11ca4ef5af3 100644 --- a/ucp/high-availability/set-up-high-availability.md +++ b/ucp/high-availability/set-up-high-availability.md @@ -55,7 +55,7 @@ replicas. When configuring UCP for high-availability, you need to ensure the CAs running on each UCP controller node are interchangeable. This is done by transferring -root certificates and keys for the CAs to each controller node on the cluster. +root certificates and keys for the CAs to each controller node on the cluster. [Learn how to replicate CAs for high availability](replicate-cas.md) ## Load-balancing on UCP @@ -67,7 +67,7 @@ load-balancer to balance user requests across all controller replicas. Since Docker UCP uses mutual TLS, make sure you configure your load balancer to: * Load-balance TCP traffic on ports 80 and 443, -* Not terminate HTTPS connections, +* Use a TCP load balancer that doesn't terminate HTTPS connections, * Use the `/_ping` endpoint on each UCP controller, to check if the controller is healthy and if it should remain on the load balancing pool or not. 
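As an illustration of what that health check sees, you can query the endpoint directly from any machine that can reach a controller. This example is an addition to the guide; the controller address is a placeholder, and `-k` only skips certificate verification, which you may need if the controllers use self-signed or internal CA certificates:

```bash
# A healthy controller should answer /_ping with HTTP 200
curl -k -s -o /dev/null -w "%{http_code}\n" https://UCP_CONTROLLER_IP/_ping
```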
diff --git a/ucp/images/container_edit.png b/ucp/images/container_edit.png deleted file mode 100644 index d168849ef35..00000000000 Binary files a/ucp/images/container_edit.png and /dev/null differ diff --git a/ucp/images/controller-containers.png b/ucp/images/controller-containers.png deleted file mode 100644 index 16ed7cc8376..00000000000 Binary files a/ucp/images/controller-containers.png and /dev/null differ diff --git a/ucp/images/dashboard.png b/ucp/images/dashboard.png deleted file mode 100644 index d4f790362f8..00000000000 Binary files a/ucp/images/dashboard.png and /dev/null differ diff --git a/ucp/images/display_container.png b/ucp/images/display_container.png deleted file mode 100644 index a41b9624e35..00000000000 Binary files a/ucp/images/display_container.png and /dev/null differ diff --git a/ucp/images/get-license.png b/ucp/images/get-license.png deleted file mode 100644 index 90bae15a44d..00000000000 Binary files a/ucp/images/get-license.png and /dev/null differ diff --git a/ucp/images/install-sandbox-1.png b/ucp/images/install-sandbox-1.png new file mode 100644 index 00000000000..9b3962a4a6e Binary files /dev/null and b/ucp/images/install-sandbox-1.png differ diff --git a/ucp/images/install-sandbox-2-1.png b/ucp/images/install-sandbox-2-1.png new file mode 100644 index 00000000000..7f4e27d5f84 Binary files /dev/null and b/ucp/images/install-sandbox-2-1.png differ diff --git a/ucp/images/install-sandbox-2-2.png b/ucp/images/install-sandbox-2-2.png new file mode 100644 index 00000000000..52d2be1d90f Binary files /dev/null and b/ucp/images/install-sandbox-2-2.png differ diff --git a/ucp/images/install-sandbox-2-3.png b/ucp/images/install-sandbox-2-3.png new file mode 100644 index 00000000000..ad24c32086a Binary files /dev/null and b/ucp/images/install-sandbox-2-3.png differ diff --git a/ucp/images/install-sandbox-2.png b/ucp/images/install-sandbox-2.png new file mode 100644 index 00000000000..3da450cb807 Binary files /dev/null and b/ucp/images/install-sandbox-2.png differ diff --git a/ucp/images/install-sandbox-3.png b/ucp/images/install-sandbox-3.png new file mode 100644 index 00000000000..9e82ff67a76 Binary files /dev/null and b/ucp/images/install-sandbox-3.png differ diff --git a/ucp/images/license.png b/ucp/images/license.png deleted file mode 100644 index 03242137170..00000000000 Binary files a/ucp/images/license.png and /dev/null differ diff --git a/ucp/images/port_config.png b/ucp/images/port_config.png deleted file mode 100644 index 5b6e3fea42f..00000000000 Binary files a/ucp/images/port_config.png and /dev/null differ diff --git a/ucp/images/skip-this.png b/ucp/images/skip-this.png deleted file mode 100644 index f917ebcc926..00000000000 Binary files a/ucp/images/skip-this.png and /dev/null differ diff --git a/ucp/images/welcome_nginx.png b/ucp/images/welcome_nginx.png deleted file mode 100644 index 37a57ec484b..00000000000 Binary files a/ucp/images/welcome_nginx.png and /dev/null differ diff --git a/ucp/install-sandbox-2.md b/ucp/install-sandbox-2.md new file mode 100644 index 00000000000..c3c1638d91c --- /dev/null +++ b/ucp/install-sandbox-2.md @@ -0,0 +1,279 @@ +--- +title: Evaluate DDC in a sandbox deployment +description: Learn how to push Docker images to your private registry and deploy them to your cluster. 
+keywords: "Docker Datacenter, registry, orchestration" +--- + +# Evaluate DDC in a sandbox deployment + +This tutorial assumes that you have +[installed and configured](install-sandbox.md) a two-node Docker Datacenter +installation including both UCP and DTR using the instructions +[here](install-sandbox.md). If you haven't done this, we can't promise that this +tutorial workflow will work exactly the same. + +In the second half of this tutorial, we'll walk you through a typical deployment +workflow using your sandbox installation of DDC as if it was a production +instance installed on your organization's network. + + +Over the course of this tutorial, we will: + +- Create a repository in DTR +- Set up certificates or set insecure flag +- Pull a Docker image, tag it and push it to your DTR repo. +- Use UCP to deploy the image to a node + + +## Step 1: Set --insecure registry or set up DTR trust and log in + +First, we'll set up a security exception that allows a the Docker-machine hosts +used in your UCP cluster to push images to and pull images from DTR even though +the DTR instance has a self-signed certificate. For a production deployment, +you would +[set up certificate trust](https://docs.docker.com/ucp/configuration/dtr-integration/) +between UCP and DTR, and +[between DTR and your Docker Engine](https://docs.docker.com/docker-trusted-registry/repos-and-images/), +but for our sandbox deployment we can skip this. + +> **Warning**: These steps produce an insecure DTR connection. Do not use these +configuration steps for a production deployment. + +To allow the Docker Engine to connect to DTR despite it having a self-signed +certificate, we'll specify that there is one insecure registry that we'll allow +the Engine instance to connect to. We'll add this exception by editing the +configuration file where docker-machine stores the host's configuration details. + +1. Edit the file found at `~/.docker/machine/machines/node1/config.json` using +your preferred text editor. + + For example + + ```none + $ vi ~/.docker/machine/machines/node1/config.json + ``` + +2. Locate `InsecureRegistry` key in `EngineOptions` section, and add your DTR +instance's IP between the brackets, enclosed in quotes. + + For example + + ``` + "InsecureRegistry": ["192.168.99.101"], + ``` + +3. Save your changes to the file and exit. + +4. Run the command `docker-machine provision node1` to update `node1`'s +configuration with the new `InsecureRegistry` setting. + +5. Repeat this process for `node2`. + + Because UCP runs a `docker pull` from DTR for each node in its cluster, + you must make this security exception for all nodes in the cluster. + +This allows you to push docker images to, and pull docker images from, the +registry. + + +## Step 2: Create an image repository in DTR + +In this step, we'll create an image repository in DTR that you will be able to +push Docker images to. Remember a Docker image is a combination of code and +filesystem used as a template to create a container. + +1. In your web browser, go to the DTR web UI. + + If you need help finding the URL for this host, you can use + `docker-machine ls` to find the IP for `node2` where you installed DTR. + +2. Log in to DTR using your administrator credentials. + +3. Navigate to the **Repositories** screen and click **New Repository**. + +4. In the repository name field, enter `my-nginx`. + +5. Click **Save**. + +## Step 3: Pull an image, tag and push to DTR + +1. In your terminal, make sure `node1` is active using `docker-machine ls`. 
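    The active machine is flagged with an asterisk in the `ACTIVE` column. The listing might look roughly like the following; the machine names match this sandbox, but the IP addresses are only illustrative:

    ```bash
    $ docker-machine ls
    NAME    ACTIVE   DRIVER       STATE     URL                         SWARM
    node1   *        virtualbox   Running   tcp://192.168.99.100:2376
    node2   -        virtualbox   Running   tcp://192.168.99.101:2376
    ```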
+
+## Step 2: Create an image repository in DTR
+
+In this step, we'll create an image repository in DTR that you will be able to
+push Docker images to. Remember that a Docker image is a combination of code and
+filesystem used as a template to create a container.
+
+1. In your web browser, go to the DTR web UI.
+
+   If you need help finding the URL for this host, you can use
+   `docker-machine ls` to find the IP for `node2` where you installed DTR.
+
+2. Log in to DTR using your administrator credentials.
+
+3. Navigate to the **Repositories** screen and click **New Repository**.
+
+4. In the repository name field, enter `my-nginx`.
+
+5. Click **Save**.
+
+## Step 3: Pull an image, tag and push to DTR
+
+1. In your terminal, make sure `node1` is active using `docker-machine ls`.
+
+   This is the node that you configured the security exception for, and if you
+   are connecting to a Docker Engine without this exception you won't be able
+   to push to your DTR instance.
+
+   If necessary, use `docker-machine env` to make `node1` active.
+
+   ```none
+   $ eval "$(docker-machine env node1)"
+   ```
+
+2. Pull the latest Nginx image.
+
+   ```none
+   $ docker pull nginx:latest
+   ```
+
+   Because you aren't specifying a registry as part of the `pull` command,
+   Docker Engine locates and downloads the latest `nginx` image from Docker
+   Cloud's registry.
+
+3. Log in to your DTR instance on `node2` using the `docker login` command and
+the DTR instance's IP address.
+
+   ```none
+   $ docker login $(docker-machine ip node2)
+   ```
+
+   Enter your administrator username and password when prompted.
+
+4. Tag the `nginx` image you downloaded.
+
+   Use the IP address of your DTR instance to specify the repository path and the tag.
+
+   ```none
+   $ docker tag nginx:latest $(docker-machine ip node2)/admin/my-nginx:official
+   ```
+
+5. Push the tagged image to your DTR instance.
+
+   ```none
+   $ docker push $(docker-machine ip node2)/admin/my-nginx:official
+   ```
+
+You now have a copy of the official Nginx Docker image available on your
+sandbox DTR instance.
+
+## Step 4: Pull your image from DTR into UCP
+
+UCP does not automatically pull images from DTR. To make an image from DTR
+appear in UCP, you'll use the UCP web UI to perform a `docker pull`. This `pull`
+command pulls the image and makes it available on all nodes in the UCP cluster.
+
+1. From the UCP dashboard, click **Images** in the left navigation.
+
+2. Click **Pull Image**.
+
+3. Enter the full path to the image that you just pushed to your DTR instance.
+
+   For the example path in this demo, use `/admin/my-nginx:official`.
+
+4. Click **Pull**.
+
+   UCP contacts the DTR host, and pulls the image on each node in the cluster.
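+
+If you'd like to confirm the pull from the command line as well, you can check
+for the image on one of the nodes. This is an optional check, sketched under
+the assumption that you used the `admin/my-nginx` repository created earlier.
+
+```none
+# With node1 active, list local copies of the image UCP just pulled
+$ docker images "$(docker-machine ip node2)/admin/my-nginx"
+```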
+
+## Step 5. Deploy a container from the UCP web interface
+
+UCP allows you to deploy and manage "Dockerized" applications in production. An
+application is built using Docker objects, such as images and containers, and
+Docker resources, such as volumes and networks.
+
+UCP deploys and manages these objects and resources using remote API calls to
+the Engine daemons running on the nodes. For example, the `run` action may
+deploy an image in a Docker container. That image might define a service such as
+an Nginx web server or a database like Postgres.
+
+A UCP administrator initiates Engine actions using the UCP dashboard or the
+Docker Engine CLI. In this step, you deploy a container from the UCP dashboard.
+The container runs an Nginx server, so you'll need to launch the `nginx` image
+inside of it.
+
+1. Log in to the UCP **Dashboard**.
+
+2. Click **Containers** from the left navigation.
+
+   The system displays the **Containers** page.
+
+   > **Tip**: UCP runs some containers that support its own operations called
+   "system" containers. These containers are hidden by default.
+
+3. Click **+ Deploy Container**.
+
+   We'll deploy the simple `nginx` container you just pulled, using specific
+   values for each field. If you already know what you're doing, feel free to
+   explore once you've completed this example.
+
+4. Enter the path to the `nginx:official` image you just pulled in the
+**image name** field.
+
+   This should look something like `/admin/my-nginx:official`.
+
+   An image is a specific build of software you want to run. The software might
+   be a stand-alone application, or component software necessary to support a
+   complex service.
+
+5. Enter `nginx_official` for the container name.
+
+   This name just identifies the container on your network.
+
+6. Click **Network** to expand the networking settings.
+
+   A Docker container is isolated from other processes on your network and has
+   its own internal network configuration. To access the service inside a
+   container, you need to expose the container's port, which maps to a port on
+   the node. The node is hosting an instance of Docker Engine, so its port is
+   called the **Host Port**.
+
+7. Enter `443` in the **Port** field and enter `4443` in the **Host Port** field.
+
+   We're mapping port 443 in the container to a different port on the host
+   because your UCP instance is already serving the web interface on port 443.
+
+8. Click the plus sign to add another **Port**.
+
+9. For this port, enter `80` in the **Port** field, and enter `8080` in the
+**Host Port** field.
+
+   When you are done, your dialog should look like this:
+
+   ![Port configuration](images/install-sandbox-2-1.png)
+
+10. Click **Run Container** to deploy the container.
+
+
+## Step 6. View a running service
+
+At this point, you have deployed a container and you should see that the container
+status is `running`. Recall that you deployed an Nginx web server. That server
+comes with a default page that you can view to validate that the server is
+running. In this step, you open the running server.
+
+1. Navigate back to the **Containers** page.
+
+2. Click the **nginx_official** container.
+
+   ![](images/install-sandbox-2-2.png)
+
+   The system displays the container's details and some operations you can run
+   on the container.
+
+3. Scroll down to the ports section.
+
+   You'll see an IP address with port `8080` for the server.
+
+4. Copy the IP address and port, and paste them into your browser.
+
+   You should see the welcome message for nginx.
+
+   ![](images/install-sandbox-2-3.png)
+
+
+## Explore UCP
+
+At this point, you've completed the guided tour of a UCP installation. You've
+learned how to create a UCP installation by creating two nodes and designating
+one of them as a controller. You've pushed an image to Docker Trusted Registry
+and used that image to run a container in the cluster managed by UCP.
+
+In a real UCP production installation, UCP admins and operators are expected to
+do similar work every day. While the applications they launch will be more
+complicated, the workflow will be very similar to what you've just learned.
+
+Take some time to explore UCP further. Investigate the documentation for other
+activities you can perform with UCP.
+
+## Where to go next
+
+* [UCP architecture](architecture.md)
+* [UCP system requirements](installation/system-requirements.md)
diff --git a/ucp/install-sandbox.md b/ucp/install-sandbox.md
index 2a4b73fe810..0c75cb79e06 100644
--- a/ucp/install-sandbox.md
+++ b/ucp/install-sandbox.md
@@ -1,158 +1,139 @@
 ---
 aliases:
 - /ucp/evaluation-install/
-description: Evaluation installation
-keywords:
-- tbd, tbd
-menu:
-  main:
-    identifier: ucp_evaluate_sandbox
-    parent: mn_ucp
-    weight: 10
-title: Evaluate UCP in a sandbox
+title: Install DDC in a sandbox for evaluation
+description: Learn how to get a 30-day trial of Docker Datacenter up and running.
+keywords: "Docker Datacenter, orchestration, trial" --- -# Evaluate UCP in a sandbox +# Install DDC in a sandbox for evaluation -This page helps you to learn about Docker Universal Control Plane (UCP) at a -high-level through installing and running UCP in your local, sandbox -installation. The installation should be done on a Mac OS X or Windows system. -If you are experienced with Linux or a technical DevOps user wanting a technical -deep dive, please feel free to skip this evaluation and go directly to -[Plan a production installation](installation/plan-production-install.md) -and then to [Install UCP for production](installation/install-production.md). +This page introduces Docker Datacenter (also known as DDC): a combination of +Docker Universal Control Plane (UCP) and Docker Trusted Registry (DTR), and +walks you through installing it on a local (non-production) host or sandbox. +Once you've installed, we'll also give you a guided tour so you can evaluate its +features. -A UCP installation consists of an UCP controller and one or more hosts. These -instructions use Docker Machine, Docker's provisioning tool, to create several -local hosts running Docker Engine. Once you create these hosts, you'll install -UCP and its components on them just as you would in a full-on UCP installation. +The instructions here are for a sandbox installation on Mac OS X or Windows +systems. If you're an experienced Linux user, or if you want more detailed +technical information, you might want to skip this evaluation and go directly to +[Plan a production installation](installation/plan-production-install.md) and +then to [Install UCP for production](installation/install-production.md). ->**Note**: This evaluation installs UCP on top of the open source software version of -Docker Engine inside of a VirtualBox VM which is running the small-footprint -`boot2docker.iso` Linux. Such a configuration is **not** supported for UCP in -production. +> **Note**: This evaluation installs using the open source software version of +Docker Engine inside of a VirtualBox VM which runs the small-footprint +`boot2docker.iso` Linux. This configuration is **not** a production +configuration. +## Introduction: About this example -## Step 1. About this example +In this tutorial, we'll use Docker's provisioning tool - Docker Machine - to +create two virtual hosts. These two hosts are VirtualBox VMs running a small +footprint Linux image called `boot2docker.iso`, with the open source version of +Docker Engine installed. -This example introduces you to UCP by means of a very simple sandbox example. -You'll create a small UCP installation, deploy a container through UCP, and -examine the interface. +![Docker Machine setup](images/explain.png) -For this evaluation installation, you'll use Machine to create two VirtualBox -VMs. Each VM runs small foot-print Linux image called `boot2docker.iso`. Machine -provisions each VM with the open source Docker Engine. +A UCP installation consists of an UCP controller and one or more hosts. We'll +install UCP on one host, then join the second node to UCP as a swarm member. The +two VMs create a simple swarm cluster with one controller, which by default +secures the cluster via self-signed TLS certificates. -![Explain setup](images/explain.png) +![Sandbox](images/sandbox.png) -You'll use each of these VMs as a node in a simple UCP installation. The -installation will have a controller and a node. The installation rests on top of -a Docker Swarm cluster. 
The UCP installation process by default secures the cluster via self-signed TLS certificates.
+DDC's second component is DTR, which must be installed on a host that's a member
+of the UCP swarm. So next, we'll install DTR on that second node.
-![Sandbox](images/sandbox.png)
+Once you've installed UCP and DTR, you'll [work through a tutorial](install-sandbox-2.md) to deploy a
+container through UCP and explore the user interface.
-This example is intended as an introduction for non-technical users wanting to
-explore UCP for themselves. If you are a highly technical user intending to act as
-UCP administration operator, you may prefer to skip this and go straight to
-[Plan a production installation](installation/plan-production-install.md).
->**Note**: The command examples in this page were tested for a Mac OSX environment.
-If you are in another, you may need to adjust the commands to use analogous
-commands for you environment.
+>**Note**: The command examples in this page were tested for a macOS environment.
+If you are in another, you may need to adjust to use analogous commands for your environment.
-## Step 2. Verify the prerequisites
+## Verify the prerequisites
This example requires that you have:
-* Docker Engine
-* Docker Machine
+* [Docker Toolbox installed](https://docs.docker.com/toolbox/overview/)
+(contains Docker Machine and Docker Engine)
+* A free Docker ID account
-If you don't have those components, start by [installing them](/toolbox/overview.md).
+## Step 1: Provision hosts with Engine
-## Step 3. Provision hosts with Engine
+In this step, you'll provision two VMs for your UCP sandbox installation. One
+will run UCP and one will be used to run containers, so the host specifications
+will be slightly different.
-In this step, you provision two VMs for your UCP installation. This step is
-purely to enable your evaluation. You would never run UCP in production on local
-VMs with the open source Engine.
-
-In a production installation, you would use enterprise-grade Linux servers as
-your nodes. These nodes could be on your company's private network or in the
-cloud. UCP requires that each node be installed with the Commercially Supported
-Docker Engine (CS Engine).
+In a production environment, you would use enterprise-grade hosts instead
+of local VMs. These nodes could be on your company's private network or
+in the cloud. You would also use the Commercially Supported version of
+Docker Engine (CS Engine), which UCP requires.
Set up the nodes for your evaluation:
1. Open a terminal on your computer.
-2. Use Docker Machine to list any VMs in VirtualBox.
-
-        $ docker-machine ls
-        NAME      ACTIVE   DRIVER       STATE     URL                         SWARM
-        default   *        virtualbox   Running   tcp://192.168.99.100:2376
+2. Use Docker Machine to list any VMs in VirtualBox.
-3. Create a VM named `node1`.
+    ```none
+    $ docker-machine ls
-    UCP runs best with a minimum of 1.50 GB in memory and requires a minimum of
-    3.00 GB disk space. When you create your virtual host, you supply options to
-    size it appropriately.
+    NAME      ACTIVE   DRIVER       STATE     URL                         SWARM
+    default   *        virtualbox   Running   tcp://192.168.99.100:2376
+    ```
-        $ docker-machine create -d virtualbox \
-        --virtualbox-memory "2000" \
-        --virtualbox-disk-size "5000" node1
-        Running pre-create checks...
-        Creating machine...
- (node1) Copying /Users/mary/.docker/machine/cache/boot2docker.iso to /Users/mary/.docker/machine/machines/node1/boot2docker.iso... - (node1) Creating VirtualBox VM... - (node1) Creating SSH key... - (node1) Starting the VM... - (node1) Waiting for an IP... - Waiting for machine to be running, this may take a few minutes... - Machine is running, waiting for SSH to be available... - Detecting operating system of created instance... - Detecting the provisioner... - Provisioning with boot2docker... - Copying certs to the local machine directory... - Copying certs to the remote machine... - Setting Docker configuration on the remote daemon... - Checking connection to Docker... - Docker is up and running! - To see how to connect Docker to this machine, run: docker-machine env node1 +3. Create a VM named `node1` using the following command. -4. Create a VM named `node2`. + ```none + $ docker-machine create -d virtualbox \ + --virtualbox-memory "2500" \ + --virtualbox-disk-size "5000" node1 + ``` - $ docker-machine create -d virtualbox \ - --virtualbox-memory "2000" node2 + When you create your virtual host you specify the memory and disk size + options. UCP requires a minimum of 3.00 GB disk space and runs best with a + minimum of 2 GB of memory. -5. Use the Machine `ls` command to list your hosts. +4. Create a VM named `node2` using the command below. - NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS - node1 - virtualbox Running tcp://192.168.99.104:2376 v1.10.0 - node2 - virtualbox Running tcp://192.168.99.102:2376 v1.10.0 + ```none + $ docker-machine create -d virtualbox \ + --virtualbox-memory "2500" \ + --virtualbox-disk-size "5000" node2 + ``` - At this point, all the nodes are in the `Running` state. You have your hosts provisioned, now you are ready to install UCP itself. +5. Use the `docker-machine ls` command to list your hosts. -## Step 4. Learn about the ucp tool + ```none + $ docker-machine ls -You install UCP by using the Engine CLI to run the `ucp` tool. The `ucp` tool is -an image with subcommands to `install` a UCP controller or `join` a node to a -UCP controller. The general format of these commands are: + NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS + default - virtualbox Stopped Unknown + node1 - virtualbox Running tcp://192.168.99.100:2376 v1.12.1 + node2 - virtualbox Running tcp://192.168.99.101:2376 v1.12.1 + ``` + At this point, both nodes are in the `Running` state and ready for UCP installation. +## About the ucp tool -| Docker client | `run` command with options | `ucp` image | Subcommand with options | -|:--------------|:---------------------------|:-------------|:------------------------| -| `docker` | `run --rm -it` | `docker/ucp` | `install --help` | -| `docker` | `run --rm -it` | `docker/ucp` | `join --help` | -| `docker` | `run --rm -it` | `docker/ucp` | `uninstall --help` | +To install UCP, you'll use the Docker CLI to pull and run the `docker/ucp` +image, which contains a bootstrapper tool, which is designed to make UCP easier +to install than many enterprise-grade applications. The `ucp` tool runs +`docker run` commands to `install` a UCP controller or `join` a node to a UCP +controller. -You can these subcommands interactively by passing them the `-i` option or by -passing command-line options. The `ucp` tool is designed to make UCP easier to -install than many enterprise-grade applications. In interactive mode the tool -works to discover your network topology and suggest default answers to you. This -evaluation uses the interactive method. 
+The general format of these commands is `docker run --rm -it docker/ucp` with
+one or more subcommands, and you'll see them used later in this document. For
+this tutorial, we use the `-i` option for "interactive" install mode, but
+you can run them unattended in production.
Regardless of how you use the `docker/ucp` tool, the default install supplies
-some quick default options for both data volumes and the certificate authority
-(CA). In a production installation you can optionally:
+default options for both data volumes and the certificate authority (CA). In a
+production installation you can also optionally:
* use the high availability feature
* customize the port used by the UCP web application
@@ -160,530 +141,212 @@ some quick default options for both data volumes and the certificate authority
* create your own data volumes
* use your own TLS certificates
-You'll learn more about these when you Plan
-a production installation. For now, in this evaluation sandbox installation,
-you'll use all the default values with one exception, you'll specify a custom
-port for the Swarm manager.
+You can learn more about these when you
+[plan a production installation](installation/plan-production-install.md).
-## Step 5. Install the UCP controller
+## Step 2. Install the UCP controller
In this step, you install the UCP controller on the `node1` you provisioned
earlier. A controller serves the UCP application and runs the processes that
manage an installation's Docker objects.
-In a production installation, a system administrator can implement
-UCP's high availability feature. High availability allows you to designate
-several nodes as controller replicas. In this way, if one controller fails
-a replica node is ready to take its place.
+In a production installation, a system administrator can set up UCP's high
+availability feature, which allows you to designate several nodes as controller
+replicas. This way, if one controller fails, a replica node is ready to take its
+place.
-For this evaluation, you won't need that level of robustness. A single
-host for the controller suffices.
+For this sandbox installation, we don't need high availability, so a single
+host for the controller works fine.
-1. If you don't already have one, open a terminal on your computer.
+1. Open a terminal on your computer if you don't have one open already.
2. Connect the terminal environment to the `node1` you created.
-    a. Use `docker-machine env` command to get the settings.
-
-        $ docker-machine env node1
-        export DOCKER_TLS_VERIFY="1"
-        export DOCKER_HOST="tcp://192.168.99.103:2376"
-        export DOCKER_CERT_PATH="/Users/mary/.docker/machine/machines/node1"
-        export DOCKER_MACHINE_NAME="node1"
-        # Run this command to configure your shell:
-        # eval $(docker-machine env node1)
-
-    b. Run the `eval` command to set your environment.
-
-        $ eval $(docker-machine env node1)
-
-    c. Verify that `node1` has an active environment.
-
-        $ docker-machine ls
-        NAME    ACTIVE   DRIVER       STATE     URL                         SWARM   DOCKER    ERRORS
-        node1   *        virtualbox   Running   tcp://192.168.99.104:2376           v1.10.0
-        node2   -        virtualbox   Running   tcp://192.168.99.102:2376           v1.10.0
-
-    An `*` (asterisk) in the `ACTIVE` field indicates that the `node1` environment is active.
-
-    The client will send the `docker` commands in the following steps to the Docker Engine on on `node1`.
-
-3. Start the `ucp` tool to install interactively.
-
-    >**Note**: If you are on a Windows system, your shell can't resolve the
-    `$(docker-machine ip node2)` variable.
So, replace it with the actual IP - address. - - $ docker run --rm -it \ - -v /var/run/docker.sock:/var/run/docker.sock \ - --name ucp docker/ucp install -i \ - --swarm-port 3376 --host-address $(docker-machine ip node1) - Unable to find image 'docker/ucp:latest' locally - latest: Pulling from docker/ucp - 0198ad4008dc: Pull complete - 291084ae72f3: Pull complete - Digest: sha256:28b6c9640e5af0caf2b2acbbbfd7c07bdbec6b170f04cbaeea7bb4909d74898d - INFO[0000] Verifying your system is compatible with UCP - - The first time you run the `ucp` tool, the `docker run` command pulls its - image from the Docker Hub. The image contains the `ucp` tool. The tool - downloads if needed and then verifies your system supports UCP. The tool is - designed to discover the information it needs if it can. This reduces the - change for human error or mistakes during the install. - -4. Enter a UCP password when prompted and then confirm it. - - Please choose your initial UCP admin password: - Confirm your initial password: - INFO[0016] Pulling required images... (this may take a while) - - The UCP system relies on a set of Docker images running in containers. The `ucp` installer gets the latest official UCP images. - - The system prompts you for Subject alternative names (SANs). UCP requires - that all clients, including the Docker Engine, use a Swarm TLS certificate - chain signed by the UCP Swarm Root CA. You can provide the certificate - system with subject alternative names or SANs. The SANs are used to set up - individual "leaf certificates." In this sandbox, you've already provided the IP address and the `ucp` tool discovered this for you and shows it in the controller list. - - WARN[0004] None of the hostnames we'll be using in the UCP certificates - [controller 127.0.0.1 172.17.0.1 192.168.99.106] contain a domain - component. Your generated certs may fail TLS validation unless you only - use one of these shortnames or IPs to connect. You can use the --san - flag to add more aliases - - You may enter additional aliases (SANs) now or press enter to proceed - with the above list. - Additional aliases: - -5. Press enter to proceed with the list the `ucp` tool provided. - - INFO[0005] Installing UCP with host address 192.168.99.106 - If this is - incorrect, please specify an alternative address with the - '--host-address' flag - WARN[0000] None of the hostnames we'll be using in the UCP certificates - [controller 127.0.0.1 172.17.0.1 192.168.99.106 192.168.99.106] contain - a domain component. Your generated certs may fail TLS validation unless - you only use one of these shortnames or IPs to connect. You can use the - --san flag to add more aliases - INFO[0001] Generating Swarm Root CA - INFO[0022] Generating UCP Root CA - INFO[0024] Deploying UCP Containers - INFO[0028] UCP instance ID: CJQN:ZQVX:B6CC:KFD3:IXN5:FGLF:GXMN:WALD:QFHU:QLSX:ZCBY:CAL7 - INFO[0028] UCP Server SSL: SHA1 Fingerprint=02:36:16:93:B4:21:B7:AD:0A:6C:0F:3C:99:75:18:5D:5A:F7:C4:0C - INFO[0028] Login as "admin"/(your admin password) to UCP at https://192.168.99.106:443 - - When it completes, the `ucp` tool prompts you to login into the UCP GUI - gives you its location. You'll do this and install a license in Step 5, - below. - - -## Step 6. License your installation - -In this step, you log into UCP, get a license, and install it. Docker allows you to run an evaluation version of UCP with a single controller and node for up to 30 days. + a. Use the `docker-machine env` command to get the settings. -1. 
Enter the address into your browser to view the UCP login screen.
+    ```none
+    $ docker-machine env node1
-    Your browser may warn you about the connection. The warning appears because,
-    in this evaluation installation, the UCP certificate was issued by a
-    built-in certificate authority (CA). Your actions with the install actually
-    created the certificate. If you are concerned, the certificate's fingerprint
-    is displayed during install and you can compare it.
+    export DOCKER_TLS_VERIFY="1"
+    export DOCKER_HOST="tcp://192.168.99.100:2376"
+    export DOCKER_CERT_PATH="/Users/ldr/.docker/machine/machines/node1"
+    export DOCKER_MACHINE_NAME="node1"
+    # Run this command to configure your shell:
+    # eval $(docker-machine env node1)
+    ```
-2. Click the **Advanced** link and then the **Proceed to** link.
+    b. Run the `eval` command found in the final line to set your environment.
-    The login screen displays.
+    ```none
+    $ eval $(docker-machine env node1)
+    ```
-    ![](images/login-ani.gif)
+    Running this `eval` command sends the `docker` commands in the following
+    steps to the Docker Engine on `node1`.
-5. Enter `admin` for the username along with the password you provided to the `install`.
+    c. Verify that `node1` is the active environment.
-    After you enter the correct credentials, the UCP dashboard prompts for a
-    license.
+    You can do this by running `docker-machine ls` and checking that there is
+    an `*` (asterisk) in the `ACTIVE` field next to `node1`.
-    ![](images/skip-this.png)
+3. Start the `ucp` tool to install interactively.
-6. Press *Skip for now* to continue to the dashboard.
+    ```none
+    $ docker run --rm -it \
+      -v /var/run/docker.sock:/var/run/docker.sock \
+      --name ucp docker/ucp install -i \
+      --swarm-port 3376 --host-address $(docker-machine ip node1)
+    ```
-    ![](images/dashboard.png)
+    > **Note**: If you are on a Windows system, your shell won't be able to
+    resolve the `$(docker-machine ip node1)` variable. Instead, edit the command
+    supplied to replace it with the actual IP address.
-    The dashboard shows a single node, your controller node. It also shows you a
-    banner saying that you need a license.
+    The first time you run the `ucp` tool, the `docker run` command pulls the
+    UCP bootstrapper image from Docker Cloud. The tool downloads the packages it
+    needs, and verifies that your system will support a UCP installation.
-6. Follow the link on the UCP **Dashboard** to the Docker website to get a trial license.
+4. Enter a password for UCP when prompted, and then confirm it.
-    You must fill out a short form. After you complete the form, you are prompted with some **Installation Steps**.
+    The system prompts you for Subject alternative names (SANs). In this
+    sandbox, you've already provided the IP address and the `ucp` tool
+    discovered this for you and shows it in the controller list.
-7. Press **Next** until you reach the **Add License** step.
+5. Press enter to proceed with the list that the `ucp` tool provided.
-    ![](images/get-license.png)
+    UCP requires that all clients, including the Docker Engine, use a Swarm TLS
+    certificate chain signed by the UCP Swarm Root CA. You can provide the
+    certificate system with subject alternative names or SANs, which allow you
+    to set up individual "leaf certificates."
-8. Press the **Download License** button.
+    When it completes, the `ucp` tool prompts you to log in to the UCP web
+    interface and gives you its location. You'll do this in the next step so
+    you can install a license.
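+
+As a quick sanity check before you move on, you can confirm that the controller
+came up by listing the UCP system containers on `node1`. This is optional, and
+it assumes the `ucp-` name prefix used by the UCP system containers mentioned
+elsewhere in these docs (for example `ucp-kv`); exact names can vary between
+UCP releases.
+
+```none
+# List the UCP system containers the installer started on node1
+$ docker ps --filter "name=ucp-" --format "table {{.Names}}\t{{.Status}}"
+```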
- Your browser downloads a `docker_subscription.lic` file. +## Step 3. License your installation -9. Save the file to a safe location. +In this step, you'll get a license, log in to the UCP web interface and install +the license. Docker allows you to run an evaluation version of UCP with a single +controller and node for up to 30 days. -10. Return to the UCP Dashboard. +[Learn how to get a trial license](installation/license.md). -11. Choose **Settings** from the "hamburger" menu on the left side. +In your terminal window you should have instructions on how to access the UCP +web UI. It should look like this: - As you move through UCP, the header changes to display the appropriate - breadcrumbs. In this case you are on the **Dashboard/Settings*** page. +```none +INFO[0056] Login as "admin"/(your admin password) to UCP at https://192.168.99.100:443 +``` -12. Scroll down to the **License** section and click **Choose File**. +In your browser navigate to that IP address, and upload your trial license. - Locate and upload your file. +![](images/install-sandbox-1.png) - ![](images/license.png) +## Step 4. Join a node - Once you upload the file, the license message disappears from UCP. - -Take a minute and explore UCP. At this point, you have a single controller -running. How many nodes is that? What makes a controller is the containers it -runs. Locate the Containers page and show the system containers on your -controller. You'll know you've succeeded if you see this list: - -![](images/controller-containers.png) - -The containers reflect the architecture of UCP. The containers are running -Swarm, a key-value store process, and some containers with certificate volumes. -Explore the other resources. - -## Step 7. Join a node - -In this step, you join your UCP `node2` to the controller using the `ucp join` -subcommand. In a UCP production installation, you'd do this step for each node +In this step, you join your `node2` to the controller using the `ucp join` +command. In a production installation, you'd do this step for each node you want to add. -1. If you don't already have one, open a terminal on your computer. +1. Open a terminal on your computer if you don't already have one open. 2. Connect the terminal environment to the `node2` you provisioned earlier. - a. Use `docker-machine env` command to get the settings. - - $ docker-machine env node2 - export DOCKER_TLS_VERIFY="1" - export DOCKER_HOST="tcp://192.168.99.104:2376" - export DOCKER_CERT_PATH="/Users/mary/.docker/machine/machines/node2" - export DOCKER_MACHINE_NAME="node2" - # Run this command to configure your shell: - # eval $(docker-machine env node2) - - b. Run the `eval` command to set your environment. - - $ eval $(docker-machine env node2) - - The client will send the `docker` commands in the following steps to the Docker Engine on on `controller`. - -2. Run the `docker/ucp join` command. - - >**Note**: If you are on a Windows system, your shell can't resolve the - `$(docker-machine ip node2)` variable. So, replace it with the actual IP - address. - - $ docker run --rm -it \ - -v /var/run/docker.sock:/var/run/docker.sock \ - --name ucp docker/ucp join -i \ - --host-address $(docker-machine ip node2) - - The `join` pulls several images and prompts you for the UCL of the UCP Server. 
- - Unable to find image 'docker/ucp:latest' locally - latest: Pulling from docker/ucp - 0198ad4008dc: Pull complete - 291084ae72f3: Pull complete - Digest: sha256:28b6c9640e5af0caf2b2acbbbfd7c07bdbec6b170f04cbaeea7bb4909d74898d - Status: Downloaded newer image for docker/ucp:latest - Please enter the URL to your UCP Server: - -3. Enter the URL of your server to continue. - - Orca server https://192.168.99.106 - Subject: ucp - Issuer: UCP Root CA - SHA1 Fingerprint=02:36:16:93:B4:21:B7:AD:0A:6C:0F:3C:99:75:18:5D:5A:F7:C4:0C - Do you want to trust this server and proceed with the join? (y/n): - - The system prompts you to join the server. - -4. Press `y` to continue and the tool prompts you for the username and password for your UCP server. - - Please enter your UCP Admin username: admin - Please enter your UCP Admin password: - INFO[0027] Pulling required images... (this may take a while) - WARN[0070] None of the hostnames we'll be using in the UCP certificates [node1 127.0.0.1 172.17.0.1 192.168.99.108] contain a domain component. Your generated certs may fail TLS validation unless you only use one of these shortnames or IPs to connect. You can use the --san flag to add more aliases - You may enter additional aliases (SANs) now or press enter to proceed with the above list. - Additional aliases: - - The system continues and prompts you for SANs. In this sandbox, you've already provided the IP address and the `ucp` tool discovered this for you and shows it in the controller list. - -5. Press enter to proceed without providing a SAN. - - WARN[0000] None of the hostnames we'll be using in the UCP certificates - [node1 127.0.0.1 172.17.0.1 192.168.99.108 192.168.99.108] contain a - domain component. Your generated certs may fail TLS validation unless you - only use one of these shortnames or IPs to connect. You can use the --san - flag to add more aliases - INFO[0000] This engine will join UCP and advertise itself with host - address 192.168.99.108 - If this is incorrect, please specify an - alternative address with the '--host-address' flag - INFO[0000] Verifying your system is compatible with UCP - INFO[0011] Starting local swarm containers ’ - -4. Login into UCP with your browser and check to make sure your new node appears. - - The page should display your new node. - - ![](images/nodes.png) - -## Step 8. Deploy a container - -UCP allows you to deploy and manage "Dockerized" applications in production. An -application is built up using Docker objects, such as images and containers, and -Docker resources, such as volumes and networks. - -UCP deploys and manages these objects and resources using remote API calls the -Engine daemons running on the nodes. For example, the `run` action may deploy an -image in a Docker container. That image might define a service such as an Nginix -web server or a database like Postgres. - -A UCP operator initiates Engine actions through the UCP dashboard or through the -Docker Engine CLI. In this step, you deploy a container through the UCP -dashboard. The container will run an Nginx server, so you'll need to launch the -`nginx` image inside of it. - -1. Log into the UCP **Dashboard**. - -2. Click **Containers**. - - The system displays the **Containers** page. UCP runs some containers that - support its own operations. These are called "system" containers and they - are hidden by default. - -3. Click **+ Deploy Container**. - - The system displays a dialog with several fields. 
Using the dialog requires some basic knowledge of Docker objects and their attributes. A UCP admin or operator would typically have this knowledge. For now, you'll just follow along. - -4. Enter `nginx` for the image name. - - An image is simply predefined software you want to run. The software might - be an actual standalone application or maybe some component software necessary - to support a complex service. - -5. Enter `nginx_server` for the container name. - - This name just identifies the container on your network. - -6. Click **Publish Ports** from the **Overview** menu. - - A Docker container, like it sounds, is securely isolated from other processes on your network. In fact, the container has its own internal network configuration. If you want to access to a service inside a container, you need to expose a container's port. This container port maps to a port on the node. The node is hosting an instance of Docker Engine, so its port is called the **Host Port**. - -7. Enter `443` in the **Port** and in the **Host Port** field. - -8. Use the plus sign to add another **Port**. - -9. For this port, enter `80` in the **Port** and **Host Port** field. - - When you are done, your dialog looks like the following: - - ![Port configuration](images/port_config.png) - -10. Click **Run Container** to deploy your container. - - ![Deployed](images/display_container.png) - -## Step 9. View a running service - -At this point, you have deployed a container and you should see the application running. Recall that you deployed an Nginx web server. That server comes with a default page. In this step, you open the running server. - -1. Make sure you are still on the **Containers** page. - -2. Select the edit icon on the container. - - ![Edit](images/container_edit.png) - - The system displays the container's details and some operations you can run on the container. - -3. Scroll down to the ports section. - - You'll see an IP address with port `80` for the server. - -4. Copy the IP address to your browser and paste the information you copied. - - You should see the welcome message for nginx. - - ![Port 80](images/welcome_nginx.png) - - -## Step 10. Download a client bundle - -In this step, you download the *client bundle*. Each node in your UCP cluster is running Engine. A UCP operator can use the Engine CLI client instead of UCP to interact with the Docker objects and resources UCP manages. To issue commands to a UCP node, your local shell environment must be configured with the same security certificates as the UCP application itself. The client bundle contains the certificates and a script to configure a shell environment. - -Download the bundle and configure your environment. - -1. If you haven't already done so, log into UCP. - -2. Choose **admin > Profile** from the right-hand menu. - - Any user can download their certificates. So, if you were logged in under a user name such as `davey` the path to download bundle is **davey > Profile**. Since you are logged in as `admin`, the path is `admin`. - -3. Click **Create Client Bundle**. - - The browser downloads the `ucp-bundle-admin.zip` file. - -4. Open a new shell on your local machine. - -5. Make sure your shell is does not have an active Docker Machine host. - - $ docker-machine ls - NAME ACTIVE DRIVER STATE URL SWARM DOCKER ERRORS - moxie - virtualbox Stopped Unknown - test - virtualbox Running tcp://192.168.99.100:2376 v1.10.1 - - While Machine has a stopped and running host, neither is active in the shell. 
You know this because neither host shows an * (asterisk) indicating the shell is configured. - -4. Create a directory to hold the deploy information. - - $ mkdir deploy-app - -4. Navigate to where the bundle was downloaded, and unzip the client bundle - - $ unzip bundle.zip - Archive: bundle.zip - extracting: ca.pem - extracting: cert.pem - extracting: key.pem - extracting: cert.pub - extracting: env.sh - -5. Change into the directory that was created when the bundle was unzipped - -6. Execute the `env.sh` script to set the appropriate environment variables for your UCP deployment. - - $ source env.sh - - If you are on Windows, you may need to set the environment variables manually. - -7. Run `docker info` to examine the UCP deployment. + 1. Use `docker-machine env` command to get the settings command for `node2`. - Your output should show that you are managing UCP vs. a single node. + ```none + $ docker-machine env node2 - $ docker info - Containers: 12 - Running: 0 - Paused: 0 - Stopped: 0 - Images: 17 - Role: primary - Strategy: spread - Filters: health, port, dependency, affinity, constraint - Nodes: 2 - node1: 192.168.99.106:12376 - └ Status: Healthy - └ Containers: 9 - └ Reserved CPUs: 0 / 1 - └ Reserved Memory: 0 B / 3.01 GiB - └ Labels: executiondriver=native-0.2, kernelversion=4.1.17-boot2docker, operatingsystem=Boot2Docker 1.10.0 (TCL 6.4.1); master : b09ed60 - Thu Feb 4 20:16:08 UTC 2016, provider=virtualbox, storagedriver=aufs - └ Error: (none) - └ UpdatedAt: 2016-02-09T12:03:16Z - node2: 192.168.99.107:12376 - └ Status: Healthy - └ Containers: 3 - └ Reserved CPUs: 0 / 1 - └ Reserved Memory: 0 B / 4.956 GiB - └ Labels: executiondriver=native-0.2, kernelversion=4.1.17-boot2docker, operatingsystem=Boot2Docker 1.10.0 (TCL 6.4.1); master : b09ed60 - Thu Feb 4 20:16:08 UTC 2016, provider=virtualbox, storagedriver=aufs - └ Error: (none) - └ UpdatedAt: 2016-02-09T12:03:11Z - Cluster Managers: 1 - 192.168.99.106: Healthy - └ Orca Controller: https://192.168.99.106:443 - └ Swarm Manager: tcp://192.168.99.106:3376 - └ KV: etcd://192.168.99.106:12379 - Plugins: - Volume: - Network: - CPUs: 2 - Total Memory: 7.966 GiB - Name: ucp-controller-node1 - ID: P5QI:ZFCX:ELZ6:RX2F:ADCT:SJ7X:LAMQ:AA4L:ZWGR:IA5V:CXDE:FTT2 - WARNING: No oom kill disable support - WARNING: No cpu cfs quota support - WARNING: No cpu cfs period support - WARNING: No cpu shares support - WARNING: No cpuset support - Labels: - com.docker.ucp.license_key=p3vPAznHhbitGG_KM36NvCWDiDDEU7aP_Y9z4i7V4DNb - com.docker.ucp.license_max_engines=1 - com.docker.ucp.license_expires=2016-11-11 00:53:53 +0000 UTC + export DOCKER_TLS_VERIFY="1" + export DOCKER_HOST="tcp://192.168.99.101:2376" + export DOCKER_CERT_PATH="/Users/ldr/.docker/machine/machines/node2" + export DOCKER_MACHINE_NAME="node2" + # Run this command to configure your shell: + # eval $(docker-machine env node2) + ``` -## Step 11. Deploy with the CLI + b. Run the `eval` command to set your environment. -In this exercise, you'll launch another Nginx container. Only this time, you'll use the Engine CLI. Then, you'll look at the result in the UCP dashboard. + ``` + $ eval $(docker-machine env node2) + ``` -1. Connect the terminal environment to the `node2`. + Running this `eval` command sends the `docker` commands in the following + steps to the Docker Engine on `node2`. - $ eval "$(docker-machine env node2)" +3. Run the `docker/ucp join` command. -2. Change to your user `$HOME` directory. 
+ > **Note**: If you are on a Windows system, your shell won't be able to + resolve the `$(docker-machine ip node2)` variable. Instead, edit the command + supplied to replace it with the actual IP address. - $ cd $HOME + ```none + $ docker run --rm -it \ + -v /var/run/docker.sock:/var/run/docker.sock \ + --name ucp docker/ucp join -i \ + --host-address $(docker-machine ip node2) + ``` -2. Make a `site` directory. + The `join` command pulls several images, then prompts you for the URL of the + UCP Server. - $ mkdir site +4. Enter the URL of the UCP server to continue. -3. Change into the `site` directory. +5. Press `y` when prompted to continue and join the node to the swarm. - $ cd site +6. Enter the admin username and password for the UCP server when prompted. -4. Create an `index.html` file. + The installer continues and prompts you for SANs. In this sandbox, you've + already provided the IP address and the `ucp` tool discovered this for you + and shows it in the controller list. - $ echo "my new site" > index.html +7. Press `enter` to proceed without providing a SAN. -5. Start a new `nginx` container and replace the `html` folder with your `site` directory. + The installation is complete when you see the message + `Starting local swarm containers`. - $ docker run -d -P -v $HOME/site:/usr/share/nginx/html --name mysite nginx +8. Log in to UCP with your browser and confirm that the new node appears. - This command runs an `nginx` image in a container called `mysite`. The `-P` tells the Engine to expose all the ports on the container. + ![](images/install-sandbox-2.png) -6. Open the UCP dashboard in your browser. -7. Navigate to the **Containers** page and locate your `mysite` container. +## Step 5: Install Docker Trusted Registry - ![mysite](images/second_node.png) +Next, we'll install Docker Trusted Registry (DTR). DTR provides a secure +location to store your organization's Docker images. Images are used by UCP to +run containers that make up a service. By providing a secure connection between +DTR and UCP, you can verify that your production services contain only signed +code produced by your own organization. -8. Scroll down to the ports section. +1. First, make sure you know the IP addresses of both your UCP and DTR nodes. +You can find this easily by running `docker-machine ls`. - You'll see an IP address with port `80/tcp` for the server. This time, you'll - find that the port mapped on this container than the one created yourself. - That's because the command didn't explicitly map a port, so the Engine chose - mapped the default Nginx port `80` inside the container to an arbitrary port - on the node. +2. Run the `docker-machine env node2` command to make sure that you are passing +commands to the node on which you will install DTR. -4. Copy the IP address to your browser and paste the information you copied. +3. Next, use the following command to install DTR on `node2`. - You should see your `index.html` file display instead of the standard Nginx welcome. + ```none + $ docker run -it --rm docker/dtr install \ + --ucp-url $(docker-machine ip node1) \ + --ucp-insecure-tls \ + --ucp-node node2 \ + --dtr-external-url $(docker-machine ip node2) + ``` - ![mysite](images/second_node.png) + You'll be prompted for the credentials of the UCP administrator. -## Explore UCP +4. Verify that DTR is running by navigating your browser to the DTR server's IP. -At this point, you've completed the guided tour of a UCP installation. 
You've -learned how to create a UCP installation by creating two nodes and designating -one of them as a controller. You've created a container running a simple web -server both using UCP and directly on the command line. You used UCP to get -information about what you created. +5. Confirm that you can log in using your UCP administrator credentials. -In a real UCP production installation, UCP admins and operators are expected to -do similar work every day. While the applications they launch will be more -complicated, the interaction channels a user can take, the GUI or the -certificate bundle plus a command line, remain the same. +![](images/install-sandbox-3.png) -Take some time to explore UCP some more. Investigate the documentation for other -activities you can perform with UCP. +**Congratulations!** You now have a working installation of Docker Datacenter +running in your sandbox. You can explore on your own, or continue your +evaluation by walking through our [guided tour](install-sandbox-2.md). -## Where to Go Next +## Where to go next +* [DDC guided tour](install-sandbox-2.md) * [UCP architecture](architecture.md) -* [UCP system requirements](installation/system-requirements.md) diff --git a/ucp/installation/install-offline.md b/ucp/installation/install-offline.md index 3c0b7c1ecf5..4933261cf00 100644 --- a/ucp/installation/install-offline.md +++ b/ucp/installation/install-offline.md @@ -27,7 +27,7 @@ all the images. Then you copy that package to the host where you’ll install UC Use a computer with internet access to download a single package with all Docker Datacenter components: - ```bash + ```none $ wget https://packages.docker.com/caas/ucp-1.1.3_dtr-2.0.3.tar.gz -O docker-datacenter.tar.gz ``` @@ -37,7 +37,7 @@ all the images. Then you copy that package to the host where you’ll install UC host where you'll be installing Docker UCP. You can use the Secure Copy command for this: - ```bash + ```none $ scp docker-datacenter.tar.gz $USER@$UCP_HOST:/tmp ``` @@ -49,7 +49,7 @@ all the images. Then you copy that package to the host where you’ll install UC `docker load` command, to load the images from the tar archive. On the host were you are going to install UCP, run: - ```bash + ```none $ docker load < docker-datacenter.tar.gz ``` diff --git a/ucp/installation/install-production.md b/ucp/installation/install-production.md index bdf253a475a..b82fd79a5a4 100644 --- a/ucp/installation/install-production.md +++ b/ucp/installation/install-production.md @@ -93,7 +93,7 @@ To install UCP: the command prompts for the necessary configuration values. You can also use flags to pass values to the install command. - ```bash + ```none $ docker run --rm -it --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp install -i \ @@ -141,7 +141,7 @@ To create a backup of the CAs used on the controller node: 1. Log into the controller node using ssh. 2. Run the docker/ucp backup command. - ```bash + ```none $ docker run --rm -i --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp backup \ @@ -173,7 +173,7 @@ For each node that you want to install as a controller replica: be passing the backup.tar file from the previous step in order to ensure that the CAs are replicated to the new controller node. - ```bash + ```none $ docker run --rm -it --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ -v $BACKUP_PATH/backup.tar:/backup.tar \ @@ -214,7 +214,7 @@ For each controller node: 2. Run the engine-discovery command. 
- ```bash + ```none $ docker run --rm -it \ --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ @@ -233,7 +233,7 @@ For each node that you want to add to your UCP cluster: 2. Use the join command, to join the node to the cluster: - ```bash + ```none $ docker run --rm -it --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp join -i diff --git a/ucp/installation/system-requirements.md b/ucp/installation/system-requirements.md index 49ddf53b3f2..625b485075c 100644 --- a/ucp/installation/system-requirements.md +++ b/ucp/installation/system-requirements.md @@ -40,7 +40,7 @@ When installing UCP on a host, make sure the following ports are open: | controllers, nodes | in | TCP 443 (configurable) | Web app and CLI client access to UCP. | | controllers, nodes | in | TCP 2375 | Heartbeat for nodes, to ensure they are running. | | controllers | in | TCP 2376 (configurable) | Swarm manager accepts requests from UCP controller. | -| controllers, nodes | in, out | UDP 4789 | Overlay networking. | +| controllers, nodes | in, out | TCP + UDP 4789 | Overlay networking. | | controllers, nodes | in, out | TCP + UDP 7946 | Overlay networking. | | controllers, nodes | in | TCP 12376 | Proxy for TLS, provides access to UCP, Swarm, and Engine. | | controller | in | TCP 12379 | Internal node configuration, cluster configuration, and HA. | diff --git a/ucp/installation/uninstall.md b/ucp/installation/uninstall.md index 495b75037a5..563a1d21c11 100644 --- a/ucp/installation/uninstall.md +++ b/ucp/installation/uninstall.md @@ -34,23 +34,26 @@ You can also use flags to pass values to the uninstall command. 1. Run the uninstall command. - ```bash + ```none $ docker run --rm -it \ - -v /var/run/docker.sock:/var/run/docker.sock + -v /var/run/docker.sock:/var/run/docker.sock \ --name ucp \ docker/ucp uninstall -i INFO[0000] Were about to uninstall the local components for UCP ID: FEY4:M46O:7OUS:QQA4:HLR3:4HRD:IUTH:LC2W:QPRE:BLYH:UWEM:3TYV Do you want proceed with the uninstall? (y/n): y - INFO[0000] Removing UCP Containers - INFO[0000] Removing UCP images - INFO[0005] Removing UCP volumes + WARN[0000] We detected a daemon advertisement configuration. Proceed with caution, as the daemon will require a restart. Press ctrl-c to cancel uninstall within 4 seconds. + INFO[0004] Removing UCP Containers + INFO[0005] Removing UCP images + WARN[0006] Configuration updated. You will have to manually restart the docker daemon for the changes to take effect. + WARN[0006] Engine discovery configuration removed. You will need to restart the daemon. + INFO[0010] Removing UCP volumes ``` 2. List the images remaining on the node. - ``` + ```none $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE @@ -62,7 +65,7 @@ You can also use flags to pass values to the uninstall command. 3. Remove the docker/ucp image. - ``` + ```none $ docker rmi docker/ucp Untagged: docker/ucp:latest @@ -71,7 +74,7 @@ You can also use flags to pass values to the uninstall command. Deleted: sha256:93743d5df2362466e2fe116a677ec6a4b0091bd09e889abfc9109047fcfcdebf ``` -5. Restart the Docker daemon. +4. Restart the Docker daemon. When you install or join a node, UCP configures the Docker engine on that node for multi-host networking. When uninstalling, the configuration is @@ -84,7 +87,7 @@ You can also use flags to pass values to the uninstall command. $ sudo service docker restart ``` -6. Confirm the node was removed from the cluster. +5. Confirm the node was removed from the cluster. 
In the UCP web application, confirm the node is no longer listed. It might take a few minutes for UCP to stop listing that node. diff --git a/ucp/installation/upgrade.md b/ucp/installation/upgrade.md index e36090a7549..692b81a2e08 100644 --- a/ucp/installation/upgrade.md +++ b/ucp/installation/upgrade.md @@ -96,21 +96,11 @@ replica nodes): 4. Upgrade the controller node. - ```bash + ```none $ docker run --rm -it \ --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp:$UCP_VERSION upgrade -i - - # INFO[0000] We're about to upgrade the local components for UCP ID: KRIJ:UEYA:AGTD:6BBO:AHS7:QSPA:ZFAM:X7KW:GNXR:4MWQ:UDPU:MRGR - Do you want proceed with the upgrade? (y/n): y - - # INFO[0028] All required images are present - # INFO[0000] Checking for version compatibility - # INFO[0000] Your system is compatible. Removing old UCP Containers while preserving data - # INFO[0000] Redeploying UCP containers - # INFO[0002] Verifying containers started - # INFO[0004] Success! Please log in to the UCP console to verify your system before proceeding to upgrade additional nodes. ``` 5. Delete your browser cache. @@ -177,21 +167,11 @@ If your cluster is set up for high-availability (has several controller nodes): 7. Upgrade the controller node. - ```bash + ```none $ docker run --rm -it \ --name ucp \ -v /var/run/docker.sock:/var/run/docker.sock \ docker/ucp:$UCP_VERSION upgrade -i - - # INFO[0000] We're about to upgrade the local components for UCP ID: KRIJ:UEYA:AGTD:6BBO:AHS7:QSPA:ZFAM:X7KW:GNXR:4MWQ:UDPU:MRGR - Do you want proceed with the upgrade? (y/n): y - - # INFO[0028] All required images are present - # INFO[0000] Checking for version compatibility - # INFO[0000] Your system is compatible. Removing old UCP Containers while preserving data - # INFO[0000] Redeploying UCP containers - # INFO[0002] Verifying containers started - # INFO[0004] Success! Please log in to the UCP console to verify your system before proceeding to upgrade additional nodes. ``` 8. Delete your browser cache. diff --git a/ucp/monitor/troubleshoot-configurations.md b/ucp/monitor/troubleshoot-configurations.md index 57299b7eb42..c99750b3e1e 100644 --- a/ucp/monitor/troubleshoot-configurations.md +++ b/ucp/monitor/troubleshoot-configurations.md @@ -38,7 +38,7 @@ $ sudo apt-get update && apt-get install curl jq 2. Use the REST API to access the cluster configurations. -```bash +```none # $DOCKER_HOST and $DOCKER_CERT_PATH are set when using the client bundle $ export KV_URL="https://$(echo $DOCKER_HOST | cut -f3 -d/ | cut -f1 -d:):12379" @@ -62,7 +62,7 @@ The examples below assume you are logged in with ssh into a UCP controller node. ### Check the health of the etcd cluster -```bash +```none $ docker exec -it ucp-kv etcdctl \ --endpoint https://127.0.0.1:2379 \ --ca-file /etc/docker/ssl/ca.pem \ @@ -80,7 +80,7 @@ On failure the command exits with an error code, and no output. ### Show the current value of a key -```bash +```none $ docker exec -it ucp-kv etcdctl \ --endpoint https://127.0.0.1:2379 \ --ca-file /etc/docker/ssl/ca.pem \ @@ -96,7 +96,7 @@ $ docker exec -it ucp-kv etcdctl \ ### List the current members of the cluster -```bash +```none $ docker exec -it ucp-kv etcdctl \ --endpoint https://127.0.0.1:2379 \ --ca-file /etc/docker/ssl/ca.pem \ @@ -115,7 +115,7 @@ As long as your cluster is still functional and has not lost quorum (more than (n/2)-1 nodes failed) you can use the following command to remove the failed members. 
-```bash
+```none
$ docker exec -it ucp-kv etcdctl \
        --endpoint https://127.0.0.1:2379 \
        --ca-file /etc/docker/ssl/ca.pem \
diff --git a/ucp/monitor/troubleshoot-ucp.md b/ucp/monitor/troubleshoot-ucp.md
index 9f62914d646..7be05147f2c 100644
--- a/ucp/monitor/troubleshoot-ucp.md
+++ b/ucp/monitor/troubleshoot-ucp.md
@@ -46,7 +46,7 @@ specially useful if the UCP web application is not working.
 2. Check the logs of UCP system containers.
-    ```bash
+    ```none
    # By default system containers are not displayed. Use the -a flag to display them
    $ docker ps -a
diff --git a/ucp/reference/backup.md b/ucp/reference/backup.md
index 8b2b11e57c3..73d7381412d 100644
--- a/ucp/reference/backup.md
+++ b/ucp/reference/backup.md
@@ -9,13 +9,13 @@ menu:
 title: backup
 ---
-# docker/ucp id
+# docker/ucp backup
 Stream a tar file to stdout containing all UCP data volumes.
 ## Usage
-```bash
+```none
 docker run --rm -i \
        --name ucp \
        -v /var/run/docker.sock:/var/run/docker.sock \
@@ -51,4 +51,4 @@ built-in PGP compatible encryption.
 | `--id` | The ID of the UCP instance to backup |
 | `--root-ca-only` | Backup only the root CA certificates and keys from this controller node |
 | `--passphrase` | Encrypt the tar file with the provided passphrase [$UCP_PASSPHRASE] |
-| `--interactive, -i` | Enable interactive mode. You will be prompted to enter all required information. |
\ No newline at end of file
+| `--interactive, -i` | Enable interactive mode. You will be prompted to enter all required information. |
diff --git a/ucp/reference/upgrade.md b/ucp/reference/upgrade.md
index 1ca430e0991..f82a2a7a5a0 100644
--- a/ucp/reference/upgrade.md
+++ b/ucp/reference/upgrade.md
@@ -38,9 +38,9 @@ before proceeding to the next node.
 | Option | Description |
 |:----------------------|:----------------------------------------------------------------------------------------|
-| ` --debug, -D` | Enable debug |
-| ` --jsonlog` | Produce json formatted output for easier parsing |
-| ` --interactive, -i` | Enable interactive mode.,You will be prompted to enter all required information |
+| `--debug, -D` | Enable debug |
+| `--jsonlog` | Produce json formatted output for easier parsing |
+| `--interactive, -i` | Enable interactive mode. You will be prompted to enter all required information |
 | `--admin-username` | Specify the UCP admin username [$UCP_ADMIN_USER] |
 | `--admin-password` | Specify the UCP admin password [$UCP_ADMIN_PASSWORD] |
 | `--registry-username` | Specify the username to pull required images with [$REGISTRY_USERNAME] |