From 0e01de91859cbbb504e4e468a9433985d318ac5f Mon Sep 17 00:00:00 2001 From: Simon Li Date: Sun, 15 Nov 2020 23:08:47 +0000 Subject: [PATCH 01/15] pre-commit: shellcheck (not run by default) --- .pre-commit-config.yaml | 5 +++++ .pre-commit-shellcheck.yaml | 5 +++++ 2 files changed, 10 insertions(+) create mode 100644 .pre-commit-shellcheck.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 532aa69c38..caa54d2799 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,3 +26,8 @@ repos: entry: chartpress --reset language: system pass_filenames: false + +# The hooks in this file are automatically installed by pre-commit, but shellcheck +# must be manually installed so it is included in .pre-commit-shellcheck.yaml +# instead. To run it: +# pre-commit run --config .pre-commit-shellcheck.yaml --all-files diff --git a/.pre-commit-shellcheck.yaml b/.pre-commit-shellcheck.yaml new file mode 100644 index 0000000000..3a4333e4c7 --- /dev/null +++ b/.pre-commit-shellcheck.yaml @@ -0,0 +1,5 @@ +repos: + - repo: https://github.com/gruntwork-io/pre-commit + rev: v0.1.10 + hooks: + - id: shellcheck From 26ac60579bf9120b18fafa8d0ba09c95ece273ad Mon Sep 17 00:00:00 2001 From: Simon Li Date: Sun, 15 Nov 2020 23:09:51 +0000 Subject: [PATCH 02/15] shellcheck ci/common ci/common contains bash-specific syntax, and triggers several warnings --- ci/common | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ci/common b/ci/common index 705086b8ee..ad9ac79aa8 100755 --- a/ci/common +++ b/ci/common @@ -1,5 +1,12 @@ -#!/bin/sh +#!/bin/bash # Use https://www.shellcheck.net/ to reduce mistakes if you make changes to this file. +# +# shellcheck disable=SC2015 +# Note that A && B || C is not if-then-else. C may run when A is true. +# shellcheck disable=SC2016 +# Expressions don't expand in single quotes, use double quotes for that. +# shellcheck disable=SC2086 +# Double quote to prevent globbing and word splitting. 
setup_helm () { echo "setup helm ${HELM_VERSION}" From 83f868cd91557dad4a2bff2de1538f2c05c7ddcd Mon Sep 17 00:00:00 2001 From: Simon Li Date: Sun, 15 Nov 2020 23:10:40 +0000 Subject: [PATCH 03/15] Run pre-commit shellcheck in test workflow --- .github/workflows/test-chart.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/test-chart.yaml b/.github/workflows/test-chart.yaml index 718f3257a3..8ce9b7d442 100644 --- a/.github/workflows/test-chart.yaml +++ b/.github/workflows/test-chart.yaml @@ -40,6 +40,9 @@ jobs: pip install chartpress yamllint - uses: pre-commit/action@v2.0.0 + - uses: pre-commit/action@v2.0.0 + with: + extra_args: --config .pre-commit-shellcheck.yaml - name: Lint and validate run: tools/templates/lint-and-validate.py From f7e435d569230ed180ed6920270c5c03bba7831b Mon Sep 17 00:00:00 2001 From: Simon Li Date: Sun, 15 Nov 2020 23:11:39 +0000 Subject: [PATCH 04/15] pre-commit: helmlint --- .pre-commit-config.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index caa54d2799..16ff331263 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,6 +27,12 @@ repos: language: system pass_filenames: false + # Check chart for possible issues + - repo: https://github.com/gruntwork-io/pre-commit + rev: v0.1.10 + hooks: + - id: helmlint + # The hooks in this file are automatically installed by pre-commit, but shellcheck # must be manually installed so it is included in .pre-commit-shellcheck.yaml # instead. 
To run it: From 9fc136306ea219f9e68f7074be0880e5091f3246 Mon Sep 17 00:00:00 2001 From: Simon Li Date: Sun, 17 Jan 2021 19:23:27 +0000 Subject: [PATCH 05/15] Enable prettier --- .pre-commit-config.yaml | 6 ++++++ .prettierignore | 2 ++ 2 files changed, 8 insertions(+) create mode 100644 .prettierignore diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 16ff331263..e5702502f7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -33,6 +33,12 @@ repos: hooks: - id: helmlint + # Autoformat multiple file types + - repo: https://github.com/prettier/prettier + rev: 2.1.2 + hooks: + - id: prettier + # The hooks in this file are automatically installed by pre-commit, but shellcheck # must be manually installed so it is included in .pre-commit-shellcheck.yaml # instead. To run it: diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 0000000000..4453d15a0b --- /dev/null +++ b/.prettierignore @@ -0,0 +1,2 @@ +jupyterhub/templates/ +*.md From 847789b5b44b711aa9ff466bd8e6a70648d4131c Mon Sep 17 00:00:00 2001 From: Simon Li Date: Sun, 17 Jan 2021 19:20:15 +0000 Subject: [PATCH 06/15] Fix prettier error in lint-and-validate-values.yaml ``` tools/templates/lint-and-validate-values.yaml [error] tools/templates/lint-and-validate-values.yaml: SyntaxError: Map keys must be unique; "nodeSelector" is repeated (11:3) ``` --- tools/templates/lint-and-validate-values.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/tools/templates/lint-and-validate-values.yaml b/tools/templates/lint-and-validate-values.yaml index be9286b3a7..5ba0d71061 100644 --- a/tools/templates/lint-and-validate-values.yaml +++ b/tools/templates/lint-and-validate-values.yaml @@ -15,8 +15,6 @@ hub: baseUrl: / cookieSecret: mock publicURL: mock-public-url - nodeSelector: - node-type: mock activeServerLimit: 3 deploymentStrategy: type: Recreate From 92bfb0cbabb5fd92f8c370f2d5c033b6137c2a30 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 
07:27:27 +0100 Subject: [PATCH 07/15] pre-commit: update to non-deprecated prettier hook --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e5702502f7..24d72a50b0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -34,8 +34,8 @@ repos: - id: helmlint # Autoformat multiple file types - - repo: https://github.com/prettier/prettier - rev: 2.1.2 + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v2.2.1 hooks: - id: prettier From 049a86632ba5296a4e544197cc2f3d4a71777d7b Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 07:28:17 +0100 Subject: [PATCH 08/15] pre-commit: rename config file for readability --- .github/workflows/test-chart.yaml | 2 +- ...commit-shellcheck.yaml => .pre-commit-config-shellcheck.yaml | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename .pre-commit-shellcheck.yaml => .pre-commit-config-shellcheck.yaml (100%) diff --git a/.github/workflows/test-chart.yaml b/.github/workflows/test-chart.yaml index 8ce9b7d442..298a30bc86 100644 --- a/.github/workflows/test-chart.yaml +++ b/.github/workflows/test-chart.yaml @@ -42,7 +42,7 @@ jobs: - uses: pre-commit/action@v2.0.0 - uses: pre-commit/action@v2.0.0 with: - extra_args: --config .pre-commit-shellcheck.yaml + extra_args: --config .pre-commit-config-shellcheck.yaml - name: Lint and validate run: tools/templates/lint-and-validate.py diff --git a/.pre-commit-shellcheck.yaml b/.pre-commit-config-shellcheck.yaml similarity index 100% rename from .pre-commit-shellcheck.yaml rename to .pre-commit-config-shellcheck.yaml From 3357c427e650aaef9e06bf54d2f5d46d2e75a684 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 07:45:16 +0100 Subject: [PATCH 09/15] pre-commit: update comments --- .pre-commit-config-shellcheck.yaml | 1 + .pre-commit-config.yaml | 41 +++++++++++++++++++----------- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git 
a/.pre-commit-config-shellcheck.yaml b/.pre-commit-config-shellcheck.yaml index 3a4333e4c7..1fdd1bd16c 100644 --- a/.pre-commit-config-shellcheck.yaml +++ b/.pre-commit-config-shellcheck.yaml @@ -1,3 +1,4 @@ +# See .pre-commit-config.yaml for more details. repos: - repo: https://github.com/gruntwork-io/pre-commit rev: v0.1.10 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 24d72a50b0..8060a0721a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,22 +1,44 @@ -# pre-commit is a tool to automatically do tasks before committing. +# pre-commit is a tool to perform a predefined set of tasks manually and/or +# automatically before git commits are made. # # Config reference: https://pre-commit.com/#pre-commit-configyaml---top-level # +# Common tasks +# +# - Run on all files: pre-commit run --all-files +# - Register git hooks: pre-commit install --install-hooks +# +# About bash scripts autoformatting +# +# All hooks in this file can automatically be installed by pre-commit, but the +# hook we want to use for bash script formatting, shellcheck, must be manually +# installed. If needed, install it and run it using the link and snippet below. 
+# +# https://github.com/koalaman/shellcheck#installing +# +# pre-commit run --config .pre-commit-config-shellcheck.yaml --all-files +# repos: - # Python code formatting + # Autoformat: Python code - repo: https://github.com/ambv/black rev: 20.8b1 hooks: - id: black args: [--target-version=py36] - # Shell script code formatting + # Autoformat: Bash scripts - repo: https://github.com/lovesegfault/beautysh rev: 6.0.1 hooks: - id: beautysh - # Reset changes by chartpress + # Autoformat: markdown, yaml (but not helm templates) + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v2.2.1 + hooks: + - id: prettier + + # Reset Chart.yaml version and values.yaml image tags - repo: local hooks: - id: chartpress @@ -32,14 +54,3 @@ repos: rev: v0.1.10 hooks: - id: helmlint - - # Autoformat multiple file types - - repo: https://github.com/pre-commit/mirrors-prettier - rev: v2.2.1 - hooks: - - id: prettier - -# The hooks in this file are automatically installed by pre-commit, but shellcheck -# must be manually installed so it is included in .pre-commit-shellcheck.yaml -# instead. To run it: -# pre-commit run --config .pre-commit-shellcheck.yaml --all-files From 66e420a59f8e14efb2b0c410f96f2ef9fdcc6eb8 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 07:46:05 +0100 Subject: [PATCH 10/15] pre-commit: bump shellcheck --- .pre-commit-config-shellcheck.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config-shellcheck.yaml b/.pre-commit-config-shellcheck.yaml index 1fdd1bd16c..68a0c352f2 100644 --- a/.pre-commit-config-shellcheck.yaml +++ b/.pre-commit-config-shellcheck.yaml @@ -1,6 +1,6 @@ # See .pre-commit-config.yaml for more details. 
repos: - repo: https://github.com/gruntwork-io/pre-commit - rev: v0.1.10 + rev: v0.1.12 hooks: - id: shellcheck From 9a902ea0377285ceb64ff440193c889012e33840 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 08:08:31 +0100 Subject: [PATCH 11/15] pre-commit: allow prettier to format markdown --- .prettierignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.prettierignore b/.prettierignore index 4453d15a0b..9ad205279b 100644 --- a/.prettierignore +++ b/.prettierignore @@ -1,2 +1 @@ jupyterhub/templates/ -*.md From e2d1471276be40fc6a29071643c39c8a912d094a Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 09:46:02 +0100 Subject: [PATCH 12/15] bump required chartpress version in dev-requirements.txt 1.0.4 contains a bugfix making chartpress no longer forcefully wrap image tags set in values.yaml with single quotes. --- dev-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-requirements.txt b/dev-requirements.txt index accff8f726..ad2cf86d0b 100644 --- a/dev-requirements.txt +++ b/dev-requirements.txt @@ -5,7 +5,7 @@ # # ref: https://github.com/jupyterhub/chartpress # -chartpress +chartpress>=1.0.4 # pytest run tests that require requests and pyyaml pytest>=3.7.1 From 1ba382c60122b5f272e5ef555358c98bbe51766f Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 09:55:31 +0100 Subject: [PATCH 13/15] prettier autoformat: css --- doc/source/_static/custom.css | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/_static/custom.css b/doc/source/_static/custom.css index e55a6a55eb..5da1156171 100644 --- a/doc/source/_static/custom.css +++ b/doc/source/_static/custom.css @@ -1,4 +1,4 @@ -/* Added to avoid logo being too squeezed */ -.navbar-brand { - height: 4rem !important; -} +/* Added to avoid logo being too squeezed */ +.navbar-brand { + height: 4rem !important; +} From cd89b41763ac0f47e2fe719e3e9a07f0114472db Mon Sep 17 00:00:00 2001 From: Erik Sundell 
Date: Wed, 20 Jan 2021 09:56:23 +0100 Subject: [PATCH 14/15] prettier autoformat: yaml --- .github/dependabot.yaml | 2 +- .github/workflows/publish.yml | 3 +- .github/workflows/test-chart.yaml | 6 +- .github/workflows/test-docs.yaml | 3 +- .github/workflows/vuln-scan.yaml | 17 ++--- dev-config.yaml | 18 ++--- jupyterhub/Chart.yaml | 4 +- .../files/userscheduler-defaultpolicy.yaml | 51 ++++++------- jupyterhub/schema.yaml | 32 ++++---- jupyterhub/values.yaml | 73 +++++++++---------- tools/templates/lint-and-validate-values.yaml | 68 ++++++++--------- tools/templates/yamllint-config.yaml | 13 ++-- 12 files changed, 133 insertions(+), 157 deletions(-) diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 75a54b3f8b..81dbf0631a 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -37,7 +37,7 @@ updates: # Maintain dependencies in our GitHub Workflows - package-ecosystem: "github-actions" - directory: "/" # This should be / rather than .github/workflows + directory: "/" # This should be / rather than .github/workflows schedule: interval: daily time: "05:00" diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 2eee261339..5c4bfd6e86 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -9,7 +9,6 @@ on: branches: ["main", "master"] tags: ["[0-9]+.[0-9]+.[0-9]+*"] - jobs: # Builds and pushes docker images to DockerHub and package the Helm chart and # pushes it to jupyterhub/helm-chart@gh-pages where index.yaml represents the @@ -28,7 +27,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 with: - python-version: '3.8' + python-version: "3.8" - name: Install chart publishing dependencies (chartpress, helm) run: | diff --git a/.github/workflows/test-chart.yaml b/.github/workflows/test-chart.yaml index 298a30bc86..27c41fe354 100644 --- a/.github/workflows/test-chart.yaml +++ b/.github/workflows/test-chart.yaml @@ -23,7 +23,6 @@ on: - "dependabot/**" workflow_dispatch: - jobs: 
lint_and_validate: runs-on: ubuntu-20.04 @@ -31,7 +30,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 with: - python-version: '3.8' + python-version: "3.8" - name: Install dependencies run: | @@ -51,7 +50,6 @@ jobs: run: tools/templates/lint-and-validate.py --strict continue-on-error: true - test: runs-on: ubuntu-20.04 timeout-minutes: 20 @@ -117,7 +115,7 @@ jobs: # environment and setup in a fraction of a second. - uses: actions/setup-python@v2 with: - python-version: '3.8' + python-version: "3.8" # Install a local ACME server to fill the role of Let's Encrypt (LE). We # do this as the HTTP challenge sent out by an ACME server must be able to diff --git a/.github/workflows/test-docs.yaml b/.github/workflows/test-docs.yaml index b53ceb583b..c8dbdf4fea 100644 --- a/.github/workflows/test-docs.yaml +++ b/.github/workflows/test-docs.yaml @@ -20,7 +20,6 @@ on: - "dependabot/**" workflow_dispatch: - jobs: linkcheck: runs-on: ubuntu-20.04 @@ -33,7 +32,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v2 with: - python-version: '3.8' + python-version: "3.8" - name: Install deps run: pip install --no-cache-dir -r doc/doc-requirements.txt diff --git a/.github/workflows/vuln-scan.yaml b/.github/workflows/vuln-scan.yaml index e35922b908..51550e944e 100644 --- a/.github/workflows/vuln-scan.yaml +++ b/.github/workflows/vuln-scan.yaml @@ -14,7 +14,6 @@ on: - cron: "0 0 * * *" workflow_dispatch: - jobs: trivy_image_scan: if: github.repository == 'jupyterhub/zero-to-jupyterhub-k8s' @@ -45,7 +44,7 @@ jobs: mkdir ./tmp - uses: actions/setup-python@v2 with: - python-version: '3.8' + python-version: "3.8" - name: Install chartpress run: | pip install chartpress @@ -72,11 +71,11 @@ jobs: uses: aquasecurity/trivy-action@master with: image-ref: ${{ steps.image.outputs.spec }} - format: json # ref: https://github.com/aquasecurity/trivy#save-the-results-as-json + format: json # ref: https://github.com/aquasecurity/trivy#save-the-results-as-json output: 
tmp/scan_1.json ignore-unfixed: true - severity: 'CRITICAL,HIGH' - exit-code: '1' + severity: "CRITICAL,HIGH" + exit-code: "1" # Keep running the subsequent steps of the job, they are made to # explicitly adjust based on this step's outcome. continue-on-error: true @@ -96,10 +95,10 @@ jobs: uses: aquasecurity/trivy-action@master with: image-ref: rebuilt-image - format: json # ref: https://github.com/aquasecurity/trivy#save-the-results-as-json + format: json # ref: https://github.com/aquasecurity/trivy#save-the-results-as-json output: tmp/scan_2.json ignore-unfixed: true - severity: 'CRITICAL,HIGH' + severity: "CRITICAL,HIGH" # Analyze the scan reports. If they differ, we want to proceed and create # or update a PR. We use a hash from the final scan report as an @@ -156,7 +155,7 @@ jobs: image-ref: rebuilt-image format: table ignore-unfixed: true - severity: 'CRITICAL,HIGH' + severity: "CRITICAL,HIGH" - name: Decision to not proceed if: steps.analyze.outputs.proceed == 'no' @@ -192,7 +191,7 @@ jobs: title: "Vulnerability patch in ${{ matrix.image_ref }}" body: | A rebuild of `${{ steps.image.outputs.name }}` has been found to influence the detected vulnerabilities! This PR will trigger a rebuild because it has updated a comment in the Dockerfile. - + ## About This scan for known vulnerabilities has been made by [aquasecurity/trivy](https://github.com/aquasecurity/trivy). 
Trivy was configured to filter the vulnerabilities with the following settings: - severity: `CRITICAL,HIGH` diff --git a/dev-config.yaml b/dev-config.yaml index 80e0cf0dc8..f30a372c87 100644 --- a/dev-config.yaml +++ b/dev-config.yaml @@ -11,7 +11,7 @@ proxy: hosts: - local.jovyan.org letsencrypt: - contactEmail: 'jovyan@jupyter.test' + contactEmail: "jovyan@jupyter.test" acmeServer: https://pebble/dir traefik: extraVolumes: @@ -30,7 +30,7 @@ proxy: memory: 0 cpu: 0 networkPolicy: - egress: [] # overrides allowance of 0.0.0.0/0 + egress: [] # overrides allowance of 0.0.0.0/0 hub: cookieSecret: cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc @@ -41,11 +41,11 @@ hub: admin: true apiToken: ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss networkPolicy: - egress: # overrides allowance of 0.0.0.0/0 + egress: # overrides allowance of 0.0.0.0/0 # In kind/k3s clusters the Kubernetes API server is exposing this port - ports: - - protocol: TCP - port: 6443 + - protocol: TCP + port: 6443 resources: requests: memory: 0 @@ -60,10 +60,10 @@ singleuser: # For testing purposes in test_singleuser_netpol egress: - to: - # jupyter.org has multiple IPs associated with it, among them are these - # two. We explicitly allow access to one, but leave out the the other. - - ipBlock: - cidr: 104.28.8.110/32 + # jupyter.org has multiple IPs associated with it, among them are these + # two. We explicitly allow access to one, but leave out the the other. 
+ - ipBlock: + cidr: 104.28.8.110/32 # - ipBlock: # cidr: 104.28.9.110/32 extraEnv: diff --git a/jupyterhub/Chart.yaml b/jupyterhub/Chart.yaml index 546f1972f9..140066790f 100644 --- a/jupyterhub/Chart.yaml +++ b/jupyterhub/Chart.yaml @@ -9,8 +9,8 @@ home: https://z2jh.jupyter.org sources: - https://github.com/jupyterhub/zero-to-jupyterhub-k8s icon: https://jupyter.org/assets/hublogo.svg -kubeVersion: '>=1.14.0-0' -tillerVersion: '>=2.16.0-0' +kubeVersion: ">=1.14.0-0" +tillerVersion: ">=2.16.0-0" maintainers: # Since it is a requirement of Artifact Hub to have specific maintainers # listed, we have added some below, but in practice the entire JupyterHub team diff --git a/jupyterhub/files/userscheduler-defaultpolicy.yaml b/jupyterhub/files/userscheduler-defaultpolicy.yaml index 455a9abeee..0bf55061cb 100644 --- a/jupyterhub/files/userscheduler-defaultpolicy.yaml +++ b/jupyterhub/files/userscheduler-defaultpolicy.yaml @@ -1,32 +1,33 @@ { "kind": "Policy", "apiVersion": "v1", - "predicates": [ - { "name": "PodFitsResources" }, - { "name": "HostName" }, - { "name": "PodFitsHostPorts" }, - { "name": "MatchNodeSelector" }, - { "name": "NoDiskConflict" }, - { "name": "PodToleratesNodeTaints" }, - { "name": "MaxEBSVolumeCount" }, - { "name": "MaxGCEPDVolumeCount" }, - { "name": "MaxAzureDiskVolumeCount" }, - { "name": "MaxCSIVolumeCountPred" }, - { "name": "CheckVolumeBinding" }, - { "name": "NoVolumeZoneConflict" }, - { "name": "MatchInterPodAffinity" } - ], - "priorities": [ - { "name": "NodePreferAvoidPodsPriority", "weight": 161051 }, - { "name": "NodeAffinityPriority", "weight": 14641 }, - { "name": "InterPodAffinityPriority", "weight": 1331 }, - { "name": "MostRequestedPriority", "weight": 121 }, - { "name": "ImageLocalityPriority", "weight": 11} - ], - "hardPodAffinitySymmetricWeight" : 100, - "alwaysCheckAllPredicates" : false + "predicates": + [ + { "name": "PodFitsResources" }, + { "name": "HostName" }, + { "name": "PodFitsHostPorts" }, + { "name": 
"MatchNodeSelector" }, + { "name": "NoDiskConflict" }, + { "name": "PodToleratesNodeTaints" }, + { "name": "MaxEBSVolumeCount" }, + { "name": "MaxGCEPDVolumeCount" }, + { "name": "MaxAzureDiskVolumeCount" }, + { "name": "MaxCSIVolumeCountPred" }, + { "name": "CheckVolumeBinding" }, + { "name": "NoVolumeZoneConflict" }, + { "name": "MatchInterPodAffinity" }, + ], + "priorities": + [ + { "name": "NodePreferAvoidPodsPriority", "weight": 161051 }, + { "name": "NodeAffinityPriority", "weight": 14641 }, + { "name": "InterPodAffinityPriority", "weight": 1331 }, + { "name": "MostRequestedPriority", "weight": 121 }, + { "name": "ImageLocalityPriority", "weight": 11 }, + ], + "hardPodAffinitySymmetricWeight": 100, + "alwaysCheckAllPredicates": false, } - # # Notes about ranges # ImageLocalityPriority - ranges from 0-10 * 11 # MostRequestedPriority - ranges from 0-10 * 11^2 diff --git a/jupyterhub/schema.yaml b/jupyterhub/schema.yaml index 45b631fc9e..f60368df97 100644 --- a/jupyterhub/schema.yaml +++ b/jupyterhub/schema.yaml @@ -28,7 +28,7 @@ properties: avoid hardcoding names. If you want to reference the name of a resource in this chart from a parent helm chart's template, you can make use of the global named templates instead. - + ```yaml # some pod definition of a parent chart helm template schedulerName: {{ include "jupyterhub.user-scheduler.fullname" . }} @@ -252,7 +252,7 @@ properties: diagnose, and can due to this could cause a lot of time expenditure for both the community maintaining the Helm chart as well as yourself, even if this wasn't the reason for the issue. - + Due to this, we ask that you do your _absolute best to avoid replacing the default provided `jupyterhub_config.py` file. It can often be possible. For example, if your goal is to have a dedicated .py file @@ -339,7 +339,7 @@ properties: description: | A list of references to existing Kubernetes Secrets with credentials to pull the image. 
- + This Pod's final `imagePullSecrets` k8s specification will be a combination of: @@ -351,12 +351,12 @@ properties: conditionally created from image registry credentials provided under `imagePullSecret` if `imagePullSecret.create` is set to true. - + ```yaml # example - k8s native syntax pullSecrets: - name: my-k8s-secret-with-image-registry-credentials - + # example - simplified syntax pullSecrets: - my-k8s-secret-with-image-registry-credentials @@ -409,7 +409,7 @@ properties: what the origin of the request is. The default setting for `proxy.chp` and `proxy.traefik`'s networkPolicy configuration is `[http, https]`, while it is `[]` for other networkPolicies. - + Note that these port names or numbers target a Pod's port name or number, not a k8s Service's port name or number. db: @@ -540,7 +540,7 @@ properties: type: list description: | list of initContainers to be run with hub pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) - + ```yaml hub: initContainers: @@ -625,8 +625,7 @@ properties: uid: type: integer minimum: 0 - description: - The UID the hub process should be running as. + description: The UID the hub process should be running as. Use this only if you are building your own image & know that a user with this uid exists inside the hub container! Advanced feature, handle with care! @@ -763,7 +762,6 @@ properties: docs](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) for more info. - proxy: type: object properties: @@ -861,7 +859,7 @@ properties: `proxy` pod, a k8s Service named `proxy-http` will be added targeting the `proxy` pod and only accepting HTTP traffic on port 80. properties: - type: + type: type: string enum: - ClusterIP @@ -919,7 +917,7 @@ properties: be exposed on. This entry will end up at the configurable proxy server that JupyterHub manages, which will direct traffic to user pods at the `/user` path and the hub pod at the `/hub` path. 
- + Set this if you want to use a fixed external IP address instead of a dynamically acquired one. This is relevant if you have a domain name that you want to point to a specific IP and want to ensure it @@ -1016,7 +1014,7 @@ properties: ``` hosts: - - ``` + ``` traefik: type: object description: | @@ -1072,7 +1070,6 @@ properties: required: - secretToken - singleuser: type: object description: | @@ -1101,7 +1098,7 @@ properties: type: object description: | Set Memory limits & guarantees that are enforced for each user. - + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for more info. properties: @@ -1120,7 +1117,7 @@ properties: type: list description: | list of initContainers to be run every singleuser pod. See [Kubernetes Docs](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) - + ```yaml singleuser: initContainers: @@ -1208,7 +1205,7 @@ properties: certain label (node affinity). They may also require to be scheduled in proximity or with a lack of proximity to another pod (pod affinity and anti pod affinity). - + See the [Kubernetes docs](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for more info. @@ -1260,7 +1257,6 @@ properties: [`WeightedPodAffinityTerm`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#weightedpodaffinityterm-v1-core) objects. - scheduling: type: object description: | diff --git a/jupyterhub/values.yaml b/jupyterhub/values.yaml index 9f8e3ef857..1451eeb3db 100644 --- a/jupyterhub/values.yaml +++ b/jupyterhub/values.yaml @@ -12,15 +12,14 @@ custom: {} imagePullSecret: create: false automaticReferenceInjection: true - registry: '' - username: '' - email: '' - password: '' + registry: "" + username: "" + email: "" + password: "" # imagePullSecrets is configuration to reference the k8s Secret resources the # Helm chart's pods can get credentials from to pull their images. 
imagePullSecrets: [] - # hub relates to the hub pod, responsible for running JupyterHub, its configured # Authenticator class KubeSpawner, and its configured Proxy class # ConfigurableHTTPProxy. KubeSpawner creates the user pods, and @@ -80,8 +79,8 @@ hub: extraVolumeMounts: [] image: name: jupyterhub/k8s-hub - tag: 'set-by-chartpress' - pullPolicy: '' + tag: "set-by-chartpress" + pullPolicy: "" pullSecrets: [] resources: requests: @@ -140,11 +139,10 @@ hub: rbac: enabled: true - # proxy relates to the proxy pod, the proxy-public service, and the autohttps # pod and proxy-http service. proxy: - secretToken: '' + secretToken: "" annotations: {} deploymentStrategy: ## type: Recreate @@ -187,13 +185,13 @@ proxy: # on dynamic configuration sent from JupyterHub to CHP's REST API. chp: containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false image: name: jupyterhub/configurable-http-proxy tag: 4.2.2 - pullPolicy: '' + pullPolicy: "" pullSecrets: [] extraCommandLineFlags: [] livenessProbe: @@ -228,7 +226,7 @@ proxy: # termination when proxy.https.type=letsencrypt. traefik: containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false image: @@ -239,7 +237,7 @@ proxy: # past. For example: # https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1857. 
tag: v2.3.7 # ref: https://hub.docker.com/_/traefik?tab=tags - pullPolicy: '' + pullPolicy: "" pullSecrets: [] hsts: includeSubdomains: false @@ -268,13 +266,13 @@ proxy: minAvailable: 1 secretSync: containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false image: name: jupyterhub/k8s-secret-sync - tag: 'set-by-chartpress' - pullPolicy: '' + tag: "set-by-chartpress" + pullPolicy: "" pullSecrets: [] resources: {} labels: {} @@ -283,19 +281,18 @@ proxy: type: letsencrypt #type: letsencrypt, manual, offload, secret letsencrypt: - contactEmail: '' + contactEmail: "" # Specify custom server here (https://acme-staging-v02.api.letsencrypt.org/directory) to hit staging LE acmeServer: https://acme-v02.api.letsencrypt.org/directory manual: key: cert: secret: - name: '' + name: "" key: tls.key crt: tls.crt hosts: [] - # singleuser relates to the configuration of KubeSpawner which runs in the hub # pod, and its spawning of user pods such as jupyter-myusername. 
singleuser: @@ -314,8 +311,8 @@ singleuser: networkTools: image: name: jupyterhub/k8s-network-tools - tag: 'set-by-chartpress' - pullPolicy: '' + tag: "set-by-chartpress" + pullPolicy: "" pullSecrets: [] cloudMetadata: # block set to true will append a privileged initContainer using the @@ -342,7 +339,7 @@ singleuser: events: true extraAnnotations: {} extraLabels: - hub.jupyter.org/network-access-hub: 'true' + hub.jupyter.org/network-access-hub: "true" extraEnv: {} lifecycleHooks: {} initContainers: [] @@ -357,7 +354,7 @@ singleuser: extraVolumeMounts: [] static: pvcName: - subPath: '{username}' + subPath: "{username}" capacity: 10Gi homeMountPath: /home/jovyan dynamic: @@ -367,8 +364,8 @@ singleuser: storageAccessModes: [ReadWriteOnce] image: name: jupyterhub/k8s-singleuser-sample - tag: 'set-by-chartpress' - pullPolicy: '' + tag: "set-by-chartpress" + pullPolicy: "" pullSecrets: [] startTimeout: 300 cpu: @@ -385,7 +382,6 @@ singleuser: extraPodConfig: {} profileList: [] - # scheduling relates to the user-scheduler pods and user-placeholder pods. scheduling: userScheduler: @@ -419,7 +415,7 @@ scheduling: - name: ImageLocality weight: 11 containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false image: @@ -428,7 +424,7 @@ scheduling: # that we have forked. 
name: k8s.gcr.io/kube-scheduler tag: v1.19.7 - pullPolicy: '' + pullPolicy: "" pullSecrets: [] nodeSelector: {} tolerations: [] @@ -448,7 +444,7 @@ scheduling: enabled: true replicas: 0 containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false corePods: @@ -458,7 +454,6 @@ scheduling: nodeAffinity: matchNodePurpose: prefer - # prePuller relates to the hook|continuous-image-puller DaemonsSets prePuller: annotations: {} @@ -467,7 +462,7 @@ prePuller: cpu: 0 memory: 0 containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false extraTolerations: [] @@ -477,11 +472,11 @@ prePuller: # image and the configuration below relates to the hook-image-awaiter Job image: name: jupyterhub/k8s-image-awaiter - tag: 'set-by-chartpress' - pullPolicy: '' + tag: "set-by-chartpress" + pullPolicy: "" pullSecrets: [] containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false podSchedulingWaitDuration: 10 @@ -497,23 +492,22 @@ prePuller: extraImages: {} pause: containerSecurityContext: - runAsUser: 65534 # nobody user + runAsUser: 65534 # nobody user runAsGroup: 65534 # nobody group allowPrivilegeEscalation: false image: name: k8s.gcr.io/pause - tag: '3.2' # https://console.cloud.google.com/gcr/images/google-containers/GLOBAL/pause?gcrImageListsize=30 - pullPolicy: '' + tag: "3.2" # https://console.cloud.google.com/gcr/images/google-containers/GLOBAL/pause?gcrImageListsize=30 + pullPolicy: "" pullSecrets: [] ingress: enabled: false annotations: {} hosts: [] - pathSuffix: '' + pathSuffix: "" tls: [] - cull: enabled: true users: false @@ -523,7 +517,6 @@ cull: concurrency: 10 maxAge: 0 - debug: enabled: false diff --git a/tools/templates/lint-and-validate-values.yaml 
b/tools/templates/lint-and-validate-values.yaml index 5ba0d71061..14c6bd3307 100644 --- a/tools/templates/lint-and-validate-values.yaml +++ b/tools/templates/lint-and-validate-values.yaml @@ -5,7 +5,7 @@ imagePullSecret: username: U email: e@domain.com password: P -imagePullSecrets: [a,b] +imagePullSecrets: [a, b] hub: service: @@ -94,18 +94,16 @@ hub: value: "mock-taint-to-tolerates-value" effect: "NoExecute" - rbac: enabled: true - proxy: - secretToken: '0000000000000000000000000000000000000000000000000000000000000000' + secretToken: "0000000000000000000000000000000000000000000000000000000000000000" service: extraPorts: - - name: ssh - port: 22 - targetPort: ssh + - name: ssh + port: 22 + targetPort: ssh type: LoadBalancer labels: MOCK_PROXY_ENV: mock @@ -145,8 +143,8 @@ proxy: enabled: true traefik: extraPorts: - - name: ssh - containerPort: 8022 + - name: ssh + containerPort: 8022 labels: hub.jupyter.org/test-label: mock resources: @@ -196,17 +194,16 @@ proxy: type: letsencrypt #type: letsencrypt, manual, offload, secret letsencrypt: - contactEmail: 'e@domain.com' + contactEmail: "e@domain.com" manual: key: mock-key cert: mock-cert secret: - name: 'mock-secret-name' - key: 'mock-key' - crt: 'mock-crt' + name: "mock-secret-name" + key: "mock-key" + crt: "mock-crt" hosts: [domain.com] - singleuser: profileList: - display_name: "mock display name 1" @@ -228,49 +225,49 @@ singleuser: extraNodeAffinity: required: - matchExpressions: - - key: hub.jupyter.org/test-required-node - operator: In - values: [test] + - key: hub.jupyter.org/test-required-node + operator: In + values: [test] preferred: - weight: 10 preference: matchExpressions: - - key: hub.jupyter.org/test-preferred-node - operator: In - values: [test] + - key: hub.jupyter.org/test-preferred-node + operator: In + values: [test] extraPodAffinity: required: - labelSelector: matchExpressions: - - key: hub.jupyter.org/test-required-pod - operator: In - values: [test] + - key: hub.jupyter.org/test-required-pod + 
operator: In + values: [test] topologyKey: failure-domain.beta.kubernetes.io/zone preferred: - weight: 10 podAffinityTerm: labelSelector: matchExpressions: - - key: hub.jupyter.org/test-preferred-pod - operator: In - values: [test] + - key: hub.jupyter.org/test-preferred-pod + operator: In + values: [test] topologyKey: kubernetes.io/hostname extraPodAntiAffinity: required: - labelSelector: matchExpressions: - - key: hub.jupyter.org/test-required-anti-pod - operator: In - values: [test] + - key: hub.jupyter.org/test-required-anti-pod + operator: In + values: [test] topologyKey: failure-domain.beta.kubernetes.io/zone preferred: - weight: 10 podAffinityTerm: labelSelector: matchExpressions: - - key: hub.jupyter.org/test-preferred-anti-pod - operator: In - values: [test] + - key: hub.jupyter.org/test-preferred-anti-pod + operator: In + values: [test] topologyKey: kubernetes.io/hostname cloudMetadata: blockWithIptables: true @@ -308,7 +305,7 @@ singleuser: extraVolumeMounts: [] static: pvcName: - subPath: '{username}' + subPath: "{username}" capacity: 10Gi homeMountPath: /home/jovyan dynamic: @@ -331,7 +328,6 @@ singleuser: cmd: jupyterhub-singleuser defaultUrl: / - scheduling: userScheduler: enabled: true @@ -363,7 +359,6 @@ scheduling: nodeAffinity: matchNodePurpose: require - prePuller: extraTolerations: - key: "mock-taint-to-tolerates-key" @@ -399,7 +394,6 @@ prePuller: name: mock-user/mock-image2 tag: mock-tag - ingress: enabled: true annotations: @@ -413,7 +407,6 @@ ingress: - mocked1.domain.name - mocked2.domain.name - cull: enabled: true users: true @@ -422,6 +415,5 @@ cull: concurrency: 10 maxAge: 28800 - debug: enabled: true diff --git a/tools/templates/yamllint-config.yaml b/tools/templates/yamllint-config.yaml index f86d35a644..452b236c01 100644 --- a/tools/templates/yamllint-config.yaml +++ b/tools/templates/yamllint-config.yaml @@ -17,11 +17,11 @@ rules: min-spaces-after: 0 max-spaces-after: 1 comments: - require-starting-space: false # Default: true (*) + 
require-starting-space: false # Default: true (*) min-spaces-from-content: 2 comments-indentation: {} document-end: disable - document-start: disable # Default: { present: true } + document-start: disable # Default: { present: true } empty-lines: max: 2 max-start: 0 @@ -32,12 +32,12 @@ rules: hyphens: max-spaces-after: 1 indentation: - spaces: 2 # Default: consistent - indent-sequences: whatever # Default true (**) + spaces: 2 # Default: consistent + indent-sequences: whatever # Default true (**) check-multi-line-strings: false key-duplicates: enable key-ordering: disable - line-length: disable # Default: { max: 80, ... } + line-length: disable # Default: { max: 80, ... } new-line-at-end-of-file: enable new-lines: type: unix @@ -47,7 +47,6 @@ rules: trailing-spaces: disable truthy: level: warning - # (*) Until we can use PR: https://github.com/kubernetes/helm/pull/3811 # (**) toYaml forces us to use false and be indent different amounts of spaced @@ -65,4 +64,4 @@ rules: # myList: # {{- .Values.list | toYaml | trimSuffix "\n" | nindent 0 }} # myDict: -# {{- .Values.dict | toYaml | trimSuffix "\n" | nindent 2 }} \ No newline at end of file +# {{- .Values.dict | toYaml | trimSuffix "\n" | nindent 2 }} From 6ee24df1f7d0aa8a2226eef90584ac9ebfd0b508 Mon Sep 17 00:00:00 2001 From: Erik Sundell Date: Wed, 20 Jan 2021 10:08:35 +0100 Subject: [PATCH 15/15] prettier autoformat: markdown --- CHANGELOG.md | 824 +++++++++--------- CONTRIBUTING.md | 50 +- README.md | 3 +- RELEASE.md | 8 +- doc/source/administrator/advanced.md | 13 +- doc/source/administrator/authentication.md | 13 +- doc/source/administrator/cost.md | 10 +- doc/source/administrator/debug.md | 5 +- doc/source/administrator/optimization.md | 206 ++--- doc/source/administrator/security.md | 52 +- doc/source/administrator/troubleshooting.md | 2 - doc/source/administrator/upgrading.md | 2 +- doc/source/index.md | 32 +- .../customizing/extending-jupyterhub.md | 7 +- .../customizing/user-environment.md | 43 +- 
.../jupyterhub/customizing/user-management.md | 6 +- .../jupyterhub/customizing/user-resources.md | 30 +- .../jupyterhub/customizing/user-storage.md | 14 +- doc/source/jupyterhub/installation.md | 45 +- doc/source/jupyterhub/uninstall.md | 10 +- doc/source/kubernetes/amazon/efs_storage.md | 45 +- .../kubernetes/amazon/step-zero-aws-eks.md | 79 +- doc/source/kubernetes/amazon/step-zero-aws.md | 56 +- .../digital-ocean/step-zero-digital-ocean.md | 8 +- doc/source/kubernetes/google/step-zero-gcp.md | 38 +- doc/source/kubernetes/ibm/step-zero-ibm.md | 51 +- .../microsoft/step-zero-azure-autoscale.md | 77 +- .../kubernetes/microsoft/step-zero-azure.md | 65 +- doc/source/kubernetes/ovh/step-zero-ovh.md | 28 +- doc/source/kubernetes/setup-helm.md | 6 +- doc/source/kubernetes/setup-helm2.md | 9 +- doc/source/repo2docker.md | 35 +- doc/source/resources/community.md | 10 +- doc/source/resources/glossary.md | 2 +- doc/source/resources/reference-docs.md | 6 +- doc/source/resources/tools.md | 14 +- images/image-awaiter/README.md | 4 +- images/singleuser-sample/README.md | 3 +- 38 files changed, 995 insertions(+), 916 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fc2f14444f..739647adfd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,6 @@ Here you can find upgrade changes in between releases and upgrade instructions. - ## [0.11] ### [0.11.1] - 2021-01-15 @@ -12,11 +11,11 @@ jupyterhub-nativeauthenticator from 0.0.6 to 0.0.7. 
#### Bugs fixed -* fix: fix of ingress regression and improved testing ([@consideRatio](https://github.com/consideRatio)) +- fix: fix of ingress regression and improved testing ([@consideRatio](https://github.com/consideRatio)) #### Maintenance and upkeep improvements -* build(deps): bump jupyterhub-nativeauthenticator from 0.0.6 to 0.0.7 in /images/hub [#1988](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1988) ([@dependabot](https://github.com/dependabot)) +- build(deps): bump jupyterhub-nativeauthenticator from 0.0.6 to 0.0.7 in /images/hub [#1988](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1988) ([@dependabot](https://github.com/dependabot)) ### [0.11.0] - 2021-01-14 @@ -43,7 +42,7 @@ to share insights that can be useful to others. #### Breaking changes -- __`auth` configuration moves to `hub.config` - [#1943](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1943)__ +- **`auth` configuration moves to `hub.config` - [#1943](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1943)** Helm chart configuration under `auth` is now no longer supported. If you make a `helm upgrade` using `auth` configuration, the upgrade will abort before any @@ -53,7 +52,8 @@ to share insights that can be useful to others. By default, the printed equivalent configuration is censored as it can contain secrets that shouldn't be exposed. By passing `--global.safeToShowValues=true` you can get an uncensored version. -- __Pod Disruption Budget's now disabled by default - [#1938](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1938)__ + +- **Pod Disruption Budget's now disabled by default - [#1938](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1938)** A Pod Disruption Budget (PDB) for the hub and proxy pods were created by default before, but will by default not be created from now on. The @@ -64,78 +64,78 @@ to share insights that can be useful to others. 
#### Notable dependencies updated -Dependency | Version in 0.10.6 | Version in 0.11.0 | Changelog link | Note --|-|-|-|- -[jupyterhub](https://github.com/jupyterhub/jupyterhub) | 1.2.2 | 1.3.0 | [Changelog](https://jupyterhub.readthedocs.io/en/stable/changelog.html) | Run in the `hub` pod -[kubespawner](https://github.com/jupyterhub/kubespawner) | 0.14.1 | 0.15.0 | [Changelog](https://jupyterhub-kubespawner.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod -[oauthenticator](https://github.com/jupyterhub/oauthenticator) | 0.12.1 | 0.12.3 | [Changelog](https://oauthenticator.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod -[ldapauthenticator](https://github.com/jupyterhub/ldapauthenticator) | 1.3.2 | 1.3.2 | [Changelog](https://github.com/jupyterhub/ldapauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod -[ltiauthenticator](https://github.com/jupyterhub/ltiauthenticator) | 0.4.0 | 1.0.0 | [Changelog](https://github.com/jupyterhub/ltiauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod -[nativeauthenticator](https://github.com/jupyterhub/nativeauthenticator) | 0.0.6 | 0.0.6 | [Changelog](https://github.com/jupyterhub/nativeauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod -[jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler) | 1.0 | 1.0 | - | Run in the `hub` pod -[configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy) | 4.2.2 | 4.2.2 | [Changelog](https://github.com/jupyterhub/configurable-http-proxy/blob/master/CHANGELOG.md) | Run in the `proxy` pod -[traefik](https://github.com/traefik/traefik) | v2.3.2 | v2.3.7 | [Changelog](https://github.com/traefik/traefik/blob/master/CHANGELOG.md) | Run in the `autohttps` pod -[kube-scheduler](https://github.com/kubernetes/kube-scheduler) | v1.19.2 | v1.19.7 | - | Run in the `user-scheduler` pod(s) +| Dependency | Version in 0.10.6 | Version in 0.11.0 | Changelog link | Note | +| 
-------------------------------------------------------------------------------- | ----------------- | ----------------- | ------------------------------------------------------------------------------------------- | ---------------------------------- | +| [jupyterhub](https://github.com/jupyterhub/jupyterhub) | 1.2.2 | 1.3.0 | [Changelog](https://jupyterhub.readthedocs.io/en/stable/changelog.html) | Run in the `hub` pod | +| [kubespawner](https://github.com/jupyterhub/kubespawner) | 0.14.1 | 0.15.0 | [Changelog](https://jupyterhub-kubespawner.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod | +| [oauthenticator](https://github.com/jupyterhub/oauthenticator) | 0.12.1 | 0.12.3 | [Changelog](https://oauthenticator.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod | +| [ldapauthenticator](https://github.com/jupyterhub/ldapauthenticator) | 1.3.2 | 1.3.2 | [Changelog](https://github.com/jupyterhub/ldapauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod | +| [ltiauthenticator](https://github.com/jupyterhub/ltiauthenticator) | 0.4.0 | 1.0.0 | [Changelog](https://github.com/jupyterhub/ltiauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod | +| [nativeauthenticator](https://github.com/jupyterhub/nativeauthenticator) | 0.0.6 | 0.0.6 | [Changelog](https://github.com/jupyterhub/nativeauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod | +| [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler) | 1.0 | 1.0 | - | Run in the `hub` pod | +| [configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy) | 4.2.2 | 4.2.2 | [Changelog](https://github.com/jupyterhub/configurable-http-proxy/blob/master/CHANGELOG.md) | Run in the `proxy` pod | +| [traefik](https://github.com/traefik/traefik) | v2.3.2 | v2.3.7 | [Changelog](https://github.com/traefik/traefik/blob/master/CHANGELOG.md) | Run in the `autohttps` pod | +| [kube-scheduler](https://github.com/kubernetes/kube-scheduler) | 
v1.19.2 | v1.19.7 | - | Run in the `user-scheduler` pod(s) | For a detailed list of how Python dependencies have change in the `hub` Pod's Docker image, inspect the [images/hub/requirements.txt](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/images/hub/requirements.txt) file. #### Enhancements made -* ci: automatically scan and patch our images for known vulnerabilities [#1942](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1942) ([@consideRatio](https://github.com/consideRatio)) +- ci: automatically scan and patch our images for known vulnerabilities [#1942](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1942) ([@consideRatio](https://github.com/consideRatio)) #### Bugs fixed -* Fix failure to block insecure metadata server IP [#1950](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1950) ([@consideRatio](https://github.com/consideRatio)) -* Enable hub livenessProbe by default and relax hub/proxy probes [#1941](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1941) ([@consideRatio](https://github.com/consideRatio)) -* Disable PDBs for hub/proxy, add PDB for autohttps, and relocate config proxy.pdb to proxy.chp.pdb [#1938](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1938) ([@consideRatio](https://github.com/consideRatio)) +- Fix failure to block insecure metadata server IP [#1950](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1950) ([@consideRatio](https://github.com/consideRatio)) +- Enable hub livenessProbe by default and relax hub/proxy probes [#1941](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1941) ([@consideRatio](https://github.com/consideRatio)) +- Disable PDBs for hub/proxy, add PDB for autohttps, and relocate config proxy.pdb to proxy.chp.pdb [#1938](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1938) ([@consideRatio](https://github.com/consideRatio)) #### Maintenance and upkeep improvements -* dep: bump traefik (autohttps pod) from v2.3.2 to 
v2.3.7 [#1986](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1986) ([@consideRatio](https://github.com/consideRatio)) -* k8s: update Ingress / PriorityClass apiVersions [#1983](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1983) ([@consideRatio](https://github.com/consideRatio)) -* dep: bump kube-scheduler from 1.19.2 to 1.19.7 [#1981](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1981) ([@consideRatio](https://github.com/consideRatio)) -* singleuser-sample image: bump jupyerhub to 1.3.0 [#1961](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1961) ([@consideRatio](https://github.com/consideRatio)) -* build(deps): bump jupyterhub from 1.2.2 to 1.3.0 in /images/hub [#1959](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1959) ([@dependabot](https://github.com/dependabot)) -* Vulnerability patch in network-tools [#1947](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1947) ([@github-actions](https://github.com/github-actions)) -* hub image: bump jupyterhub-kubespawner from 0.14.1 to 0.15.0 in /images/hub [#1946](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1946) ([@dependabot](https://github.com/dependabot)) -* Helm template linting - remove extra space [#1945](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1945) ([@DArtagan](https://github.com/DArtagan)) -* hub image: bump jupyterhub-hmacauthenticator from 0.1 to 1.0 in /images/hub [#1944](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1944) ([@dependabot](https://github.com/dependabot)) -* add hub.config passthrough and use it for all auth config [#1943](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1943) ([@consideRatio](https://github.com/consideRatio)) -* hub image: bump ltiauthenticator to 1.0.0 and oauthenticator to 0.12.3 [#1932](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1932) ([@consideRatio](https://github.com/consideRatio)) -* bump oauthenticator to 0.12.2 
[#1925](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1925) ([@minrk](https://github.com/minrk)) +- dep: bump traefik (autohttps pod) from v2.3.2 to v2.3.7 [#1986](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1986) ([@consideRatio](https://github.com/consideRatio)) +- k8s: update Ingress / PriorityClass apiVersions [#1983](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1983) ([@consideRatio](https://github.com/consideRatio)) +- dep: bump kube-scheduler from 1.19.2 to 1.19.7 [#1981](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1981) ([@consideRatio](https://github.com/consideRatio)) +- singleuser-sample image: bump jupyerhub to 1.3.0 [#1961](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1961) ([@consideRatio](https://github.com/consideRatio)) +- build(deps): bump jupyterhub from 1.2.2 to 1.3.0 in /images/hub [#1959](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1959) ([@dependabot](https://github.com/dependabot)) +- Vulnerability patch in network-tools [#1947](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1947) ([@github-actions](https://github.com/github-actions)) +- hub image: bump jupyterhub-kubespawner from 0.14.1 to 0.15.0 in /images/hub [#1946](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1946) ([@dependabot](https://github.com/dependabot)) +- Helm template linting - remove extra space [#1945](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1945) ([@DArtagan](https://github.com/DArtagan)) +- hub image: bump jupyterhub-hmacauthenticator from 0.1 to 1.0 in /images/hub [#1944](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1944) ([@dependabot](https://github.com/dependabot)) +- add hub.config passthrough and use it for all auth config [#1943](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1943) ([@consideRatio](https://github.com/consideRatio)) +- hub image: bump ltiauthenticator to 1.0.0 and oauthenticator to 0.12.3 
[#1932](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1932) ([@consideRatio](https://github.com/consideRatio)) +- bump oauthenticator to 0.12.2 [#1925](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1925) ([@minrk](https://github.com/minrk)) #### Documentation improvements -* docs: 100% MyST Markdown [#1974](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1974) ([@consideRatio](https://github.com/consideRatio)) -* docs: remove unused config of esoteric sphinx builders [#1969](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1969) ([@consideRatio](https://github.com/consideRatio)) -* docs: fix the dynamically set version of chart/jupyterhub [#1968](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1968) ([@consideRatio](https://github.com/consideRatio)) -* Adds a linebreak [#1957](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1957) ([@arokem](https://github.com/arokem)) -* Fixes link to authentication guide from user-management.md [#1955](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1955) ([@arokem](https://github.com/arokem)) -* Adds cli command for finding the k8s version on Azure. 
[#1954](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1954) ([@arokem](https://github.com/arokem)) +- docs: 100% MyST Markdown [#1974](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1974) ([@consideRatio](https://github.com/consideRatio)) +- docs: remove unused config of esoteric sphinx builders [#1969](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1969) ([@consideRatio](https://github.com/consideRatio)) +- docs: fix the dynamically set version of chart/jupyterhub [#1968](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1968) ([@consideRatio](https://github.com/consideRatio)) +- Adds a linebreak [#1957](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1957) ([@arokem](https://github.com/arokem)) +- Fixes link to authentication guide from user-management.md [#1955](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1955) ([@arokem](https://github.com/arokem)) +- Adds cli command for finding the k8s version on Azure. [#1954](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1954) ([@arokem](https://github.com/arokem)) #### Continuous integration improvements -* ci: accept helm lint --strict failure, but ensure GitHub UI warns [#1985](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1985) ([@consideRatio](https://github.com/consideRatio)) -* ci: replace kubeval with helm template --validate [#1984](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1984) ([@consideRatio](https://github.com/consideRatio)) -* ci: use extracted github action for namespace report [#1980](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1980) ([@consideRatio](https://github.com/consideRatio)) -* ci: add another upgrade test and provide a template rendering diff [#1978](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1978) ([@consideRatio](https://github.com/consideRatio)) -* ci: linkcheck rework: avoid duplicated build, add colors, make it fail loud 
[#1976](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1976) ([@consideRatio](https://github.com/consideRatio)) -* ci: run tests conditionally on changed paths [#1975](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1975) ([@consideRatio](https://github.com/consideRatio)) -* ci: use k3s-channel instead of k3s-version [#1973](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1973) ([@consideRatio](https://github.com/consideRatio)) -* ci: full_namespace_report improvements for restartCount > 0 [#1971](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1971) ([@consideRatio](https://github.com/consideRatio)) -* pre-commit: chartpress --reset on Chart.yaml/values.yaml changes [#1970](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1970) ([@consideRatio](https://github.com/consideRatio)) -* ci: full_namespace_report function improved [#1967](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1967) ([@consideRatio](https://github.com/consideRatio)) -* ci: dependabot, add notes to config, fix singleuser-sample config [#1966](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1966) ([@consideRatio](https://github.com/consideRatio)) -* ci: let pytest keep running even if one test has failed [#1965](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1965) ([@consideRatio](https://github.com/consideRatio)) -* ci: help dependabot only trigger one set of tests [#1964](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1964) ([@consideRatio](https://github.com/consideRatio)) -* ci: remove yaml anchors from dependabot config [#1963](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1963) ([@consideRatio](https://github.com/consideRatio)) -* ci: Test against k8s 1.20 [#1956](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1956) ([@consideRatio](https://github.com/consideRatio)) -* ci: vuln scan fix [#1953](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1953) 
([@consideRatio](https://github.com/consideRatio)) -* ci: let dependabot update used GitHub action's versions [#1949](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1949) ([@consideRatio](https://github.com/consideRatio)) -* ci: let dependabot update jupyterhub, replace JUPYTERHUB_VERSION with PIP_OVERRIDES [#1948](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1948) ([@consideRatio](https://github.com/consideRatio)) -* ci: automatically scan and patch our images for known vulnerabilities [#1942](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1942) ([@consideRatio](https://github.com/consideRatio)) -* ci: action-k3s-helm was moved to jupyterhub [#1939](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1939) ([@manics](https://github.com/manics)) -* ci: fix of intermittent netpol test failure [#1933](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1933) ([@consideRatio](https://github.com/consideRatio)) +- ci: accept helm lint --strict failure, but ensure GitHub UI warns [#1985](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1985) ([@consideRatio](https://github.com/consideRatio)) +- ci: replace kubeval with helm template --validate [#1984](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1984) ([@consideRatio](https://github.com/consideRatio)) +- ci: use extracted github action for namespace report [#1980](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1980) ([@consideRatio](https://github.com/consideRatio)) +- ci: add another upgrade test and provide a template rendering diff [#1978](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1978) ([@consideRatio](https://github.com/consideRatio)) +- ci: linkcheck rework: avoid duplicated build, add colors, make it fail loud [#1976](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1976) ([@consideRatio](https://github.com/consideRatio)) +- ci: run tests conditionally on changed paths 
[#1975](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1975) ([@consideRatio](https://github.com/consideRatio)) +- ci: use k3s-channel instead of k3s-version [#1973](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1973) ([@consideRatio](https://github.com/consideRatio)) +- ci: full_namespace_report improvements for restartCount > 0 [#1971](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1971) ([@consideRatio](https://github.com/consideRatio)) +- pre-commit: chartpress --reset on Chart.yaml/values.yaml changes [#1970](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1970) ([@consideRatio](https://github.com/consideRatio)) +- ci: full_namespace_report function improved [#1967](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1967) ([@consideRatio](https://github.com/consideRatio)) +- ci: dependabot, add notes to config, fix singleuser-sample config [#1966](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1966) ([@consideRatio](https://github.com/consideRatio)) +- ci: let pytest keep running even if one test has failed [#1965](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1965) ([@consideRatio](https://github.com/consideRatio)) +- ci: help dependabot only trigger one set of tests [#1964](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1964) ([@consideRatio](https://github.com/consideRatio)) +- ci: remove yaml anchors from dependabot config [#1963](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1963) ([@consideRatio](https://github.com/consideRatio)) +- ci: Test against k8s 1.20 [#1956](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1956) ([@consideRatio](https://github.com/consideRatio)) +- ci: vuln scan fix [#1953](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1953) ([@consideRatio](https://github.com/consideRatio)) +- ci: let dependabot update used GitHub action's versions [#1949](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1949) 
([@consideRatio](https://github.com/consideRatio)) +- ci: let dependabot update jupyterhub, replace JUPYTERHUB_VERSION with PIP_OVERRIDES [#1948](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1948) ([@consideRatio](https://github.com/consideRatio)) +- ci: automatically scan and patch our images for known vulnerabilities [#1942](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1942) ([@consideRatio](https://github.com/consideRatio)) +- ci: action-k3s-helm was moved to jupyterhub [#1939](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1939) ([@manics](https://github.com/manics)) +- ci: fix of intermittent netpol test failure [#1933](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1933) ([@consideRatio](https://github.com/consideRatio)) #### Contributors to this release @@ -159,11 +159,11 @@ for more information. ## Bugs fixed -* image: bump JupyterHub to 1.2.2 from 1.2.1 for bugfixes [#1924](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1924) ([@consideRatio](https://github.com/consideRatio)) +- image: bump JupyterHub to 1.2.2 from 1.2.1 for bugfixes [#1924](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1924) ([@consideRatio](https://github.com/consideRatio)) #### Maintenance and upkeep improvements -* pre-commit autoformat: black and beautysh [#1920](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1920) ([@manics](https://github.com/manics)) +- pre-commit autoformat: black and beautysh [#1920](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1920) ([@manics](https://github.com/manics)) #### Contributors to this release @@ -171,7 +171,6 @@ for more information. 
[@consideRatio](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3AconsideRatio+updated%3A2020-11-21..2020-11-27&type=Issues) | [@manics](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Amanics+updated%3A2020-11-21..2020-11-27&type=Issues) - ### [0.10.4] - 2020-11-21 A patch release to patch a bug in the dependency oauthenticator that made users @@ -180,7 +179,7 @@ configuration if c.KubeSpawner.profile_list was configured. #### Bugs fixed -* hub image: bump oauthenticator and prometheus-client [#1918](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1918) ([@consideRatio](https://github.com/consideRatio)) +- hub image: bump oauthenticator and prometheus-client [#1918](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1918) ([@consideRatio](https://github.com/consideRatio)) #### Contributors to this release @@ -200,30 +199,30 @@ rely on Helm 3 features. #### Enhancements made -* Configurable resource requests for hook-image-awaiter [#1906](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1906) ([@consideRatio](https://github.com/consideRatio)) -* Add use_lookup_dn_username parameter for LDAP [#1903](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1903) ([@JarnoRFB](https://github.com/JarnoRFB)) -* Allow exposing extra ports in autohttps/traefik deployment [#1901](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1901) ([@yuvipanda](https://github.com/yuvipanda)) -* prePuller.extraTolerations added for the image-puller daemonsets [#1883](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1883) ([@jerkern](https://github.com/jerkern)) +- Configurable resource requests for hook-image-awaiter [#1906](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1906) ([@consideRatio](https://github.com/consideRatio)) +- Add use_lookup_dn_username parameter for LDAP [#1903](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1903) 
([@JarnoRFB](https://github.com/JarnoRFB)) +- Allow exposing extra ports in autohttps/traefik deployment [#1901](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1901) ([@yuvipanda](https://github.com/yuvipanda)) +- prePuller.extraTolerations added for the image-puller daemonsets [#1883](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1883) ([@jerkern](https://github.com/jerkern)) ## Bugs fixed -* hub image: kubernetes 12.0.1, nativeauth 0.0.6, tornado 6.1 [#1912](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1912) ([@consideRatio](https://github.com/consideRatio)) +- hub image: kubernetes 12.0.1, nativeauth 0.0.6, tornado 6.1 [#1912](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1912) ([@consideRatio](https://github.com/consideRatio)) #### Maintenance and upkeep improvements -* hub image: kubernetes 12.0.1, nativeauth 0.0.6, tornado 6.1 [#1912](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1912) ([@consideRatio](https://github.com/consideRatio)) -* Require helm v2.16.0 explicitly and minor CI updates [#1911](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1911) ([@consideRatio](https://github.com/consideRatio)) -* CI: make upgrades more robust and skip 1m precautionary sleep [#1904](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1904) ([@consideRatio](https://github.com/consideRatio)) -* CI: publish with helpful commit message [#1898](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1898) ([@consideRatio](https://github.com/consideRatio)) -* Replace Travis with GitHub workflow [#1896](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1896) ([@manics](https://github.com/manics)) -* Avoid harmless error in user-scheduler [#1895](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1895) ([@consideRatio](https://github.com/consideRatio)) -* removal: contributors script [#1669](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1669) 
([@consideRatio](https://github.com/consideRatio)) +- hub image: kubernetes 12.0.1, nativeauth 0.0.6, tornado 6.1 [#1912](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1912) ([@consideRatio](https://github.com/consideRatio)) +- Require helm v2.16.0 explicitly and minor CI updates [#1911](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1911) ([@consideRatio](https://github.com/consideRatio)) +- CI: make upgrades more robust and skip 1m precautionary sleep [#1904](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1904) ([@consideRatio](https://github.com/consideRatio)) +- CI: publish with helpful commit message [#1898](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1898) ([@consideRatio](https://github.com/consideRatio)) +- Replace Travis with GitHub workflow [#1896](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1896) ([@manics](https://github.com/manics)) +- Avoid harmless error in user-scheduler [#1895](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1895) ([@consideRatio](https://github.com/consideRatio)) +- removal: contributors script [#1669](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1669) ([@consideRatio](https://github.com/consideRatio)) #### Documentation improvements -* Update jupyterhub extension documentation to specify namespace [#1909](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1909) ([@plant99](https://github.com/plant99)) -* DOCS: Adding note on limit to guarantee ratio [#1897](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1897) ([@choldgraf](https://github.com/choldgraf)) -* Changelog for 0.10.2 [#1893](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1893) ([@consideRatio](https://github.com/consideRatio)) +- Update jupyterhub extension documentation to specify namespace [#1909](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1909) ([@plant99](https://github.com/plant99)) +- DOCS: Adding note on limit to guarantee ratio 
[#1897](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1897) ([@choldgraf](https://github.com/choldgraf)) +- Changelog for 0.10.2 [#1893](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1893) ([@consideRatio](https://github.com/consideRatio)) #### Contributors to this release @@ -231,7 +230,6 @@ rely on Helm 3 features. [@betatim](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Abetatim+updated%3A2020-10-30..2020-11-15&type=Issues) | [@choldgraf](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Acholdgraf+updated%3A2020-10-30..2020-11-15&type=Issues) | [@consideRatio](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3AconsideRatio+updated%3A2020-10-30..2020-11-15&type=Issues) | [@JarnoRFB](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3AJarnoRFB+updated%3A2020-10-30..2020-11-15&type=Issues) | [@jerkern](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Ajerkern+updated%3A2020-10-30..2020-11-15&type=Issues) | [@manics](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Amanics+updated%3A2020-10-30..2020-11-15&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Aminrk+updated%3A2020-10-30..2020-11-15&type=Issues) | [@plant99](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Aplant99+updated%3A2020-10-30..2020-11-15&type=Issues) | [@tirumerla](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Atirumerla+updated%3A2020-10-30..2020-11-15&type=Issues) | [@yuvipanda](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Ayuvipanda+updated%3A2020-10-30..2020-11-15&type=Issues) - ### [0.10.2] - 2020-10-30 A bugfix release to add securityContext configuration on _all_ the containers in the image-puller pods, which can be 
needed when a k8s PodSecurityPolicy is forcing pods to startup as non-root users. @@ -240,11 +238,11 @@ Note that whoever need to comply with a strict PodSecurityPolicy will also need #### Bugs fixed -* Add securityContext to all image-puller pods' containers [#1892](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1892) ([@consideRatio](https://github.com/consideRatio)) +- Add securityContext to all image-puller pods' containers [#1892](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1892) ([@consideRatio](https://github.com/consideRatio)) #### Documentation improvements -* Changelog for 0.10.1 [#1890](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1890) ([@consideRatio](https://github.com/consideRatio)) +- Changelog for 0.10.1 [#1890](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1890) ([@consideRatio](https://github.com/consideRatio)) #### Contributors to this release @@ -252,26 +250,28 @@ Note that whoever need to comply with a strict PodSecurityPolicy will also need [@consideRatio](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3AconsideRatio+updated%3A2020-10-30..2020-10-30&type=Issues) | [@jatinder91](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Ajatinder91+updated%3A2020-10-30..2020-10-30&type=Issues) - ### [0.10.1] - 2020-10-30 A bugfix release simply updating JupyterHub to 1.2.1. JupyterHub 1.2.1 fixes a regression related to registered JupyterHub services using the `oauth_no_confirm` configuration. 
#### Bugs fixed -* Use JupyterHub 1.2.1 - fixes regression for external JH services' oauth_no_confirm config [#1889](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1889) ([@minrk](https://github.com/minrk)) + +- Use JupyterHub 1.2.1 - fixes regression for external JH services' oauth_no_confirm config [#1889](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1889) ([@minrk](https://github.com/minrk)) #### Maintenance and upkeep improvements -* Fix CI that broke as assumptions changed about latest published version [#1887](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1887) ([@consideRatio](https://github.com/consideRatio)) + +- Fix CI that broke as assumptions changed about latest published version [#1887](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1887) ([@consideRatio](https://github.com/consideRatio)) #### Documentation improvements -* Update changelog for 0.10.0 release [#1886](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1886) ([@consideRatio](https://github.com/consideRatio)) + +- Update changelog for 0.10.0 release [#1886](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1886) ([@consideRatio](https://github.com/consideRatio)) #### Contributors to this release + ([GitHub contributors page for this release](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/graphs/contributors?from=2020-10-29&to=2020-10-30&type=c)) [@consideRatio](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3AconsideRatio+updated%3A2020-10-29..2020-10-30&type=Issues) | [@minrk](https://github.com/search?q=repo%3Ajupyterhub%2Fzero-to-jupyterhub-k8s+involves%3Aminrk+updated%3A2020-10-29..2020-10-30&type=Issues) - ### [0.10.0] - 2020-10-29 This release makes the deployment more robust, and enhances users ability to @@ -296,7 +296,7 @@ the Helm chart to easier comply with PodSecurityPolicies by default. pods. 
These `NetworkPolicy` resources are very permissive on the outgoing traffic (egress), but is limiting the incoming traffic to what is known to be needed. - + Note that these NetworkPolicies only influence network communication in a Kubernetes cluster if a NetworkPolicy controller enforce them, such as Calico. @@ -345,179 +345,179 @@ the Helm chart to easier comply with PodSecurityPolicies by default. #### Notable dependencies updated -Dependency | Version in previous release | Version in this release | Changelog link | Note --|-|-|-|- -[jupyterhub](https://github.com/jupyterhub/jupyterhub) | 1.1.0 | 1.2.0 | [Changelog](https://jupyterhub.readthedocs.io/en/stable/changelog.html) | Run in the `hub` pod -[kubespawner](https://github.com/jupyterhub/kubespawner) | 0.11.1 | 0.14.1 | [Changelog](https://jupyterhub-kubespawner.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod -[oauthenticator](https://github.com/jupyterhub/oauthenticator) | 0.11.0 | 0.12.0 | [Changelog](https://oauthenticator.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod -[ldapauthenticator](https://github.com/jupyterhub/ldapauthenticator) | 1.3.0 | 1.3.2 | [Changelog](https://github.com/jupyterhub/ldapauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod -[ltiauthenticator](https://github.com/jupyterhub/ltiauthenticator) | 0.4.0 | 0.4.0 | [Changelog](https://github.com/jupyterhub/ltiauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod -[nativeauthenticator](https://github.com/jupyterhub/nativeauthenticator) | 0.0.5 | 0.0.5 | [Changelog](https://github.com/jupyterhub/nativeauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod -[jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler) | - | v1.0 | - | Run in the `hub` pod -[configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy) | 4.2.1 | 4.2.2 | [Changelog](https://github.com/jupyterhub/configurable-http-proxy/blob/master/CHANGELOG.md) | Run in the 
`proxy` pod -[traefik](https://github.com/traefik/traefik) | v2.1 | v2.3.2 | [Changelog](https://github.com/traefik/traefik/blob/master/CHANGELOG.md) | Run in the `autohttps` pod -[kube-scheduler](https://github.com/kubernetes/kube-scheduler) | v1.13.12 | v1.19.2 | - | Run in the `user-scheduler` pod(s) +| Dependency | Version in previous release | Version in this release | Changelog link | Note | +| -------------------------------------------------------------------------------- | --------------------------- | ----------------------- | ------------------------------------------------------------------------------------------- | ---------------------------------- | +| [jupyterhub](https://github.com/jupyterhub/jupyterhub) | 1.1.0 | 1.2.0 | [Changelog](https://jupyterhub.readthedocs.io/en/stable/changelog.html) | Run in the `hub` pod | +| [kubespawner](https://github.com/jupyterhub/kubespawner) | 0.11.1 | 0.14.1 | [Changelog](https://jupyterhub-kubespawner.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod | +| [oauthenticator](https://github.com/jupyterhub/oauthenticator) | 0.11.0 | 0.12.0 | [Changelog](https://oauthenticator.readthedocs.io/en/latest/changelog.html) | Run in the `hub` pod | +| [ldapauthenticator](https://github.com/jupyterhub/ldapauthenticator) | 1.3.0 | 1.3.2 | [Changelog](https://github.com/jupyterhub/ldapauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod | +| [ltiauthenticator](https://github.com/jupyterhub/ltiauthenticator) | 0.4.0 | 0.4.0 | [Changelog](https://github.com/jupyterhub/ltiauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod | +| [nativeauthenticator](https://github.com/jupyterhub/nativeauthenticator) | 0.0.5 | 0.0.5 | [Changelog](https://github.com/jupyterhub/nativeauthenticator/blob/master/CHANGELOG.md) | Run in the `hub` pod | +| [jupyterhub-idle-culler](https://github.com/jupyterhub/jupyterhub-idle-culler) | - | v1.0 | - | Run in the `hub` pod | +| 
[configurable-http-proxy](https://github.com/jupyterhub/configurable-http-proxy) | 4.2.1 | 4.2.2 | [Changelog](https://github.com/jupyterhub/configurable-http-proxy/blob/master/CHANGELOG.md) | Run in the `proxy` pod | +| [traefik](https://github.com/traefik/traefik) | v2.1 | v2.3.2 | [Changelog](https://github.com/traefik/traefik/blob/master/CHANGELOG.md) | Run in the `autohttps` pod | +| [kube-scheduler](https://github.com/kubernetes/kube-scheduler) | v1.13.12 | v1.19.2 | - | Run in the `user-scheduler` pod(s) | For a detailed list of how Python dependencies have changed in the `hub` Pod's Docker image, inspect the [images/hub/requirements.txt](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/images/hub/requirements.txt) file. #### Enhancements made -* Allow adding extra labels to the traefik pod [#1862](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1862) ([@yuvipanda](https://github.com/yuvipanda)) -* Add proxy.service.extraPorts to add ports to the k8s Service proxy-public [#1852](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1852) ([@yuvipanda](https://github.com/yuvipanda)) -* netpol: allowedIngressPorts and interNamespaceAccessLabels config added with defaults retaining 0.9.1 current behavior [#1842](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1842) ([@consideRatio](https://github.com/consideRatio)) -* hub.command and hub.args configuration added [#1840](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1840) ([@cbanek](https://github.com/cbanek)) -* Add nodeSelector and tolerations config for all pods of Helm chart [#1827](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1827) ([@stevenstetzler](https://github.com/stevenstetzler)) -* Added config prePuller.pullProfileListImages [#1818](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1818) ([@consideRatio](https://github.com/consideRatio)) -* Added config option: proxy.chp.extraCommandLineFlags
[#1813](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1813) ([@consideRatio](https://github.com/consideRatio)) -* Set container securityContext by default [#1798](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1798) ([@consideRatio](https://github.com/consideRatio)) -* Support chart wide and pod specific config of imagePullSecrets [#1794](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1794) ([@consideRatio](https://github.com/consideRatio)) -* Added proxy.chp.extraEnv and proxy.traefik.extraEnv configuration [#1784](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1784) ([@agrahamlincoln](https://github.com/agrahamlincoln)) -* Remove memory / cpu limits for pre-puller [#1780](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1780) ([@yuvipanda](https://github.com/yuvipanda)) -* Add additional liveness and readiness probe properties [#1767](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1767) ([@rmoe](https://github.com/rmoe)) -* Minimal and explicit resource requests for image-puller pods [#1764](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1764) ([@consideRatio](https://github.com/consideRatio)) -* hook-image-puller: -pod-scheduling-wait-duration flag added for reliability during helm upgrades [#1763](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1763) ([@consideRatio](https://github.com/consideRatio)) -* Make continuous image puller pods evictable [#1762](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1762) ([@consideRatio](https://github.com/consideRatio)) -* hub.extraEnv / singleuser.extraEnv in dict format to support k8s EnvVar spec [#1757](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1757) ([@consideRatio](https://github.com/consideRatio)) -* Add config for hub/proxy/autohttps container's securityContext [#1708](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1708) ([@mriedem](https://github.com/mriedem)) -* Add annotations to image puller 
pods [#1702](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1702) ([@duongnt](https://github.com/duongnt)) -* fix: intentionally error on missing Let's Encrypt contact email configuration [#1701](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1701) ([@consideRatio](https://github.com/consideRatio)) -* Add services API tokens in hub-secret [#1689](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1689) ([@betatim](https://github.com/betatim)) -* Tweaking readiness/liveness probe: faster startup [#1671](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1671) ([@consideRatio](https://github.com/consideRatio)) -* Tighten and flesh out networkpolicies [#1670](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1670) ([@consideRatio](https://github.com/consideRatio)) -* DX: k3s/k3d instead of kind & CI: autohttps testing [#1664](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1664) ([@consideRatio](https://github.com/consideRatio)) -* autohttps: instant secret-sync shutdown [#1659](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1659) ([@consideRatio](https://github.com/consideRatio)) -* Use DNS names instead of IPv4 addresses to be IPv6 friendly [#1643](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1643) ([@stv0g](https://github.com/stv0g)) -* autohttps: traefik's config now configurable and in YAML [#1636](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1636) ([@consideRatio](https://github.com/consideRatio)) -* Feat: autohttps readinessProbe for quicker validated startup and shutdown [#1633](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1633) ([@consideRatio](https://github.com/consideRatio)) -* switching to myst markdown in docs [#1628](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1628) ([@choldgraf](https://github.com/choldgraf)) -* Bind proxy on IPv4 and IPv6 for dual stack support [#1624](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1624) 
([@stv0g](https://github.com/stv0g)) -* Do not hardcode IPv4 localhost address for IPv6 compatibility [#1623](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1623) ([@stv0g](https://github.com/stv0g)) -* enable network policy by default [#1271](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1271) ([@minrk](https://github.com/minrk)) -* Allow configuration of Kuberspawner's pod_name_template [#1144](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1144) ([@tmshn](https://github.com/tmshn)) +- Allow adding extra labels to the traefik pod [#1862](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1862) ([@yuvipanda](https://github.com/yuvipanda)) +- Add proxy.service.extraPorts to add ports to the k8s Service proxy-public [#1852](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1852) ([@yuvipanda](https://github.com/yuvipanda)) +- netpol: allowedIngressPorts and interNamespaceAccessLabels config added with defaults retaining 0.9.1 current behavior [#1842](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1842) ([@consideRatio](https://github.com/consideRatio)) +- hub.command and hub.args configuration added [#1840](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1840) ([@cbanek](https://github.com/cbanek)) +- Add nodeSelector and tolerations config for all pods of Helm chart [#1827](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1827) ([@stevenstetzler](https://github.com/stevenstetzler)) +- Added config prePuller.pullProfileListImages [#1818](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1818) ([@consideRatio](https://github.com/consideRatio)) +- Added config option: proxy.chp.extraCommandLineFlags [#1813](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1813) ([@consideRatio](https://github.com/consideRatio)) +- Set container securityContext by default [#1798](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1798) 
([@consideRatio](https://github.com/consideRatio)) +- Support chart wide and pod specific config of imagePullSecrets [#1794](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1794) ([@consideRatio](https://github.com/consideRatio)) +- Added proxy.chp.extraEnv and proxy.traefik.extraEnv configuration [#1784](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1784) ([@agrahamlincoln](https://github.com/agrahamlincoln)) +- Remove memory / cpu limits for pre-puller [#1780](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1780) ([@yuvipanda](https://github.com/yuvipanda)) +- Add additional liveness and readiness probe properties [#1767](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1767) ([@rmoe](https://github.com/rmoe)) +- Minimal and explicit resource requests for image-puller pods [#1764](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1764) ([@consideRatio](https://github.com/consideRatio)) +- hook-image-puller: -pod-scheduling-wait-duration flag added for reliability during helm upgrades [#1763](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1763) ([@consideRatio](https://github.com/consideRatio)) +- Make continuous image puller pods evictable [#1762](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1762) ([@consideRatio](https://github.com/consideRatio)) +- hub.extraEnv / singleuser.extraEnv in dict format to support k8s EnvVar spec [#1757](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1757) ([@consideRatio](https://github.com/consideRatio)) +- Add config for hub/proxy/autohttps container's securityContext [#1708](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1708) ([@mriedem](https://github.com/mriedem)) +- Add annotations to image puller pods [#1702](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1702) ([@duongnt](https://github.com/duongnt)) +- fix: intentionally error on missing Let's Encrypt contact email configuration 
[#1701](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1701) ([@consideRatio](https://github.com/consideRatio)) +- Add services API tokens in hub-secret [#1689](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1689) ([@betatim](https://github.com/betatim)) +- Tweaking readiness/liveness probe: faster startup [#1671](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1671) ([@consideRatio](https://github.com/consideRatio)) +- Tighten and flesh out networkpolicies [#1670](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1670) ([@consideRatio](https://github.com/consideRatio)) +- DX: k3s/k3d instead of kind & CI: autohttps testing [#1664](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1664) ([@consideRatio](https://github.com/consideRatio)) +- autohttps: instant secret-sync shutdown [#1659](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1659) ([@consideRatio](https://github.com/consideRatio)) +- Use DNS names instead of IPv4 addresses to be IPv6 friendly [#1643](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1643) ([@stv0g](https://github.com/stv0g)) +- autohttps: traefik's config now configurable and in YAML [#1636](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1636) ([@consideRatio](https://github.com/consideRatio)) +- Feat: autohttps readinessProbe for quicker validated startup and shutdown [#1633](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1633) ([@consideRatio](https://github.com/consideRatio)) +- switching to myst markdown in docs [#1628](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1628) ([@choldgraf](https://github.com/choldgraf)) +- Bind proxy on IPv4 and IPv6 for dual stack support [#1624](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1624) ([@stv0g](https://github.com/stv0g)) +- Do not hardcode IPv4 localhost address for IPv6 compatibility [#1623](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1623) 
([@stv0g](https://github.com/stv0g)) +- enable network policy by default [#1271](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1271) ([@minrk](https://github.com/minrk)) +- Allow configuration of Kuberspawner's pod_name_template [#1144](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1144) ([@tmshn](https://github.com/tmshn)) #### Bugs fixed -* Bump KubeSpawner to 0.14.1 to fix a bug in 0.14.0 about image_pull_secrets [#1868](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1868) ([@consideRatio](https://github.com/consideRatio)) -* netpol: allowedIngressPorts and interNamespaceAccessLabels config added with defaults retaining 0.9.1 current behavior [#1842](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1842) ([@consideRatio](https://github.com/consideRatio)) -* user-scheduler: let image locality etc matter again [#1837](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1837) ([@consideRatio](https://github.com/consideRatio)) -* Add retryable HTTP client to image-awaiter [#1830](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1830) ([@bleggett](https://github.com/bleggett)) -* prePuller: fix recently introduced regression [#1817](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1817) ([@consideRatio](https://github.com/consideRatio)) -* userScheduler: only render associated PDB resource if userScheduler itself is enabled [#1812](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1812) ([@consideRatio](https://github.com/consideRatio)) -* Fix same functionality for proxy.traefik.extraEnv as other extraEnv [#1808](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1808) ([@consideRatio](https://github.com/consideRatio)) -* Set container securityContext by default [#1798](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1798) ([@consideRatio](https://github.com/consideRatio)) -* Relax hook-image-puller to make upgrades more reliable 
[#1787](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1787) ([@consideRatio](https://github.com/consideRatio)) -* Updates to user-scheduler's coupling to the kube-scheduler binary [#1778](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1778) ([@consideRatio](https://github.com/consideRatio)) -* https: Only expose port 443 if we really have HTTPS on [#1758](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1758) ([@yuvipanda](https://github.com/yuvipanda)) -* jupyterhub existing image pull secret configuration load bug fixed [#1727](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1727) ([@mpolatcan](https://github.com/mpolatcan)) -* fix: jupyterhub services without apiToken was ignored [#1721](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1721) ([@consideRatio](https://github.com/consideRatio)) -* fix: autohttps cert acquisition stability fixed [#1719](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1719) ([@consideRatio](https://github.com/consideRatio)) -* Enable the user scheduler to pay attention to CSI volume count [#1699](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1699) ([@rschroll](https://github.com/rschroll)) -* secret-sync: selective write to secret / functional logs [#1678](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1678) ([@consideRatio](https://github.com/consideRatio)) -* Tighten and flesh out networkpolicies [#1670](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1670) ([@consideRatio](https://github.com/consideRatio)) +- Bump KubeSpawner to 0.14.1 to fix a bug in 0.14.0 about image_pull_secrets [#1868](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1868) ([@consideRatio](https://github.com/consideRatio)) +- netpol: allowedIngressPorts and interNamespaceAccessLabels config added with defaults retaining 0.9.1 current behavior [#1842](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1842) 
([@consideRatio](https://github.com/consideRatio)) +- user-scheduler: let image locality etc matter again [#1837](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1837) ([@consideRatio](https://github.com/consideRatio)) +- Add retryable HTTP client to image-awaiter [#1830](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1830) ([@bleggett](https://github.com/bleggett)) +- prePuller: fix recently introduced regression [#1817](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1817) ([@consideRatio](https://github.com/consideRatio)) +- userScheduler: only render associated PDB resource if userScheduler itself is enabled [#1812](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1812) ([@consideRatio](https://github.com/consideRatio)) +- Fix same functionality for proxy.traefik.extraEnv as other extraEnv [#1808](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1808) ([@consideRatio](https://github.com/consideRatio)) +- Set container securityContext by default [#1798](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1798) ([@consideRatio](https://github.com/consideRatio)) +- Relax hook-image-puller to make upgrades more reliable [#1787](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1787) ([@consideRatio](https://github.com/consideRatio)) +- Updates to user-scheduler's coupling to the kube-scheduler binary [#1778](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1778) ([@consideRatio](https://github.com/consideRatio)) +- https: Only expose port 443 if we really have HTTPS on [#1758](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1758) ([@yuvipanda](https://github.com/yuvipanda)) +- jupyterhub existing image pull secret configuration load bug fixed [#1727](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1727) ([@mpolatcan](https://github.com/mpolatcan)) +- fix: jupyterhub services without apiToken was ignored [#1721](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1721) 
([@consideRatio](https://github.com/consideRatio)) +- fix: autohttps cert acquisition stability fixed [#1719](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1719) ([@consideRatio](https://github.com/consideRatio)) +- Enable the user scheduler to pay attention to CSI volume count [#1699](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1699) ([@rschroll](https://github.com/rschroll)) +- secret-sync: selective write to secret / functional logs [#1678](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1678) ([@consideRatio](https://github.com/consideRatio)) +- Tighten and flesh out networkpolicies [#1670](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1670) ([@consideRatio](https://github.com/consideRatio)) #### Maintenance and upkeep improvements -* use jupyterhub 1.2.0 [#1884](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1884) ([@minrk](https://github.com/minrk)) -* Update Travis CI badge following .org -> com migration [#1882](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1882) ([@consideRatio](https://github.com/consideRatio)) -* Remove globus_sdk and update various Docker images [#1881](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1881) ([@consideRatio](https://github.com/consideRatio)) -* Complementary fix to recent aesthetics PR [#1878](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1878) ([@consideRatio](https://github.com/consideRatio)) -* Helm template aesthetics fixes [#1877](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1877) ([@consideRatio](https://github.com/consideRatio)) -* Added rediraffe redirecgtion [#1876](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1876) ([@NerdSec](https://github.com/NerdSec)) -* Bump OAuthenticator to 0.12.0 from 0.11.0 [#1874](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1874) ([@consideRatio](https://github.com/consideRatio)) -* Dependency: bump proxy pods image of CHP to 4.2.2 for bugfixes and docker 
image dependency updates [#1873](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1873) ([@consideRatio](https://github.com/consideRatio)) -* Pin Traefik to v2.3.2 for cert acquisition stability [#1859](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1859) ([@consideRatio](https://github.com/consideRatio)) -* CI: Add logs for autohttps pod on failure to debug intermittent issue [#1855](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1855) ([@consideRatio](https://github.com/consideRatio)) -* CI: Try to improve test stability and autohttps cert aquisition reliability [#1854](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1854) ([@consideRatio](https://github.com/consideRatio)) -* CI: bump k3s and helm versions [#1848](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1848) ([@consideRatio](https://github.com/consideRatio)) -* Add dependabot config to update dependencies automatically [#1844](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1844) ([@jgwerner](https://github.com/jgwerner)) -* try out jupyterhub 1.2.0b1 [#1841](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1841) ([@minrk](https://github.com/minrk)) -* Remove unnecessary Dockerfile build step [#1833](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1833) ([@bleggett](https://github.com/bleggett)) -* Add schema.yaml and validate.py to .helmignore [#1832](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1832) ([@consideRatio](https://github.com/consideRatio)) -* CI: reorder ci jobs to provide relevant feedback quickly [#1828](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1828) ([@consideRatio](https://github.com/consideRatio)) -* Revert recent removal of image-pulling related to cloudMetadata blocker [#1826](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1826) ([@consideRatio](https://github.com/consideRatio)) -* Add maintainers / owners to register with Artifact Hub 
[#1820](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1820) ([@consideRatio](https://github.com/consideRatio)) -* CI: fix RTD builds on push to master [#1816](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1816) ([@consideRatio](https://github.com/consideRatio)) -* deprecation: warn when proxy.https is modified and proxy.https.enabled=true [#1807](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1807) ([@consideRatio](https://github.com/consideRatio)) -* Soft deprecate singleuser.cloudMetadata.enabled in favor of blockWithIptables [#1805](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1805) ([@consideRatio](https://github.com/consideRatio)) -* hub livenessProbe: bump from 1m to 3m delay before probes are sent [#1804](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1804) ([@consideRatio](https://github.com/consideRatio)) -* hub image: bump kubespawner to 0.14.0 [#1802](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1802) ([@consideRatio](https://github.com/consideRatio)) -* ci: bump helm to 3.3.2 and test with k8s 1.19 also [#1783](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1783) ([@consideRatio](https://github.com/consideRatio)) -* user-scheduler: tweak modern configuration [#1782](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1782) ([@consideRatio](https://github.com/consideRatio)) -* Update to newer version of 'pause' container [#1781](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1781) ([@yuvipanda](https://github.com/yuvipanda)) -* Remove memory / cpu limits for pre-puller [#1780](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1780) ([@yuvipanda](https://github.com/yuvipanda)) -* Updates to user-scheduler's coupling to the kube-scheduler binary [#1778](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1778) ([@consideRatio](https://github.com/consideRatio)) -* hub: Switch base image to latest LTS 
[#1772](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1772) ([@yuvipanda](https://github.com/yuvipanda)) -* CI: Add test for singleuser.extraEnv [#1769](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1769) ([@consideRatio](https://github.com/consideRatio)) -* Bump KubeSpawner to 0.13.0 [#1768](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1768) ([@consideRatio](https://github.com/consideRatio)) -* CI: always publish helm chart on push to master [#1765](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1765) ([@consideRatio](https://github.com/consideRatio)) -* Bump traefik (autohttps pod) to v2.3 [#1756](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1756) ([@consideRatio](https://github.com/consideRatio)) -* Update JupyterHub's python package dependencies [#1752](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1752) ([@jgwerner](https://github.com/jgwerner)) -* Fix travis by pinning docker python package version [#1743](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1743) ([@chancez](https://github.com/chancez)) -* update kubespawner to 0.12 [#1722](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1722) ([@minrk](https://github.com/minrk)) -* k8s api compatibility: add conditional to ingress apiVersion [#1718](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1718) ([@davidsmf](https://github.com/davidsmf)) -* Upgrade libc to patch vulnerability in hub img [#1715](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1715) ([@meneal](https://github.com/meneal)) -* Autohttps reliability fix: bump traefik version [#1714](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1714) ([@consideRatio](https://github.com/consideRatio)) -* k8s-hub img rebuild -> dependencies refrozen [#1713](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1713) ([@consideRatio](https://github.com/consideRatio)) -* removing circleci 
[#1711](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1711) ([@choldgraf](https://github.com/choldgraf)) -* Complexity reduction - combine passthrough values.yaml data in hub-config (k8s configmap) to hub-secret (k8s secret) [#1682](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1682) ([@consideRatio](https://github.com/consideRatio)) -* secret-sync: selective write to secret / functional logs [#1678](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1678) ([@consideRatio](https://github.com/consideRatio)) -* DX: k3s/k3d instead of kind & CI: autohttps testing [#1664](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1664) ([@consideRatio](https://github.com/consideRatio)) -* cleanup: remove old deploy secret [#1661](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1661) ([@consideRatio](https://github.com/consideRatio)) -* RTD build fix: get correct version of sphinx [#1658](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1658) ([@consideRatio](https://github.com/consideRatio)) -* Force sphinx>=2,<3 for myst_parser [#1657](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1657) ([@consideRatio](https://github.com/consideRatio)) -* Use idle culler from jupyterhub-idle-culler package [#1648](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1648) ([@yuvipanda](https://github.com/yuvipanda)) -* Refactor: reference ports by name instead of repeating the number [#1645](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1645) ([@consideRatio](https://github.com/consideRatio)) -* DX: refactor helm template [#1635](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1635) ([@consideRatio](https://github.com/consideRatio)) -* CI: fix sphinx warnings turned into errors [#1634](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1634) ([@consideRatio](https://github.com/consideRatio)) -* Dep: Bump deploy/autohttps's traefik to v2.2 
[#1632](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1632) ([@consideRatio](https://github.com/consideRatio)) -* DX: more recognizable port numbers [#1631](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1631) ([@consideRatio](https://github.com/consideRatio)) +- use jupyterhub 1.2.0 [#1884](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1884) ([@minrk](https://github.com/minrk)) +- Update Travis CI badge following .org -> com migration [#1882](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1882) ([@consideRatio](https://github.com/consideRatio)) +- Remove globus_sdk and update various Docker images [#1881](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1881) ([@consideRatio](https://github.com/consideRatio)) +- Complementary fix to recent aesthetics PR [#1878](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1878) ([@consideRatio](https://github.com/consideRatio)) +- Helm template aesthetics fixes [#1877](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1877) ([@consideRatio](https://github.com/consideRatio)) +- Added rediraffe redirecgtion [#1876](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1876) ([@NerdSec](https://github.com/NerdSec)) +- Bump OAuthenticator to 0.12.0 from 0.11.0 [#1874](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1874) ([@consideRatio](https://github.com/consideRatio)) +- Dependency: bump proxy pods image of CHP to 4.2.2 for bugfixes and docker image dependency updates [#1873](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1873) ([@consideRatio](https://github.com/consideRatio)) +- Pin Traefik to v2.3.2 for cert acquisition stability [#1859](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1859) ([@consideRatio](https://github.com/consideRatio)) +- CI: Add logs for autohttps pod on failure to debug intermittent issue [#1855](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1855) 
([@consideRatio](https://github.com/consideRatio)) +- CI: Try to improve test stability and autohttps cert aquisition reliability [#1854](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1854) ([@consideRatio](https://github.com/consideRatio)) +- CI: bump k3s and helm versions [#1848](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1848) ([@consideRatio](https://github.com/consideRatio)) +- Add dependabot config to update dependencies automatically [#1844](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1844) ([@jgwerner](https://github.com/jgwerner)) +- try out jupyterhub 1.2.0b1 [#1841](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1841) ([@minrk](https://github.com/minrk)) +- Remove unnecessary Dockerfile build step [#1833](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1833) ([@bleggett](https://github.com/bleggett)) +- Add schema.yaml and validate.py to .helmignore [#1832](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1832) ([@consideRatio](https://github.com/consideRatio)) +- CI: reorder ci jobs to provide relevant feedback quickly [#1828](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1828) ([@consideRatio](https://github.com/consideRatio)) +- Revert recent removal of image-pulling related to cloudMetadata blocker [#1826](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1826) ([@consideRatio](https://github.com/consideRatio)) +- Add maintainers / owners to register with Artifact Hub [#1820](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1820) ([@consideRatio](https://github.com/consideRatio)) +- CI: fix RTD builds on push to master [#1816](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1816) ([@consideRatio](https://github.com/consideRatio)) +- deprecation: warn when proxy.https is modified and proxy.https.enabled=true [#1807](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1807) ([@consideRatio](https://github.com/consideRatio)) +- Soft 
deprecate singleuser.cloudMetadata.enabled in favor of blockWithIptables [#1805](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1805) ([@consideRatio](https://github.com/consideRatio)) +- hub livenessProbe: bump from 1m to 3m delay before probes are sent [#1804](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1804) ([@consideRatio](https://github.com/consideRatio)) +- hub image: bump kubespawner to 0.14.0 [#1802](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1802) ([@consideRatio](https://github.com/consideRatio)) +- ci: bump helm to 3.3.2 and test with k8s 1.19 also [#1783](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1783) ([@consideRatio](https://github.com/consideRatio)) +- user-scheduler: tweak modern configuration [#1782](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1782) ([@consideRatio](https://github.com/consideRatio)) +- Update to newer version of 'pause' container [#1781](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1781) ([@yuvipanda](https://github.com/yuvipanda)) +- Remove memory / cpu limits for pre-puller [#1780](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1780) ([@yuvipanda](https://github.com/yuvipanda)) +- Updates to user-scheduler's coupling to the kube-scheduler binary [#1778](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1778) ([@consideRatio](https://github.com/consideRatio)) +- hub: Switch base image to latest LTS [#1772](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1772) ([@yuvipanda](https://github.com/yuvipanda)) +- CI: Add test for singleuser.extraEnv [#1769](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1769) ([@consideRatio](https://github.com/consideRatio)) +- Bump KubeSpawner to 0.13.0 [#1768](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1768) ([@consideRatio](https://github.com/consideRatio)) +- CI: always publish helm chart on push to master 
[#1765](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1765) ([@consideRatio](https://github.com/consideRatio)) +- Bump traefik (autohttps pod) to v2.3 [#1756](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1756) ([@consideRatio](https://github.com/consideRatio)) +- Update JupyterHub's python package dependencies [#1752](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1752) ([@jgwerner](https://github.com/jgwerner)) +- Fix travis by pinning docker python package version [#1743](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1743) ([@chancez](https://github.com/chancez)) +- update kubespawner to 0.12 [#1722](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1722) ([@minrk](https://github.com/minrk)) +- k8s api compatibility: add conditional to ingress apiVersion [#1718](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1718) ([@davidsmf](https://github.com/davidsmf)) +- Upgrade libc to patch vulnerability in hub img [#1715](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1715) ([@meneal](https://github.com/meneal)) +- Autohttps reliability fix: bump traefik version [#1714](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1714) ([@consideRatio](https://github.com/consideRatio)) +- k8s-hub img rebuild -> dependencies refrozen [#1713](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1713) ([@consideRatio](https://github.com/consideRatio)) +- removing circleci [#1711](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1711) ([@choldgraf](https://github.com/choldgraf)) +- Complexity reduction - combine passthrough values.yaml data in hub-config (k8s configmap) to hub-secret (k8s secret) [#1682](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1682) ([@consideRatio](https://github.com/consideRatio)) +- secret-sync: selective write to secret / functional logs [#1678](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1678) 
([@consideRatio](https://github.com/consideRatio)) +- DX: k3s/k3d instead of kind & CI: autohttps testing [#1664](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1664) ([@consideRatio](https://github.com/consideRatio)) +- cleanup: remove old deploy secret [#1661](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1661) ([@consideRatio](https://github.com/consideRatio)) +- RTD build fix: get correct version of sphinx [#1658](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1658) ([@consideRatio](https://github.com/consideRatio)) +- Force sphinx>=2,<3 for myst_parser [#1657](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1657) ([@consideRatio](https://github.com/consideRatio)) +- Use idle culler from jupyterhub-idle-culler package [#1648](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1648) ([@yuvipanda](https://github.com/yuvipanda)) +- Refactor: reference ports by name instead of repeating the number [#1645](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1645) ([@consideRatio](https://github.com/consideRatio)) +- DX: refactor helm template [#1635](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1635) ([@consideRatio](https://github.com/consideRatio)) +- CI: fix sphinx warnings turned into errors [#1634](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1634) ([@consideRatio](https://github.com/consideRatio)) +- Dep: Bump deploy/autohttps's traefik to v2.2 [#1632](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1632) ([@consideRatio](https://github.com/consideRatio)) +- DX: more recognizable port numbers [#1631](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1631) ([@consideRatio](https://github.com/consideRatio)) #### Documentation improvements -* Add back Helm chart badge for latest pre-release (alpha, beta) [#1879](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1879) ([@consideRatio](https://github.com/consideRatio)) -* Added rediraffe redirecgtion 
[#1876](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1876) ([@NerdSec](https://github.com/NerdSec)) -* docs: fix edit button, so it doesn't go to a 404 page [#1864](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1864) ([@consideRatio](https://github.com/consideRatio)) -* Fix link to Hub23 docs [#1860](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1860) ([@sgibson91](https://github.com/sgibson91)) -* Provide links to Hub23 deployment guide [#1850](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1850) ([@sgibson91](https://github.com/sgibson91)) -* docs: clarify user-placeholder resource requests [#1835](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1835) ([@consideRatio](https://github.com/consideRatio)) -* Change doc structure [#1825](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1825) ([@NerdSec](https://github.com/NerdSec)) -* Remove mistakenly introduced artifact [#1824](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1824) ([@consideRatio](https://github.com/consideRatio)) -* fixing broken links [#1823](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1823) ([@choldgraf](https://github.com/choldgraf)) -* README.md: badges for the helm chart repo to go directly to the relevant view [#1815](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1815) ([@consideRatio](https://github.com/consideRatio)) -* Docs: fix some sphinx warnings [#1796](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1796) ([@consideRatio](https://github.com/consideRatio)) -* Fix legacy version in DigitalOcean Kubernetes setup doc [#1788](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1788) ([@subwaymatch](https://github.com/subwaymatch)) -* Add terraform resources to the community resources section [#1776](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1776) ([@salvis2](https://github.com/salvis2)) -* Docs: fixes to outdated links found by the linkchecker 
[#1770](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1770) ([@consideRatio](https://github.com/consideRatio)) -* Leave a comment about where HUB_SERVICE_* values come from [#1766](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1766) ([@mriedem](https://github.com/mriedem)) -* Unindent lines to fix the bug in "Specify certificate through Secret resource" [#1755](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1755) ([@salvis2](https://github.com/salvis2)) -* [Documentation] Authenticating with Auth0 [#1736](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1736) ([@asubb](https://github.com/asubb)) -* Docs/schema.yaml patches [#1735](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1735) ([@rubdos](https://github.com/rubdos)) -* Fix broken link to Jupyter contributor guide [#1729](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1729) ([@sgibson91](https://github.com/sgibson91)) -* Fix link [#1728](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1728) ([@JarnoRFB](https://github.com/JarnoRFB)) -* docs: myst-parser deprecation adjustment [#1723](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1723) ([@consideRatio](https://github.com/consideRatio)) -* docs: fix linkcheck warning [#1720](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1720) ([@consideRatio](https://github.com/consideRatio)) -* Docs: fix squeezed logo, broken links, and strip unused CSS and templates [#1710](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1710) ([@consideRatio](https://github.com/consideRatio)) -* Add documentation to create a Kubernetes cluster on OVH [#1704](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1704) ([@jtpio](https://github.com/jtpio)) -* DX: final touches on CONTRIBUTING.md [#1696](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1696) ([@consideRatio](https://github.com/consideRatio)) -* Update Google auth to use a list for hosted_domain 
[#1695](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1695) ([@petebachant](https://github.com/petebachant)) -* Simplify setting up JupyterLab as default [#1690](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1690) ([@yuvipanda](https://github.com/yuvipanda)) -* Use --num-nodes instead of --size to resize gcloud cluster [#1688](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1688) ([@aculich](https://github.com/aculich)) -* docs: fix broken links [#1687](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1687) ([@consideRatio](https://github.com/consideRatio)) -* Change helm chart version in setup documentation [#1685](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1685) ([@ivanpokupec](https://github.com/ivanpokupec)) -* Docs: assume usage of helm3 over deprecated helm2 [#1684](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1684) ([@GeorgianaElena](https://github.com/GeorgianaElena)) -* removal: Vagrant for local dev [#1668](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1668) ([@consideRatio](https://github.com/consideRatio)) -* docs: fixed links [#1666](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1666) ([@consideRatio](https://github.com/consideRatio)) -* DX: k3s/k3d instead of kind & CI: autohttps testing [#1664](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1664) ([@consideRatio](https://github.com/consideRatio)) -* Reference static ip docs [#1663](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1663) ([@GeorgianaElena](https://github.com/GeorgianaElena)) -* Docs: remove too outdated cost-calculator [#1660](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1660) ([@consideRatio](https://github.com/consideRatio)) -* Update create service principle command. 
[#1654](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1654) ([@superyaniv](https://github.com/superyaniv)) -* proxy.service.type: Default is different from hub.service.type [#1647](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1647) ([@manics](https://github.com/manics)) -* Fix user storage customization variable [#1640](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1640) ([@bibz](https://github.com/bibz)) -* Fix broken links in the Reference documentation [#1639](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1639) ([@bibz](https://github.com/bibz)) -* Update index.rst [#1629](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1629) ([@deinal](https://github.com/deinal)) -* AWS documentation fixes [#1564](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1564) ([@metonymic-smokey](https://github.com/metonymic-smokey)) -* add Auth0 configuration documentation [#1436](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1436) ([@philvarner](https://github.com/philvarner)) +- Add back Helm chart badge for latest pre-release (alpha, beta) [#1879](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1879) ([@consideRatio](https://github.com/consideRatio)) +- Added rediraffe redirecgtion [#1876](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1876) ([@NerdSec](https://github.com/NerdSec)) +- docs: fix edit button, so it doesn't go to a 404 page [#1864](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1864) ([@consideRatio](https://github.com/consideRatio)) +- Fix link to Hub23 docs [#1860](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1860) ([@sgibson91](https://github.com/sgibson91)) +- Provide links to Hub23 deployment guide [#1850](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1850) ([@sgibson91](https://github.com/sgibson91)) +- docs: clarify user-placeholder resource requests [#1835](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1835) 
([@consideRatio](https://github.com/consideRatio)) +- Change doc structure [#1825](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1825) ([@NerdSec](https://github.com/NerdSec)) +- Remove mistakenly introduced artifact [#1824](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1824) ([@consideRatio](https://github.com/consideRatio)) +- fixing broken links [#1823](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1823) ([@choldgraf](https://github.com/choldgraf)) +- README.md: badges for the helm chart repo to go directly to the relevant view [#1815](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1815) ([@consideRatio](https://github.com/consideRatio)) +- Docs: fix some sphinx warnings [#1796](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1796) ([@consideRatio](https://github.com/consideRatio)) +- Fix legacy version in DigitalOcean Kubernetes setup doc [#1788](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1788) ([@subwaymatch](https://github.com/subwaymatch)) +- Add terraform resources to the community resources section [#1776](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1776) ([@salvis2](https://github.com/salvis2)) +- Docs: fixes to outdated links found by the linkchecker [#1770](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1770) ([@consideRatio](https://github.com/consideRatio)) +- Leave a comment about where HUB*SERVICE*\* values come from [#1766](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1766) ([@mriedem](https://github.com/mriedem)) +- Unindent lines to fix the bug in "Specify certificate through Secret resource" [#1755](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1755) ([@salvis2](https://github.com/salvis2)) +- [Documentation] Authenticating with Auth0 [#1736](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1736) ([@asubb](https://github.com/asubb)) +- Docs/schema.yaml patches 
[#1735](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1735) ([@rubdos](https://github.com/rubdos)) +- Fix broken link to Jupyter contributor guide [#1729](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1729) ([@sgibson91](https://github.com/sgibson91)) +- Fix link [#1728](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1728) ([@JarnoRFB](https://github.com/JarnoRFB)) +- docs: myst-parser deprecation adjustment [#1723](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1723) ([@consideRatio](https://github.com/consideRatio)) +- docs: fix linkcheck warning [#1720](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1720) ([@consideRatio](https://github.com/consideRatio)) +- Docs: fix squeezed logo, broken links, and strip unused CSS and templates [#1710](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1710) ([@consideRatio](https://github.com/consideRatio)) +- Add documentation to create a Kubernetes cluster on OVH [#1704](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1704) ([@jtpio](https://github.com/jtpio)) +- DX: final touches on CONTRIBUTING.md [#1696](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1696) ([@consideRatio](https://github.com/consideRatio)) +- Update Google auth to use a list for hosted_domain [#1695](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1695) ([@petebachant](https://github.com/petebachant)) +- Simplify setting up JupyterLab as default [#1690](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1690) ([@yuvipanda](https://github.com/yuvipanda)) +- Use --num-nodes instead of --size to resize gcloud cluster [#1688](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1688) ([@aculich](https://github.com/aculich)) +- docs: fix broken links [#1687](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1687) ([@consideRatio](https://github.com/consideRatio)) +- Change helm chart version in setup documentation 
[#1685](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1685) ([@ivanpokupec](https://github.com/ivanpokupec)) +- Docs: assume usage of helm3 over deprecated helm2 [#1684](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1684) ([@GeorgianaElena](https://github.com/GeorgianaElena)) +- removal: Vagrant for local dev [#1668](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1668) ([@consideRatio](https://github.com/consideRatio)) +- docs: fixed links [#1666](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1666) ([@consideRatio](https://github.com/consideRatio)) +- DX: k3s/k3d instead of kind & CI: autohttps testing [#1664](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1664) ([@consideRatio](https://github.com/consideRatio)) +- Reference static ip docs [#1663](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1663) ([@GeorgianaElena](https://github.com/GeorgianaElena)) +- Docs: remove too outdated cost-calculator [#1660](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1660) ([@consideRatio](https://github.com/consideRatio)) +- Update create service principle command. 
[#1654](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1654) ([@superyaniv](https://github.com/superyaniv)) +- proxy.service.type: Default is different from hub.service.type [#1647](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1647) ([@manics](https://github.com/manics)) +- Fix user storage customization variable [#1640](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1640) ([@bibz](https://github.com/bibz)) +- Fix broken links in the Reference documentation [#1639](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1639) ([@bibz](https://github.com/bibz)) +- Update index.rst [#1629](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1629) ([@deinal](https://github.com/deinal)) +- AWS documentation fixes [#1564](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1564) ([@metonymic-smokey](https://github.com/metonymic-smokey)) +- add Auth0 configuration documentation [#1436](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1436) ([@philvarner](https://github.com/philvarner)) #### Contributors to this release @@ -541,6 +541,7 @@ JupyterHub (1.1.0) and authenticators along with bug fixes and some additional helpful configuration options. Noteworthy: + - An issue with automatic acquisition of HTTPS certificates has been resolved since 0.9.0-beta.3. - Fixed a compatibility issue with Kubernetes 1.16+ @@ -549,6 +550,7 @@ Noteworthy: - [jupyterhub-nativeauthenticator](https://native-authenticator.readthedocs.io/en/latest/) added to the JupyterHub Docker image. Bumped dependencies: + - jupyterhub version 1.1.0 - jupyterhub-ldapauthenticator version 1.3.0 - jupyterhub-kubespawner version 0.11.1 @@ -559,7 +561,7 @@ Bumped dependencies: 1. If you are using Helm 2, upgrade to the latest Helm 2 version. And if you are using Helm 3, upgrade to the latest Helm 3 version. 
- + Upgrading to Helm 3 from Helm 2 requires additional steps not covered here, so for now please stay with your current major version of helm (2 or 3). @@ -631,67 +633,71 @@ the following upgrade attempt. #### Dependency updates -* Bump configurable-http-proxy image [#1598](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1598) ([@consideRatio](https://github.com/consideRatio)) -* fix: Bump to base-notebook with JH 1.1.0 etc [#1588](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1588) ([@bitnik](https://github.com/bitnik)) +- Bump configurable-http-proxy image [#1598](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1598) ([@consideRatio](https://github.com/consideRatio)) +- fix: Bump to base-notebook with JH 1.1.0 etc [#1588](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1588) ([@bitnik](https://github.com/bitnik)) #### Maintenance -* Docs: refactor/docs for local development of docs [#1617](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1617) ([@consideRatio](https://github.com/consideRatio)) -* [MRG] sphinx: linkcheck in travis (allowed to fail) [#1611](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1611) ([@manics](https://github.com/manics)) -* [MRG] Sphinx: warnings are errors [#1610](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1610) ([@manics](https://github.com/manics)) -* pydata theme [#1608](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1608) ([@choldgraf](https://github.com/choldgraf)) -* Small typo fix in doc [#1591](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1591) ([@sebastianpfischer](https://github.com/sebastianpfischer)) -* [MRG] Pin sphinx theme [#1589](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1589) ([@manics](https://github.com/manics)) -* init helm and tiller with history-max settings [#1587](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1587) ([@bitnik](https://github.com/bitnik)) -* Changelog for 0.9.0-beta.4 
[#1585](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1585) ([@manics](https://github.com/manics)) -* freeze environment in hub image [#1562](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1562) ([@minrk](https://github.com/minrk)) +- Docs: refactor/docs for local development of docs [#1617](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1617) ([@consideRatio](https://github.com/consideRatio)) +- [MRG] sphinx: linkcheck in travis (allowed to fail) [#1611](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1611) ([@manics](https://github.com/manics)) +- [MRG] Sphinx: warnings are errors [#1610](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1610) ([@manics](https://github.com/manics)) +- pydata theme [#1608](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1608) ([@choldgraf](https://github.com/choldgraf)) +- Small typo fix in doc [#1591](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1591) ([@sebastianpfischer](https://github.com/sebastianpfischer)) +- [MRG] Pin sphinx theme [#1589](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1589) ([@manics](https://github.com/manics)) +- init helm and tiller with history-max settings [#1587](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1587) ([@bitnik](https://github.com/bitnik)) +- Changelog for 0.9.0-beta.4 [#1585](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1585) ([@manics](https://github.com/manics)) +- freeze environment in hub image [#1562](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1562) ([@minrk](https://github.com/minrk)) ### [0.9.0-beta.4] - 2020-02-26 #### Added -* Add nativeauthenticator to hub image [#1583](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1583) ([@consideRatio](https://github.com/consideRatio)) -* Add option to remove named server when culling [#1558](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1558) ([@betatim](https://github.com/betatim)) + +- Add 
nativeauthenticator to hub image [#1583](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1583) ([@consideRatio](https://github.com/consideRatio)) +- Add option to remove named server when culling [#1558](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1558) ([@betatim](https://github.com/betatim)) #### Dependency updates -* jupyterhub-ldapauthenticator==1.3 [#1576](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1576) ([@manics](https://github.com/manics)) -* First-class azuread support, oauth 0.11 [#1563](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1563) ([@minrk](https://github.com/minrk)) -* simplify hub-requirements [#1560](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1560) ([@minrk](https://github.com/minrk)) -* Bump to base-notebook with JH 1.1.0 etc [#1549](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1549) ([@consideRatio](https://github.com/consideRatio)) + +- jupyterhub-ldapauthenticator==1.3 [#1576](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1576) ([@manics](https://github.com/manics)) +- First-class azuread support, oauth 0.11 [#1563](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1563) ([@minrk](https://github.com/minrk)) +- simplify hub-requirements [#1560](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1560) ([@minrk](https://github.com/minrk)) +- Bump to base-notebook with JH 1.1.0 etc [#1549](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1549) ([@consideRatio](https://github.com/consideRatio)) #### Fixed -* Fix removing of named servers when culled [#1567](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1567) ([@consideRatio](https://github.com/consideRatio)) + +- Fix removing of named servers when culled [#1567](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1567) ([@consideRatio](https://github.com/consideRatio)) #### Maintenance -* Added gitlab URL 
[#1577](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1577) ([@metonymic-smokey](https://github.com/metonymic-smokey)) -* Fix reference doc link [#1570](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1570) ([@clkao](https://github.com/clkao)) -* Add contributor badge [#1559](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1559) ([@GeorgianaElena](https://github.com/GeorgianaElena)) -* Trying to clean up formatting [#1555](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1555) ([@jeremycadams](https://github.com/jeremycadams)) -* Remove unneeded directive in traefik config [#1554](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1554) ([@yuvipanda](https://github.com/yuvipanda)) -* Added documentation of secret https mode [#1553](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1553) ([@RossRKK](https://github.com/RossRKK)) -* Helm 3 preview [#1543](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1543) ([@manics](https://github.com/manics)) +- Added gitlab URL [#1577](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1577) ([@metonymic-smokey](https://github.com/metonymic-smokey)) +- Fix reference doc link [#1570](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1570) ([@clkao](https://github.com/clkao)) +- Add contributor badge [#1559](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1559) ([@GeorgianaElena](https://github.com/GeorgianaElena)) +- Trying to clean up formatting [#1555](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1555) ([@jeremycadams](https://github.com/jeremycadams)) +- Remove unneeded directive in traefik config [#1554](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1554) ([@yuvipanda](https://github.com/yuvipanda)) +- Added documentation of secret https mode [#1553](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1553) ([@RossRKK](https://github.com/RossRKK)) +- Helm 3 preview 
[#1543](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1543) ([@manics](https://github.com/manics)) ### [0.9.0-beta.3] - 2020-01-17 #### Dependency updates -* Deploy jupyterhub 1.1.0 stable [#1548](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1548) ([@minrk](https://github.com/minrk)) -* Bump chartpress for Helm 3 compatible dev releases [#1542](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1542) ([@consideRatio](https://github.com/consideRatio)) +- Deploy jupyterhub 1.1.0 stable [#1548](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1548) ([@minrk](https://github.com/minrk)) +- Bump chartpress for Helm 3 compatible dev releases [#1542](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1542) ([@consideRatio](https://github.com/consideRatio)) #### Fixed -* Replace kube-lego + nginx ingress with traefik [#1539](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1539) ([@yuvipanda](https://github.com/yuvipanda)) +- Replace kube-lego + nginx ingress with traefik [#1539](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1539) ([@yuvipanda](https://github.com/yuvipanda)) #### Maintenance -* Update step zero for Azure docs with commands to setup an VNet and network policy [#1527](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1527) ([@sgibson91](https://github.com/sgibson91)) -* Fix duplicate docs label [#1544](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1544) ([@manics](https://github.com/manics)) -* Made GCP docs of compute zone names generic [#1431](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1431) ([@metonymic-smokey](https://github.com/metonymic-smokey)) + +- Update step zero for Azure docs with commands to setup an VNet and network policy [#1527](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1527) ([@sgibson91](https://github.com/sgibson91)) +- Fix duplicate docs label [#1544](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1544) 
([@manics](https://github.com/manics)) +- Made GCP docs of compute zone names generic [#1431](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1431) ([@metonymic-smokey](https://github.com/metonymic-smokey)) ### [0.9.0-beta.2] - 2019-12-26 #### Fixed -* Fix major breaking change if all HTTPS options was disabled introduced just before beta.1 [#1534](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1534) ([@dirkcgrunwald](https://github.com/dirkcgrunwald)) +- Fix major breaking change if all HTTPS options was disabled introduced just before beta.1 [#1534](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1534) ([@dirkcgrunwald](https://github.com/dirkcgrunwald)) ### [0.9.0-beta.1] - 2019-12-26 @@ -710,117 +716,115 @@ Some highlights of relevance for this release are: #### Added -* Added ability to configure liveness/readiness probes on the hub/proxy [#1480](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1480) ([@mrow4a](https://github.com/mrow4a)) -* Added ability to use an existing/shared image pull secret for hub and image pullers [#1426](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1426) ([@LaurentGoderre](https://github.com/LaurentGoderre)) -* Added ability to configure the proxy's load balancer service's access restrictions (`loadBalancerSourceRanges`) [#1418](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1418) ([@GergelyKalmar](https://github.com/GergelyKalmar)) -* Added `user-scheduler` pod->node scheduling policy configuration [#1409](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1409) ([@yuvipanda](https://github.com/yuvipanda)) -* Added ability to add additional ingress rules to k8s NetworkPolicy resources [#1380](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1380) ([@yuvipanda](https://github.com/yuvipanda)) -* Enabled the continuous image puller by default [#1276](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1276) 
([@consideRatio](https://github.com/consideRatio)) -* Added ability to configure initContainers of the hub pod [#1274](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1274) ([@scottyhq](https://github.com/scottyhq)) -* Enabled the user-scheduler by default [#1272](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1272) ([@minrk](https://github.com/minrk)) -* Added ability to use an existing jupyterhub configuration k8s secret for hub (not recommended) [#1142](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1142) ([@koen92](https://github.com/koen92)) -* Added use of liveness/readinessProbe by default [#1004](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1004) ([@tmshn](https://github.com/tmshn)) +- Added ability to configure liveness/readiness probes on the hub/proxy [#1480](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1480) ([@mrow4a](https://github.com/mrow4a)) +- Added ability to use an existing/shared image pull secret for hub and image pullers [#1426](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1426) ([@LaurentGoderre](https://github.com/LaurentGoderre)) +- Added ability to configure the proxy's load balancer service's access restrictions (`loadBalancerSourceRanges`) [#1418](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1418) ([@GergelyKalmar](https://github.com/GergelyKalmar)) +- Added `user-scheduler` pod->node scheduling policy configuration [#1409](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1409) ([@yuvipanda](https://github.com/yuvipanda)) +- Added ability to add additional ingress rules to k8s NetworkPolicy resources [#1380](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1380) ([@yuvipanda](https://github.com/yuvipanda)) +- Enabled the continuous image puller by default [#1276](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1276) ([@consideRatio](https://github.com/consideRatio)) +- Added ability to configure initContainers of the hub pod 
[#1274](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1274) ([@scottyhq](https://github.com/scottyhq)) +- Enabled the user-scheduler by default [#1272](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1272) ([@minrk](https://github.com/minrk)) +- Added ability to use an existing jupyterhub configuration k8s secret for hub (not recommended) [#1142](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1142) ([@koen92](https://github.com/koen92)) +- Added use of liveness/readinessProbe by default [#1004](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1004) ([@tmshn](https://github.com/tmshn)) #### Dependency updates -* Bump JupyterHub to 1.1.0b1 [#1533](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1533) ([@consideRatio](https://github.com/consideRatio)) -* Update JupyterHub version [#1524](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1524) ([@bitnik](https://github.com/bitnik)) -* Re-add ltiauthenticator 0.4.0 to hub image [#1519](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1519) ([@consideRatio](https://github.com/consideRatio)) -* Fix hub image dependency versions, disable ltiauthenticator, use chartpress==0.5.0 [#1518](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1518) ([@consideRatio](https://github.com/consideRatio)) -* Update hub image dependencies and RELEASE.md regarding dependencies [#1484](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1484) ([@consideRatio](https://github.com/consideRatio)) -* Bump kubespawner to 0.11.1 for spawner progress bugfix [#1502](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1502) ([@consideRatio](https://github.com/consideRatio)) -* Updated hub image dependencies [#1484](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1484) ([@consideRatio](https://github.com/consideRatio)) -* Updated kube-scheduler binary used by user-scheduler, kubespawner, kubernetes python client, and oauthenticator 
[#1483](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1483) ([@consideRatio](https://github.com/consideRatio)) -* Bump CHP to 4.2.0 - we get quicker chart upgrades now [#1481](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1481) ([@consideRatio](https://github.com/consideRatio)) -* Bump singleuser-sample [#1473](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1473) ([@consideRatio](https://github.com/consideRatio)) -* Bump python-kubernetes to 9.0.* (later also to 10.0.*) [#1454](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1454) ([@clkao](https://github.com/clkao)) -* Bump tmpauthenticator to 0.6 (needed for jupyterhub 1.0) [#1299](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1299) ([@manics](https://github.com/manics)) -* Include jupyter-firstuseauthenticator. [#1288](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1288) ([@danielballan](https://github.com/danielballan)) -* Bump jupyterhub to 1.0.0 (later also to a post 1.0.0 commit) [#1263](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1263) ([@minrk](https://github.com/minrk)) -* Bump CHP image to 4.1.0 from 3.0.0 (later to 4.2.0) [#1246](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1246) ([@consideRatio](https://github.com/consideRatio)) -* Bump oauthenticator 0.8.2 (later to 0.10.0) [#1239](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1239) ([@minrk](https://github.com/minrk)) -* Bump jupyterhub to 1.0b2 (later to an post 1.0.0 commit) [#1224](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1224) ([@minrk](https://github.com/minrk)) +- Bump JupyterHub to 1.1.0b1 [#1533](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1533) ([@consideRatio](https://github.com/consideRatio)) +- Update JupyterHub version [#1524](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1524) ([@bitnik](https://github.com/bitnik)) +- Re-add ltiauthenticator 0.4.0 to hub image 
[#1519](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1519) ([@consideRatio](https://github.com/consideRatio)) +- Fix hub image dependency versions, disable ltiauthenticator, use chartpress==0.5.0 [#1518](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1518) ([@consideRatio](https://github.com/consideRatio)) +- Update hub image dependencies and RELEASE.md regarding dependencies [#1484](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1484) ([@consideRatio](https://github.com/consideRatio)) +- Bump kubespawner to 0.11.1 for spawner progress bugfix [#1502](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1502) ([@consideRatio](https://github.com/consideRatio)) +- Updated hub image dependencies [#1484](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1484) ([@consideRatio](https://github.com/consideRatio)) +- Updated kube-scheduler binary used by user-scheduler, kubespawner, kubernetes python client, and oauthenticator [#1483](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1483) ([@consideRatio](https://github.com/consideRatio)) +- Bump CHP to 4.2.0 - we get quicker chart upgrades now [#1481](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1481) ([@consideRatio](https://github.com/consideRatio)) +- Bump singleuser-sample [#1473](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1473) ([@consideRatio](https://github.com/consideRatio)) +- Bump python-kubernetes to 9.0._ (later also to 10.0._) [#1454](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1454) ([@clkao](https://github.com/clkao)) +- Bump tmpauthenticator to 0.6 (needed for jupyterhub 1.0) [#1299](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1299) ([@manics](https://github.com/manics)) +- Include jupyter-firstuseauthenticator. 
[#1288](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1288) ([@danielballan](https://github.com/danielballan)) +- Bump jupyterhub to 1.0.0 (later also to a post 1.0.0 commit) [#1263](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1263) ([@minrk](https://github.com/minrk)) +- Bump CHP image to 4.1.0 from 3.0.0 (later to 4.2.0) [#1246](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1246) ([@consideRatio](https://github.com/consideRatio)) +- Bump oauthenticator 0.8.2 (later to 0.10.0) [#1239](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1239) ([@minrk](https://github.com/minrk)) +- Bump jupyterhub to 1.0b2 (later to an post 1.0.0 commit) [#1224](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1224) ([@minrk](https://github.com/minrk)) #### Fixed -* Workaround upstream kubernetes issue regarding https health check [#1531](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1531) ([@sstarcher](https://github.com/sstarcher)) -* User-scheduler RBAC permissions for local-path-provisioner + increase robustness of hub.baseUrl interaction with the hub deployments health endpoint [#1530](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1530) ([@cutiechi](https://github.com/cutiechi)) -* Fixing #1300 User-scheduler doesn't work with rancher/local-path-provisioner [#1516](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1516) ([@cgiraldo](https://github.com/cgiraldo)) -* Move z2jh.py to a python and linux distribution agnostic path [#1478](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1478) ([@mrow4a](https://github.com/mrow4a)) -* Bugfix for proxy upgrade strategy in PR #1401 [#1404](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1404) ([@consideRatio](https://github.com/consideRatio)) -* Use recreate CHP proxy pod's deployment strategy [#1401](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1401) ([@consideRatio](https://github.com/consideRatio)) -* Proxy 
deployment: Change probes to https port [#1378](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1378) ([@chicocvenancio](https://github.com/chicocvenancio)) -* Readiness and liveness probes re-added [#1361](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1361) ([@consideRatio](https://github.com/consideRatio)) -* Use 443 as https port or redirection. FIX #806 [#1341](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1341) ([@chicocvenancio](https://github.com/chicocvenancio)) -* Revert "Configure liveness/readinessProbe" [#1356](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1356) ([@consideRatio](https://github.com/consideRatio)) -* Ensure helm chart configuration is passed to JupyterHub where needed [#1338](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1338) ([@bitnik](https://github.com/bitnik)) -* Make proxy redirect to the service port 443 instead of the container port 8443 [#1337](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1337) ([@LucidNeko](https://github.com/LucidNeko)) -* Disable becoming root inside hub and proxy containers [#1280](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1280) ([@yuvipanda](https://github.com/yuvipanda)) -* Configure KubeSpawner with the `singleuser.image.pullPolicy` properly [#1248](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1248) ([@vmarkovtsev](https://github.com/vmarkovtsev)) -* Supply `hub.runAsUser` for the hub at the container level instead of the pod level [#1240](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1240) ([@tmc](https://github.com/tmc)) -* Relax HSTS requirement on subdomains [#1219](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1219) ([@yuvipanda](https://github.com/yuvipanda)) +- Workaround upstream kubernetes issue regarding https health check [#1531](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1531) ([@sstarcher](https://github.com/sstarcher)) +- User-scheduler RBAC permissions 
for local-path-provisioner + increase robustness of hub.baseUrl interaction with the hub deployments health endpoint [#1530](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1530) ([@cutiechi](https://github.com/cutiechi)) +- Fixing #1300 User-scheduler doesn't work with rancher/local-path-provisioner [#1516](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1516) ([@cgiraldo](https://github.com/cgiraldo)) +- Move z2jh.py to a python and linux distribution agnostic path [#1478](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1478) ([@mrow4a](https://github.com/mrow4a)) +- Bugfix for proxy upgrade strategy in PR #1401 [#1404](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1404) ([@consideRatio](https://github.com/consideRatio)) +- Use recreate CHP proxy pod's deployment strategy [#1401](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1401) ([@consideRatio](https://github.com/consideRatio)) +- Proxy deployment: Change probes to https port [#1378](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1378) ([@chicocvenancio](https://github.com/chicocvenancio)) +- Readiness and liveness probes re-added [#1361](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1361) ([@consideRatio](https://github.com/consideRatio)) +- Use 443 as https port or redirection. 
FIX #806 [#1341](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1341) ([@chicocvenancio](https://github.com/chicocvenancio)) +- Revert "Configure liveness/readinessProbe" [#1356](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1356) ([@consideRatio](https://github.com/consideRatio)) +- Ensure helm chart configuration is passed to JupyterHub where needed [#1338](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1338) ([@bitnik](https://github.com/bitnik)) +- Make proxy redirect to the service port 443 instead of the container port 8443 [#1337](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1337) ([@LucidNeko](https://github.com/LucidNeko)) +- Disable becoming root inside hub and proxy containers [#1280](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1280) ([@yuvipanda](https://github.com/yuvipanda)) +- Configure KubeSpawner with the `singleuser.image.pullPolicy` properly [#1248](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1248) ([@vmarkovtsev](https://github.com/vmarkovtsev)) +- Supply `hub.runAsUser` for the hub at the container level instead of the pod level [#1240](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1240) ([@tmc](https://github.com/tmc)) +- Relax HSTS requirement on subdomains [#1219](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1219) ([@yuvipanda](https://github.com/yuvipanda)) #### Maintenance -* typo [#1529](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1529) ([@raybellwaves](https://github.com/raybellwaves)) -* fix link to Helm chart best practices [#1523](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1523) ([@rpwagner](https://github.com/rpwagner)) -* Adding Globus to the list of users [#1522](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1522) ([@rpwagner](https://github.com/rpwagner)) -* Missing page link for our RBAC documentation #1508 [#1514](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1514) 
([@n3o-Bhushan](https://github.com/n3o-Bhushan)) -* Correction of warnings from: make html [#1513](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1513) ([@consideRatio](https://github.com/consideRatio)) -* Fixing URL for user-management documentation #1511 [#1512](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1512) ([@n3o-Bhushan](https://github.com/n3o-Bhushan)) -* DOC: fixing authentication link in user customization guide [#1510](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1510) ([@n3o-Bhushan](https://github.com/n3o-Bhushan)) -* DOC: fix kubernetes setup link [#1505](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1505) ([@raybellwaves](https://github.com/raybellwaves)) -* Update changelog for 0.9.0-beta.1 [#1503](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1503) ([@consideRatio](https://github.com/consideRatio)) -* Fix broken link in architecture.rst [#1488](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1488) ([@amcnicho](https://github.com/amcnicho)) -* Bump kind to 0.6.0 and kindest/node versions [#1487](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1487) ([@clkao](https://github.com/clkao)) -* Avoid rate limiting for k8s resource validation [#1485](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1485) ([@consideRatio](https://github.com/consideRatio)) -* Switching to the Pandas Sphinx theme [#1472](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1472) ([@choldgraf](https://github.com/choldgraf)) -* Add vi / less to hub image [#1471](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1471) ([@yuvipanda](https://github.com/yuvipanda)) -* Added existing pull secrets changes from PR #1426 to schema [#1461](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1461) ([@sgloutnikov](https://github.com/sgloutnikov)) -* Chart upgrade tests [#1459](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1459) 
([@consideRatio](https://github.com/consideRatio)) -* Replaced broken links in authentication document #1449 [#1457](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1457) ([@n3o-Bhushan](https://github.com/n3o-Bhushan)) -* Fix typo in home page of docs [#1456](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1456) ([@celine168](https://github.com/celine168)) -* Use helm 2.15.1 [#1453](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1453) ([@consideRatio](https://github.com/consideRatio)) -* Support CD with git tags [#1450](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1450) ([@consideRatio](https://github.com/consideRatio)) -* Added Laurent Goderre as contributor [#1443](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1443) ([@LaurentGoderre](https://github.com/LaurentGoderre)) -* Note about future hard deprecation [#1441](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1441) ([@consideRatio](https://github.com/consideRatio)) -* Fix link formatting for ingress.enabled [#1438](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1438) ([@jtpio](https://github.com/jtpio)) -* CI rework - use kind, validate->test->publish, contrib and release rework [#1422](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1422) ([@consideRatio](https://github.com/consideRatio)) -* Mounting jupyterhub_config.py etc. 
[#1407](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1407) ([@consideRatio](https://github.com/consideRatio)) -* Ignore venv files [#1388](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1388) ([@GeorgianaElena](https://github.com/GeorgianaElena)) -* Added example for populating notebook user home directory [#1382](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1382) ([@gareth-j](https://github.com/gareth-j)) -* Fix typo in jupyterhub_config.py comment [#1376](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1376) ([@loganlinn](https://github.com/loganlinn)) -* Fixed formatting error in links [#1363](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1363) ([@tlkh](https://github.com/tlkh)) -* Instructions for adding GPUs and increasing shared memory [#1358](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1358) ([@tlkh](https://github.com/tlkh)) -* delete redundant prepuller documentation [#1348](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1348) ([@bitnik](https://github.com/bitnik)) -* Add py-spy to hub image [#1327](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1327) ([@yuvipanda](https://github.com/yuvipanda)) -* Changing Azure Container Service to Azure Kubernetes Service [#1322](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1322) ([@seanmck](https://github.com/seanmck)) -* add explanation for lifecycle_hooks in kubespawner_override [#1309](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1309) ([@clancychilds](https://github.com/clancychilds)) -* Update chart version to 0.8.2 in the docs [#1304](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1304) ([@jtpio](https://github.com/jtpio)) -* Fix azure cli VMSSPreview feature register command [#1298](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1298) ([@dazzag24](https://github.com/dazzag24)) -* Unbreak git build [#1294](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1294) 
([@joshbode](https://github.com/joshbode)) -* Update Dockerfile to JH 1.0 [#1291](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1291) ([@vilhelmen](https://github.com/vilhelmen)) -* Fix a couple of mistakes in Google Kubernetes instructions [#1290](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1290) ([@astrofrog](https://github.com/astrofrog)) -* Suggest quotes around tag. [#1289](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1289) ([@danielballan](https://github.com/danielballan)) -* hub: Add useful debugging tools to hub image [#1279](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1279) ([@yuvipanda](https://github.com/yuvipanda)) -* Clean up a line in the CI logs [#1278](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1278) ([@consideRatio](https://github.com/consideRatio)) -* Fix prePuller.extraImages linting etc [#1275](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1275) ([@consideRatio](https://github.com/consideRatio)) -* Fixed minor bug in google pricing calculator [#1264](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1264) ([@noahbjohnson](https://github.com/noahbjohnson)) -* [MRG] Update to Docs: Deploying an Autoscaling Kubernetes cluster on Azure [#1258](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1258) ([@sgibson91](https://github.com/sgibson91)) -* Update to Docs: Add Azure scale command to Expanding/Contracting Cluster section [#1256](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1256) ([@sgibson91](https://github.com/sgibson91)) -* removing extra buttons [#1254](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1254) ([@choldgraf](https://github.com/choldgraf)) -* test appVersion in Chart.yaml [#1238](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1238) ([@minrk](https://github.com/minrk)) -* Adjusts whitespace for a code block in AWS instructions. 
[#1237](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1237) ([@arokem](https://github.com/arokem)) -* Change heading of multiple-profiles section [#1236](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1236) ([@moschlar](https://github.com/moschlar)) -* Suggest Discourse in issue template [#1234](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1234) ([@manics](https://github.com/manics)) -* Added OAuth callback URL to keycloak OIDC example [#1232](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1232) ([@sgloutnikov](https://github.com/sgloutnikov)) -* Updated notes, pod status to Running [#1231](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1231) ([@sgloutnikov](https://github.com/sgloutnikov)) -* Updated AWS EKS region-availability statement. [#1223](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1223) ([@javabrett](https://github.com/javabrett)) -* Fix the default value of lifecycleHooks [#1218](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1218) ([@consideRatio](https://github.com/consideRatio)) -* Update user-environment.rst [#1217](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1217) ([@manycoding](https://github.com/manycoding)) -* Add Digital Ocean Cloud Instructions for Kubernetes [#1192](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1192) ([@alexmorley](https://github.com/alexmorley)) - - +- typo [#1529](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1529) ([@raybellwaves](https://github.com/raybellwaves)) +- fix link to Helm chart best practices [#1523](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1523) ([@rpwagner](https://github.com/rpwagner)) +- Adding Globus to the list of users [#1522](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1522) ([@rpwagner](https://github.com/rpwagner)) +- Missing page link for our RBAC documentation #1508 [#1514](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1514) 
([@n3o-Bhushan](https://github.com/n3o-Bhushan)) +- Correction of warnings from: make html [#1513](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1513) ([@consideRatio](https://github.com/consideRatio)) +- Fixing URL for user-management documentation #1511 [#1512](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1512) ([@n3o-Bhushan](https://github.com/n3o-Bhushan)) +- DOC: fixing authentication link in user customization guide [#1510](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1510) ([@n3o-Bhushan](https://github.com/n3o-Bhushan)) +- DOC: fix kubernetes setup link [#1505](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1505) ([@raybellwaves](https://github.com/raybellwaves)) +- Update changelog for 0.9.0-beta.1 [#1503](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1503) ([@consideRatio](https://github.com/consideRatio)) +- Fix broken link in architecture.rst [#1488](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1488) ([@amcnicho](https://github.com/amcnicho)) +- Bump kind to 0.6.0 and kindest/node versions [#1487](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1487) ([@clkao](https://github.com/clkao)) +- Avoid rate limiting for k8s resource validation [#1485](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1485) ([@consideRatio](https://github.com/consideRatio)) +- Switching to the Pandas Sphinx theme [#1472](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1472) ([@choldgraf](https://github.com/choldgraf)) +- Add vi / less to hub image [#1471](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1471) ([@yuvipanda](https://github.com/yuvipanda)) +- Added existing pull secrets changes from PR #1426 to schema [#1461](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1461) ([@sgloutnikov](https://github.com/sgloutnikov)) +- Chart upgrade tests [#1459](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1459) 
([@consideRatio](https://github.com/consideRatio)) +- Replaced broken links in authentication document #1449 [#1457](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1457) ([@n3o-Bhushan](https://github.com/n3o-Bhushan)) +- Fix typo in home page of docs [#1456](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1456) ([@celine168](https://github.com/celine168)) +- Use helm 2.15.1 [#1453](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1453) ([@consideRatio](https://github.com/consideRatio)) +- Support CD with git tags [#1450](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1450) ([@consideRatio](https://github.com/consideRatio)) +- Added Laurent Goderre as contributor [#1443](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1443) ([@LaurentGoderre](https://github.com/LaurentGoderre)) +- Note about future hard deprecation [#1441](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1441) ([@consideRatio](https://github.com/consideRatio)) +- Fix link formatting for ingress.enabled [#1438](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1438) ([@jtpio](https://github.com/jtpio)) +- CI rework - use kind, validate->test->publish, contrib and release rework [#1422](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1422) ([@consideRatio](https://github.com/consideRatio)) +- Mounting jupyterhub_config.py etc. 
[#1407](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1407) ([@consideRatio](https://github.com/consideRatio)) +- Ignore venv files [#1388](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1388) ([@GeorgianaElena](https://github.com/GeorgianaElena)) +- Added example for populating notebook user home directory [#1382](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1382) ([@gareth-j](https://github.com/gareth-j)) +- Fix typo in jupyterhub_config.py comment [#1376](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1376) ([@loganlinn](https://github.com/loganlinn)) +- Fixed formatting error in links [#1363](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1363) ([@tlkh](https://github.com/tlkh)) +- Instructions for adding GPUs and increasing shared memory [#1358](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1358) ([@tlkh](https://github.com/tlkh)) +- delete redundant prepuller documentation [#1348](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1348) ([@bitnik](https://github.com/bitnik)) +- Add py-spy to hub image [#1327](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1327) ([@yuvipanda](https://github.com/yuvipanda)) +- Changing Azure Container Service to Azure Kubernetes Service [#1322](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1322) ([@seanmck](https://github.com/seanmck)) +- add explanation for lifecycle_hooks in kubespawner_override [#1309](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1309) ([@clancychilds](https://github.com/clancychilds)) +- Update chart version to 0.8.2 in the docs [#1304](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1304) ([@jtpio](https://github.com/jtpio)) +- Fix azure cli VMSSPreview feature register command [#1298](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1298) ([@dazzag24](https://github.com/dazzag24)) +- Unbreak git build [#1294](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1294) 
([@joshbode](https://github.com/joshbode)) +- Update Dockerfile to JH 1.0 [#1291](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1291) ([@vilhelmen](https://github.com/vilhelmen)) +- Fix a couple of mistakes in Google Kubernetes instructions [#1290](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1290) ([@astrofrog](https://github.com/astrofrog)) +- Suggest quotes around tag. [#1289](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1289) ([@danielballan](https://github.com/danielballan)) +- hub: Add useful debugging tools to hub image [#1279](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1279) ([@yuvipanda](https://github.com/yuvipanda)) +- Clean up a line in the CI logs [#1278](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1278) ([@consideRatio](https://github.com/consideRatio)) +- Fix prePuller.extraImages linting etc [#1275](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1275) ([@consideRatio](https://github.com/consideRatio)) +- Fixed minor bug in google pricing calculator [#1264](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1264) ([@noahbjohnson](https://github.com/noahbjohnson)) +- [MRG] Update to Docs: Deploying an Autoscaling Kubernetes cluster on Azure [#1258](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1258) ([@sgibson91](https://github.com/sgibson91)) +- Update to Docs: Add Azure scale command to Expanding/Contracting Cluster section [#1256](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1256) ([@sgibson91](https://github.com/sgibson91)) +- removing extra buttons [#1254](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1254) ([@choldgraf](https://github.com/choldgraf)) +- test appVersion in Chart.yaml [#1238](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1238) ([@minrk](https://github.com/minrk)) +- Adjusts whitespace for a code block in AWS instructions. 
[#1237](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1237) ([@arokem](https://github.com/arokem)) +- Change heading of multiple-profiles section [#1236](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1236) ([@moschlar](https://github.com/moschlar)) +- Suggest Discourse in issue template [#1234](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1234) ([@manics](https://github.com/manics)) +- Added OAuth callback URL to keycloak OIDC example [#1232](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1232) ([@sgloutnikov](https://github.com/sgloutnikov)) +- Updated notes, pod status to Running [#1231](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1231) ([@sgloutnikov](https://github.com/sgloutnikov)) +- Updated AWS EKS region-availability statement. [#1223](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1223) ([@javabrett](https://github.com/javabrett)) +- Fix the default value of lifecycleHooks [#1218](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1218) ([@consideRatio](https://github.com/consideRatio)) +- Update user-environment.rst [#1217](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1217) ([@manycoding](https://github.com/manycoding)) +- Add Digital Ocean Cloud Instructions for Kubernetes [#1192](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/pull/1192) ([@alexmorley](https://github.com/alexmorley)) ## [0.8] @@ -848,8 +852,8 @@ To upgrade your cluster: 2. read changes here and make any needed updates to your configuration 3. upgrade the chart: - helm repo update - helm upgrade $RELEASE --force --version 0.8.0 --values config.yaml + helm repo update + helm upgrade $RELEASE --force --version 0.8.0 --values config.yaml The `--force` flag allows deletion and recreation of objects that have certain changes, such as different labels, @@ -859,7 +863,6 @@ which are forbidden otherwise. 
- Github organisation OAuth: `auth.github.org_whitelist` has been renamed to `auth.github.orgWhitelist` to be consistent with helm's camelCase style - #### Troubleshooting If you encounter issues with upgrades, check for changed configuration in this document, and make sure your config is up to date. @@ -870,7 +873,6 @@ to a previous version with: helm rollback $RELEASE - Feel free to [ping us on gitter](https://gitter.im/jupyterhub/jupyterhub) if you have problems or questions. @@ -899,12 +901,10 @@ It also ensures that the image puller DaemonSets have the same credentials to pu Want to make your autoscheduler work efficiently? Then you should schedule pods to pack tight instead of spread out. The user scheduler accomplishes this. - - **Pod priority and User placeholders** - #929 Want to scale up before users arrive so they don't end up waiting for the node to pull an image of several gigabytes in size? By adding a configurable fixed amount of user placeholder pods with a lower [pod priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) than real user pods, we can accomplish this. It requires k8s v1.11 though. - - **preferScheduleNextToRealUsers - improves autoscaling** - #930 This setting slightly improves the ability for a cluster autoscaler to scale down by increasing the likelihood of user placeholders being left alone on a node rather than real users. Real users can't be moved around while user placeholder pods can @@ -929,7 +929,7 @@ Want to scale up before users arrive so they don't end up waiting for the node t Migrate to more stable K8s resource APIs from `beta`. - **Update of the singleuser-sample image** - #888 `git` and `nbgitpuller` are now available by default -- **Switch to using a StatefulSet for the Hub** __*__ +- **Switch to using a StatefulSet for the Hub** **\*** The Hub should perhaps be a StatefulSet rather than a Deployment as it tends to be tied to a PV that can only be mounted by one single Hub. 
See this issue: https://github.com/helm/charts/issues/1863 - Show users deprecation and error messages when they use certain deprecated configuration (e.g. `hub.extraConfig` as a single string) @@ -977,7 +977,6 @@ For most Australians, summer means cricket. And cricket means hearing the dulcet tones of their favourite commentator, Richie Benaud. From the cream coloured suit, to the witty repartee with his colleagues, Benaud is the complete package - #### Contributors This release wouldn't have been possible without the wonderful contributors @@ -1379,7 +1378,6 @@ on issues, PRs and reviews since the last Zero to JupyterHub release. [邱雨波](https://github.com/CraftHeart) [高彦涛](https://github.com/gytlinux) - ## [0.7.0](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/compare/v0.6...0.7.0) - [Alex Blackwell](https://en.wikipedia.org/wiki/Alex_Blackwell) - 2018-09-03 This release contains JupyterHub version 0.9.2, additional configuration options @@ -1518,8 +1516,7 @@ helm upgrade $RELEASE_NAME jupyterhub/jupyterhub --install \ #### 6. Manage active users Active users with running pods must restart their pods. If they don't the next -time they attempt to access their server they may end up with `{“error”: -“invalid_redirect_uri”, “error_description”: “Invalid redirect URI”}`. +time they attempt to access their server they may end up with `{“error”: “invalid_redirect_uri”, “error_description”: “Invalid redirect URI”}`. You have the power to force this to happen, but it will abort what they are doing right now. If you want them to be able to do it in their own pace, you @@ -2221,6 +2218,7 @@ and configurability improvements! ### Breaking changes #### Pre-puller configuration + In prior versions (v0.5), if you wanted to disable the pre-puller, you would use: @@ -2433,7 +2431,6 @@ The following new authentication providers have been added: You can also set up a whitelist of users by adding to the list in `auth.whitelist.users`. 
- #### Easier customization of `jupyterhub_config.py` You can always put extra snippets of `jupyterhub_config.py` configuration in @@ -2650,7 +2647,6 @@ In alphabetical order, - [Zhenwen Zhang](https://github.com/zhangzhenwen) - [Zoltan Fedor](https://github.com/zoltan-fedor) - ## [0.4] - [Akram](#akram) - 2017-06-23 Stability, HTTPS & breaking changes. @@ -2664,63 +2660,63 @@ installation. ### Breaking changes -* The **name of a user pod** and a **dynamically created home directory [PVC (PersistentVolumeClaim)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)** no longer include +- The **name of a user pod** and a **dynamically created home directory [PVC (PersistentVolumeClaim)](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)** no longer include the `userid` in them by default. If you are using dynamic PVCs for `home` - directories (which is the default), you will need to *manually rename* these + directories (which is the default), you will need to _manually rename_ these directories before upgrading. Otherwise, new PVCs will be created, and users might freak out when viewing the newly created directory and think that their home directory appears empty. See [PR #56](https://github.com/jupyterhub/kubespawner/pull/56) on what needs to change. -* A **[StorageClass](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#storageclasses)** +- A **[StorageClass](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#storageclasses)** is no longer created by default. This shouldn't affect most new installs, since most cloud provider installations have a default (as of Kubernetes 1.6). If you are using an older version of Kubernetes, the easiest thing to do is to upgrade to a newer version. If not, you can create a StorageClass manually and everything should continue to work. -* `token.proxy` is removed. Use **`proxy.secretToken`** instead. +- `token.proxy` is removed. 
Use **`proxy.secretToken`** instead. If your `config.yaml` contains something that looks like the following: ```yaml token: - proxy: + proxy: ``` you should change that to: ```yaml proxy: - secretToken: + secretToken: ``` ### Added -* Added **GitHub Authentication support**, thanks to [Jason Kuruzovich](https://github.com/jkuruzovich). -* Added **[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) support**! +- Added **GitHub Authentication support**, thanks to [Jason Kuruzovich](https://github.com/jkuruzovich). +- Added **[Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) support**! If your cluster already has Ingress support (with automatic Let's Encrypt support, perhaps), you can easily use that now. -* We now add a **label** to user pods / PVCs with their usernames. -* Support using a **static PVC** for user `home` directories or for the hub database. This makes this release usable +- We now add a **label** to user pods / PVCs with their usernames. +- Support using a **static PVC** for user `home` directories or for the hub database. This makes this release usable with clusters where you only have one NFS share that must be used for the whole hub. -* **PostgreSQL** is now a supported hub database backend provider. -* You can set annotations & labels on the **proxy-public service** now. +- **PostgreSQL** is now a supported hub database backend provider. +- You can set annotations & labels on the **proxy-public service** now. ### Changed -* We now use the official [configurable http proxy](http://github.com/jupyterhub/configurable-http-proxy) +- We now use the official [configurable http proxy](http://github.com/jupyterhub/configurable-http-proxy) (CHP) as the proxy, rather than the unofficial [nchp](https://github.com/yuvipanda/jupyterhub-nginx-chp). This should be a no-op (or require no changes) for the most part. JupyterHub errors might display a nicer error page. 
-* The version of KubeSpawner uses the official Kubernetes +- The version of KubeSpawner uses the official Kubernetes [python client](https://github.com/kubernetes-incubator/client-python/) rather than [pycurl](http://pycurl.io/). This helps with scalability a little. ### Removed -* The deprecated `createNamespace` parameter no longer works, alongside the +- The deprecated `createNamespace` parameter no longer works, alongside the deprecated `name` parameter. You probably weren't using these anyway - they were kept only for backwards compatibility with very early versions. @@ -2729,11 +2725,11 @@ installation. This release made possible by the awesome work of the following contributors (in alphabetical order): -* [Analect](https://github.com/analect) -* [Carol Willing](https://github.com/willingc) -* [Jason Kuruzovich](https://github.com/jkuruzovich) -* [Min RK](https://github.com/minrk/) -* [Yuvi Panda](https://github.com/yuvipanda/) +- [Analect](https://github.com/analect) +- [Carol Willing](https://github.com/willingc) +- [Jason Kuruzovich](https://github.com/jkuruzovich) +- [Min RK](https://github.com/minrk/) +- [Yuvi Panda](https://github.com/yuvipanda/) <3 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9cf8328d2f..20b7a5f365 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,8 +20,6 @@ See [doc/README.md](doc/README.md). # Setting up for Helm chart development - - ## 1: Prerequisites This needs to be installed: @@ -57,7 +55,7 @@ With [k3s](https://github.com/rancher/k3s) we can _quickly_ create a Kubernetes cluster, and we _don't have to transfer docker images_ built on our computer to make them available in the Kubernetes cluster. 
-__Install__ +**Install** ```shell # Installs a ~50 MB k3s binary, setups and starts a systemctl service called @@ -75,7 +73,7 @@ curl -sfL https://get.k3s.io | sh -s - \ export KUBECONFIG=/etc/rancher/k3s/k3s.yaml ``` -__Start/Stop and Enable/Disable__ +**Start/Stop and Enable/Disable** With `systemctl` you can `start` and `stop` the service named `k3s` representing the cluster, as well as `enable` and `disable` the service's automatic startup @@ -88,7 +86,7 @@ sudo systemctl stop k3s docker stop $(docker container list --quiet --filter "name=k8s_") ``` -__Debug__ +**Debug** ```shell # what is the status of the k3s service? @@ -101,7 +99,7 @@ journalctl -u k3s --since "1 hour ago" docker container list --filter "name=k8s_" ``` -__Uninstall__ +**Uninstall** When k3s was installed with the installation script, an uninstallation script is made available as well. @@ -116,7 +114,7 @@ docker stop $(docker container list --all --quiet --filter "name=k8s_") | xargs ### Linux, Mac, and possibly Windows: Kubernetes setup with k3d -> __IMPORTANT:__ This setup assume k3d v1, because the k3d v3 doesn't support +> **IMPORTANT:** This setup assume k3d v1, because the k3d v3 doesn't support > the `--docker` flag. This is tracked in [this issue](https://github.com/rancher/k3d/issues/113). [k3d](https://github.com/rancher/k3d) encapsulates k3s in containers. It is less @@ -125,7 +123,7 @@ docker images to be pushed to a dedicated registry before they can be accessed by the pods in the Kubernetes cluster, until [this issue](https://github.com/rancher/k3d/issues/113) is resolved. -__Install__ +**Install** ```shell k3d create --publish 30443:30443 --publish 32444:32444 --wait 60 \ @@ -142,18 +140,19 @@ export KUBECONFIG="$(k3d get-kubeconfig --name='k3s-default')" # These instructions aren't maintained, you need to figure it out yourself =/ ``` -__About the published ports__ +**About the published ports** + - 30443: This port exposes the `proxy-public` service. 
It will route to the - `autohttps` pod for TLS termination, then onwards to the `proxy` pod - that routes to the `hub` pod or individual user pods depending on paths - (`/hub` vs `/user`) and how JupyterHub dynamically has configured it. + `autohttps` pod for TLS termination, then onwards to the `proxy` pod + that routes to the `hub` pod or individual user pods depending on paths + (`/hub` vs `/user`) and how JupyterHub dynamically has configured it. - 32444: This port exposes the `pebble` service which which accepts two ports, - and this specific port will route to the `pebble` pod's management API - where we can access paths like `/roots/0`. For more details about - Pebble which we use as a local ACME server, see the section below and - https://github.com/jupyterhub/pebble-helm-chart. + and this specific port will route to the `pebble` pod's management API + where we can access paths like `/roots/0`. For more details about + Pebble which we use as a local ACME server, see the section below and + https://github.com/jupyterhub/pebble-helm-chart. -__Stop__ +**Stop** ```shell k3d delete @@ -172,7 +171,7 @@ Pebble is a an ACME server like Let's Encrypt solely meant for testing purposes. For more information, see [jupyterhub/pebble-helm-chart](https://github.com/jupyterhub/pebble-helm-chart). -__Install Pebble__ +**Install Pebble** ```shell helm repo add jupyterhub https://jupyterhub.github.io/helm-chart/ @@ -218,8 +217,7 @@ only rebuild images if their dependent files in their respective directories or helm upgrade --install jupyterhub ./jupyterhub --cleanup-on-fail --values dev-config.yaml ``` - Note that `--cleanup-on-fail` is a very good practice to avoid ` already exist` errors in future upgrades following a failed upgrade. + Note that `--cleanup-on-fail` is a very good practice to avoid ` already exist` errors in future upgrades following a failed upgrade. ## 5: Visit the JupyterHub @@ -261,7 +259,6 @@ and commit any changes. 
You can configure pre-commit to automatically run as a git hook, see the [pre-commit installation instructions](https://pre-commit.com/). - # Debugging Various things can go wrong while working with the local development @@ -323,8 +320,7 @@ development setup is `coredns`. Have you seen the hub pod get a restart count > 0? JupyterHub 1.1.0 is typically crashing after 20 seconds if it started up without the configurable proxy pod -available. This harmless error can be confirmed by doing a `kubectl logs -deploy/hub --previous` if you spot a message about a timeout after ~20 seconds in +available. This harmless error can be confirmed by doing a `kubectl logs deploy/hub --previous` if you spot a message about a timeout after ~20 seconds in the logs. ## Network errors @@ -346,7 +342,7 @@ recognize such issues if you get errors like the ones above. As you may notice, typical keywords associated with network errors are: -- *resolve host* -- *name resolution* -- *timeout* -- *no route to host* +- _resolve host_ +- _name resolution_ +- _timeout_ +- _no route to host_ diff --git a/README.md b/README.md index 0d4922efb3..ab00103b59 100644 --- a/README.md +++ b/README.md @@ -12,8 +12,7 @@ [![Gitter](https://img.shields.io/badge/social_chat-gitter-blue?logo=gitter)](https://gitter.im/jupyterhub/jupyterhub) [![Contribute](https://img.shields.io/badge/I_want_to_contribute!-grey?logo=jupyter)](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/blob/master/CONTRIBUTING.md) - -This repo contains a *Helm chart* for JupyterHub and a guide to use it. Together +This repo contains a _Helm chart_ for JupyterHub and a guide to use it. Together they allow you to make a JupyterHub available to a very large group of users such as the staff and students of a university. 
diff --git a/RELEASE.md b/RELEASE.md index c7f777078e..7a70bb8db5 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -45,10 +45,10 @@ Also the images we build are based on some image specified in the `FROM` stateme - [ ] [network-tools](images/network-tools/Dockerfile) - [ ] [singleuser-sample](images/singleuser-sample/Dockerfile) - ## Pre-release iteration - Update `CHANGELOG.md` + - [ ] Generate a list of PRs using [executablebooks/github-activity](https://github.com/executablebooks/github-activity) ```bash github-activity --output github-activity-output.md --since jupyterhub/zero-to-jupyterhub-k8s @@ -59,6 +59,7 @@ Also the images we build are based on some image specified in the `FROM` stateme - [ ] Summarize the release changes - Tag a x.y.z-beta.1 release + - [ ] Create and push a git tag ```bash git checkout master @@ -73,6 +74,7 @@ Also the images we build are based on some image specified in the `FROM` stateme ## Final release - Update `CHANGELOG.md` + - [ ] Generate a list of merged PRs and a list of contributors and update the changelog. ```bash github-activity --output github-activity-output.md --since jupyterhub/zero-to-jupyterhub-k8s @@ -80,7 +82,9 @@ Also the images we build are based on some image specified in the `FROM` stateme - [ ] Link out to the downstream projects within the JupyterHub org to celebrate work done there as well. - Release + - [ ] Create and push a git tag. + ```bash git checkout master git reset --hard /master @@ -89,7 +93,7 @@ Also the images we build are based on some image specified in the `FROM` stateme ``` - [ ] Create a GitHub release. - Visit the [release page](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases) and create a new release referencing the recent tag. Add a brief text like the one below. + Visit the [release page](https://github.com/jupyterhub/zero-to-jupyterhub-k8s/releases) and create a new release referencing the recent tag. Add a brief text like the one below. 
- Communicate - [ ] Update the beta release's discourse post. diff --git a/doc/source/administrator/advanced.md b/doc/source/administrator/advanced.md index adea598d70..e6e0df4c85 100644 --- a/doc/source/administrator/advanced.md +++ b/doc/source/administrator/advanced.md @@ -11,6 +11,7 @@ to use any of this information, but these topics are essential for more complex installations. (ingress)= + ## Ingress If you are using a Kubernetes Cluster that does not provide public IPs for @@ -68,9 +69,9 @@ and **google cloud's ingress controller**. annotations: kubernetes.io/tls-acme: "true" tls: - - hosts: - - - secretName: kubelego-tls-jupyterhub + - hosts: + - + secretName: kubelego-tls-jupyterhub ``` This should provision a certificate, and keep renewing it whenever it gets close @@ -102,7 +103,7 @@ Code. Some examples of things you can do: 3. Set traitlets for JupyterHub / Spawner / Authenticator that are not currently supported in the helm chart -Unfortunately, you have to write your python *in* your YAML file. There's no way +Unfortunately, you have to write your python _in_ your YAML file. There's no way to include a file in `config.yaml`. You can specify `hub.extraConfig` as a raw string (remember to use the `|` for multi-line @@ -163,9 +164,9 @@ In your `hub.extraConfig`, 3. `z2jh.get_config('custom.myDict')` will return a dict `{"key": "value"}` 4. `z2jh.get_config('custom.myLongString')` will return a string `"Line1\nLine2"` 5. `z2jh.get_config('custom.nonExistent')` will return `None` (since you didn't - specify any value for `nonExistent`) + specify any value for `nonExistent`) 6. `z2jh.get_config('custom.myDefault', True)` will return `True`, since that is - specified as the second parameter (default) + specified as the second parameter (default) You need to have a `import z2jh` at the top of your `extraConfig` for `z2jh.get_config()` to work. 
diff --git a/doc/source/administrator/authentication.md b/doc/source/administrator/authentication.md index 8a66e24e9c..79ae78cf58 100644 --- a/doc/source/administrator/authentication.md +++ b/doc/source/administrator/authentication.md @@ -131,10 +131,10 @@ so that users can authenticate with their GitHub username/password. To create OAuth credentials on GitHub, follow these steps: -- Click your profile picture -> settings -> developer settings -- Make sure you're on the "OAuth Apps" tab, then click "New OAuth App" -- Fill out the forms (you'll need your hub address) and generate your - ID/Secret. +- Click your profile picture -> settings -> developer settings +- Make sure you're on the "OAuth Apps" tab, then click "New OAuth App" +- Fill out the forms (you'll need your hub address) and generate your + ID/Secret. To enable GitHub authentication, your `config.yaml` should contain the following configuration: @@ -271,8 +271,8 @@ hub: #### Azure Active Directory [Azure Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/) - is an identity provider from Microsoft Azure. Apart from needing a OAuth2 - _client id_ and _client secret_, you will also need a _tenant id_. +is an identity provider from Microsoft Azure. Apart from needing a OAuth2 +_client id_ and _client secret_, you will also need a _tenant id_. ```yaml hub: @@ -308,6 +308,7 @@ hub: JupyterHub: authenticator_class: auth0 ``` + #### GenericOAuthenticator - OpenID Connect [OpenID Connect](https://openid.net/connect) is an identity layer on top of the diff --git a/doc/source/administrator/cost.md b/doc/source/administrator/cost.md index b734965258..6a0bfc3819 100644 --- a/doc/source/administrator/cost.md +++ b/doc/source/administrator/cost.md @@ -17,9 +17,9 @@ on providers selected and your use cases. 
Below are several links to cost estimators for cloud providers:

-* [Google Cloud Platform cost calculator](https://cloud.google.com/products/calculator/)
-* [Amazon AWS cost calculator](https://calculator.s3.amazonaws.com/index.html)
-* [Microsoft Azure cost claculator](https://azure.microsoft.com/en-us/pricing/calculator/)
+- [Google Cloud Platform cost calculator](https://cloud.google.com/products/calculator/)
+- [Amazon AWS cost calculator](https://calculator.s3.amazonaws.com/index.html)
+- [Microsoft Azure cost calculator](https://azure.microsoft.com/en-us/pricing/calculator/)

 ## Factors influencing costs

@@ -61,7 +61,7 @@ will users use JupyterHub at different times of day?
 The usage patterns and peak load on the system have important implications for
 the resources you need to provide. In the future JupyterHub will have
 auto-scaling functionality, but currently it does not. This means that you need
-to provision resources for the *maximum* expected number of users at one time.
+to provision resources for the _maximum_ expected number of users at one time.

 ## Examples

@@ -80,5 +80,3 @@ of CPU and memory usage. Ryan Lovett put together a short Jupyter notebook
 needs.

 [estimating the cost for computational resources]: https://github.com/data-8/jupyterhub-k8s/blob/master/docs/cost-estimation/gce_budgeting.ipynb
-
-
diff --git a/doc/source/administrator/debug.md b/doc/source/administrator/debug.md
index 71629b58de..b429c4e19a 100644
--- a/doc/source/administrator/debug.md
+++ b/doc/source/administrator/debug.md
@@ -186,8 +186,7 @@ Kubernetes.
   `Error` or `CrashLoopBackoff` state, or appears to be running but accessing
   the website for the JupyterHub returns an error message in the browser).
-**Investigating:** the output of `kubectl --namespace=jhub logs -hub...` shows something like: +**Investigating:** the output of `kubectl --namespace=jhub logs hub...` shows something like: ``` File "/usr/local/lib/python3.5/dist-packages/jupyterhub/proxy.py", line 589, in get_all_routes @@ -206,12 +205,14 @@ communicate with the proxy pod API, likely because of a problem in the ``` openssl rand -hex 32 ``` + 2. Add the token to `config.yaml` like so: ``` proxy: secretToken: '' ``` + 3. Redeploy the helm chart: ``` diff --git a/doc/source/administrator/optimization.md b/doc/source/administrator/optimization.md index 2ea06125aa..96c6895712 100644 --- a/doc/source/administrator/optimization.md +++ b/doc/source/administrator/optimization.md @@ -1,4 +1,5 @@ (optimization)= + # Optimizations This page contains information and guidelines for improving the reliability, @@ -7,16 +8,16 @@ described is only purposeful for a better autoscaling experience. To summarize, for a good autoscaling experience, we recommend you to: -- Enable the *continuous image puller*, to prepare added nodes for arriving +- Enable the _continuous image puller_, to prepare added nodes for arriving users. -- Enable *pod priority* and add *user placeholders*, to scale up nodes ahead of +- Enable _pod priority_ and add _user placeholders_, to scale up nodes ahead of real users' arrivals. -- Enable the *user scheduler*, to pack users tight on some nodes and let other +- Enable the _user scheduler_, to pack users tight on some nodes and let other nodes become empty and scaled down. -- Set up an autoscaling node pool and dedicate it to user pods by *tainting* the - node and requiring user pods, which *tolerate* the nodes' taint, to schedule +- Set up an autoscaling node pool and dedicate it to user pods by _tainting_ the + node and requiring user pods, which _tolerate_ the nodes' taint, to schedule on these nodes. This way, only user pods can then block scale down. 
-- Set appropriate user resource *requests* and *limits*, to allow a reasonable +- Set appropriate user resource _requests_ and _limits_, to allow a reasonable amount of users to share a node. A reasonable final configuration for efficient autoscaling could look something @@ -57,6 +58,7 @@ singleuser: ``` (pulling-images-before-users-arrive)= + ## Pulling images before users arrive If a user pod is scheduled on a node requesting a Docker image that isn't @@ -66,61 +68,62 @@ situations: 1. A new single-user image is introduced (`helm upgrade`) - With the *hook-image-puller* enabled (the default), the user images being - introduced will be pulled to the nodes before the hub pod is updated to - utilize the new image. The name hook-image-puller is a technical name - referring to how a [Helm - hook](https://helm.sh/docs/topics/charts_hooks/) is used to accomplish - this, a more informative name would have been *pre-upgrade-image-puller*. + With the _hook-image-puller_ enabled (the default), the user images being + introduced will be pulled to the nodes before the hub pod is updated to + utilize the new image. The name hook-image-puller is a technical name + referring to how a [Helm + hook](https://helm.sh/docs/topics/charts_hooks/) is used to accomplish + this, a more informative name would have been _pre-upgrade-image-puller_. - **NOTE**: With this enabled your `helm upgrade` will take a long time if you - introduce a new image as it will wait for the pulling to complete. We - recommend that you add `--timeout 10m0s` or similar to your `helm upgrade` - command to give it enough time. + **NOTE**: With this enabled your `helm upgrade` will take a long time if you + introduce a new image as it will wait for the pulling to complete. We + recommend that you add `--timeout 10m0s` or similar to your `helm upgrade` + command to give it enough time. - The hook-image-puller is enabled by default. 
To disable it, use the - following snippet in your `config.yaml`: + The hook-image-puller is enabled by default. To disable it, use the + following snippet in your `config.yaml`: - ```yaml - prePuller: - hook: - enabled: false - ``` + ```yaml + prePuller: + hook: + enabled: false + ``` 2. A node is added (Cluster Autoscaler) - The amount of nodes in a Kubernetes cluster can increase, either by manually - scaling up the cluster size or by a cluster autoscaler. As new nodes will - come fresh without any images on their disks, a user pod arriving to this - node will be forced to wait while the image is pulled. - - With the *continuous-image-puller* enabled (**enabled** by default), the user's - container image will be pulled when a new node is added. New nodes can for - example be added manually or by a cluster autoscaler. The continuous - image-puller uses a - [daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) - to force Kubernetes to pull the user image on all nodes as soon as a node is - present. - - The continuous-image-puller is enabled by default. To disable it, use the - following snippet in your `config.yaml`: - - ```yaml - prePuller: - continuous: - # NOTE: if used with a Cluster Autoscaler, also add user-placeholders - enabled: false - ``` - - It is important to realize that if the continuous-image-puller together with - a Cluster Autoscaler (CA) won't guarantee a reduced wait time for users. It - only helps if the CA scales up before real users arrive, but the CA will - generally fail to do so. This is because it will only add a node if one or - more pods won't fit on the current nodes but would fit more if a node is - added, but at that point users are already waiting. To scale up nodes ahead - of time we can use [user-placeholders](#scaling-up-in-time-user-placeholders). + The amount of nodes in a Kubernetes cluster can increase, either by manually + scaling up the cluster size or by a cluster autoscaler. 
As new nodes will + come fresh without any images on their disks, a user pod arriving to this + node will be forced to wait while the image is pulled. + + With the _continuous-image-puller_ enabled (**enabled** by default), the user's + container image will be pulled when a new node is added. New nodes can for + example be added manually or by a cluster autoscaler. The continuous + image-puller uses a + [daemonset](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) + to force Kubernetes to pull the user image on all nodes as soon as a node is + present. + + The continuous-image-puller is enabled by default. To disable it, use the + following snippet in your `config.yaml`: + + ```yaml + prePuller: + continuous: + # NOTE: if used with a Cluster Autoscaler, also add user-placeholders + enabled: false + ``` + + It is important to realize that if the continuous-image-puller together with + a Cluster Autoscaler (CA) won't guarantee a reduced wait time for users. It + only helps if the CA scales up before real users arrive, but the CA will + generally fail to do so. This is because it will only add a node if one or + more pods won't fit on the current nodes but would fit more if a node is + added, but at that point users are already waiting. To scale up nodes ahead + of time we can use [user-placeholders](#scaling-up-in-time-user-placeholders). 
(images-that-will-be-pulled)= + ### The images that will be pulled The hook-image-puller and the continuous-image-puller has various sources @@ -130,12 +133,14 @@ provided with the Helm chart (that can be overridden with `config.yaml`) under the following paths: #### Relevant image sources + - `singleuser.image` - `singleuser.profileList[].kubespawner_override.image` - `singleuser.extraContainers[].image` - `prePuller.extraImages.someName` #### Additional sources + - `singleuser.networkTools.image` - `prePuller.pause.image` @@ -165,17 +170,18 @@ prePuller: ``` (efficient-cluster-autoscaling)= + ## Efficient Cluster Autoscaling -A [*Cluster -Autoscaler*](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) +A [_Cluster +Autoscaler_](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) (CA) will help you add and remove nodes from the cluster. But the CA needs some help to function well. Without help, it will both fail to scale up before users arrive and scale down nodes aggressively enough without disrupting users. ### Scaling up in time (user placeholders) -A *Cluster Autoscaler* (CA) will add nodes when pods don't fit on available +A _Cluster Autoscaler_ (CA) will add nodes when pods don't fit on available nodes but would fit if another node is added. But, this may lead to a long waiting time for the pod, and as a pod can represent a user, it can lead to a long waiting time for a user. There are now options to address this. @@ -185,7 +191,7 @@ Preemption](https://kubernetes.io/docs/concepts/configuration/pod-priority-preem was introduced. This allows pods with higher priority to preempt / evict pods with lower priority if that would help the higher priority pod fit on a node. 
-This priority mechanism allows us to add dummy users or *user-placeholders* with +This priority mechanism allows us to add dummy users or _user-placeholders_ with low priority that can take up resources until a real user with (higher priority) requires it. At this time, the lower priority pod will get preempted to make room for the high priority pod. This now evicted user-placeholder will now be @@ -201,7 +207,6 @@ with adjusted resource requests as specified in `singleuser.profileList`. To use three user placeholders for example, that can do their job thanks to pod priority, add the following configuration: - ```yaml scheduling: podPriority: @@ -235,23 +240,23 @@ and some JupyterHub pods (without a permissive Consider for example that many users arrive to your JupyterHub during the daytime. New nodes are added by the CA. Some system pod ends up on the new nodes along with the user pods for some reason. At night when the -[*culler*](culling-user-pods) has removed many inactive +[_culler_](culling-user-pods) has removed many inactive pods from some nodes. They are now free from user pods but there is still a single system pod stopping the CA from removing the node. -To avoid these scale down failures, we recommend using a *dedicated node pool* +To avoid these scale down failures, we recommend using a _dedicated node pool_ for the user pods. That way, all the important system pods will run at one or a limited set of nodes, so the autoscaling user nodes can scale from 0 to X and back from X to 0. -This section about scaling down efficiently, will also explains how the *user -scheduler* can help you reduce the failures to scale down due to blocking user +This section about scaling down efficiently, will also explains how the _user +scheduler_ can help you reduce the failures to scale down due to blocking user pods. 
#### Using a dedicated node pool for users -To set up a dedicated node pool for user pods, we can use [*taints and -tolerations*](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). +To set up a dedicated node pool for user pods, we can use [_taints and +tolerations_](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/). If we add a taint to all the nodes in the node pool, and a toleration on the user pods to tolerate being scheduled on a tainted node, we have practically dedicated the node pool to be used only by user pods. @@ -261,43 +266,42 @@ following: 1. Setup a node pool (with autoscaling), a certain label, and a certain taint. - If you need help on how to do this, please refer to your cloud providers - documentation. A node pool may be called a node group. + If you need help on how to do this, please refer to your cloud providers + documentation. A node pool may be called a node group. - - The label: `hub.jupyter.org/node-purpose=user` + - The label: `hub.jupyter.org/node-purpose=user` - **NOTE**: Cloud providers often have their own labels, separate from - kubernetes labels, but this label must be a kubernetes label. + **NOTE**: Cloud providers often have their own labels, separate from + kubernetes labels, but this label must be a kubernetes label. - - The taint: `hub.jupyter.org/dedicated=user:NoSchedule` + - The taint: `hub.jupyter.org/dedicated=user:NoSchedule` - **NOTE**: You may need to replace `/` with `_` due cloud provider - limitations. Both taints are tolerated by the user pods. + **NOTE**: You may need to replace `/` with `_` due cloud provider + limitations. Both taints are tolerated by the user pods. 2. Make user pods require to be scheduled on the node pool setup above - If you don't require the user pods to schedule on their dedicated node, you - may fill up the nodes where the other software runs. This can cause a `helm - upgrade` command to fail. 
For example, you may have run out of resources for - non-user pods that cannot schedule on the autoscaling node pool as they need - during a rolling update. - - The default setting is to make user pods *prefer* to be scheduled on nodes - with the `hub.jupyter.org/node-purpose=user` label, but you can also make it - *required* using the configuration below. - - ```yaml - scheduling: - userPods: - nodeAffinity: - # matchNodePurpose valid options: - # - ignore - # - prefer (the default) - # - require - matchNodePurpose: require - ``` - -**NOTE**: If you end up *not* using a dedicated node pool for users and want to + If you don't require the user pods to schedule on their dedicated node, you + may fill up the nodes where the other software runs. This can cause a `helm upgrade` command to fail. For example, you may have run out of resources for + non-user pods that cannot schedule on the autoscaling node pool as they need + during a rolling update. + + The default setting is to make user pods _prefer_ to be scheduled on nodes + with the `hub.jupyter.org/node-purpose=user` label, but you can also make it + _required_ using the configuration below. + + ```yaml + scheduling: + userPods: + nodeAffinity: + # matchNodePurpose valid options: + # - ignore + # - prefer (the default) + # - require + matchNodePurpose: require + ``` + +**NOTE**: If you end up _not_ using a dedicated node pool for users and want to scale down efficiently, you will need to learn about PodDisruptionBudget resources and do quite a bit more work in order to avoid ending up with almost empty nodes not scaling down. @@ -307,10 +311,10 @@ empty nodes not scaling down. If you have users starting new servers while the total number of active users decreasing, how will you free up a node so it can be scaled down? -This is what the *user scheduler* helps you with. The user scheduler's only task -is to schedule new user pods to the *most utilized node*. 
This can be compared -to the *default scheduler* that instead always tries to schedule pods so the -*least utilized node*. Only the user scheduler would allow the underutilized +This is what the _user scheduler_ helps you with. The user scheduler's only task +is to schedule new user pods to the _most utilized node_. This can be compared +to the _default scheduler_ that instead always tries to schedule pods so the +_least utilized node_. Only the user scheduler would allow the underutilized nodes to free up over time as the total amount of users decrease but a few users still arrive. @@ -336,7 +340,7 @@ scheduling: **NOTE**: For the user scheduler to work well, you need old user pods to shut down at some point. Make sure to properly configure the -[*culler*](culling-user-pods). +[_culler_](culling-user-pods). ## Balancing "guaranteed" vs "maximum" memory and CPU @@ -357,7 +361,7 @@ Using resource _limits_ and _guarantees_, you can use your cloud resources more The ratio of these two numbers is the _limit to guarantee ratio_. In the above case, your _limit to guarantee ratio_ is `1:1`. -If you set a *guarantee* of 1GB and a *limit* of 20GB then you have a _limit to guarantee ratio_ of `20:1`. +If you set a _guarantee_ of 1GB and a _limit_ of 20GB then you have a _limit to guarantee ratio_ of `20:1`. Your node will fit many more users on average. When a user starts a session, if there is at least 1GB of RAM available on the node then their session will start there. If not, a new node will be created (and your costs just went up). @@ -369,7 +373,7 @@ Uh oh, we are now well over the 100GB limit, and user sessions will start crashi This is what happens when your _limit to guarantee ratio_ is too big. The problem? Your user's behavior was not the right fit for your _limit to guarantee ratio_. 
-You should *increase* the guaranteed amount of RAM for each user, so that in general fewer users will be on a given node, and they are less-likely to saturate that node's memory by asking for RAM all at once. +You should _increase_ the guaranteed amount of RAM for each user, so that in general fewer users will be on a given node, and they are less-likely to saturate that node's memory by asking for RAM all at once. Choosing the right _limit to guarantee ratio_ ratio is an art, not a science. We suggest **starting with a ratio of 2 to 1** and adjusting from there based on whether you run into problems on your hub. diff --git a/doc/source/administrator/security.md b/doc/source/administrator/security.md index 240949f15c..de929ba7b0 100644 --- a/doc/source/administrator/security.md +++ b/doc/source/administrator/security.md @@ -16,6 +16,7 @@ If you prefer to encrypt your security reports, you can use [this PGP public key](https://ipython.org/ipython-doc/2/_downloads/ipython_security.asc). (https)= + ## HTTPS This section describes how to enable HTTPS on your JupyterHub. The easiest way to do so is by using [Let's Encrypt](https://letsencrypt.org/), though we'll also cover how to set up your own HTTPS credentials. For more information @@ -32,6 +33,7 @@ on HTTPS security see the certificates section of [this blog post](https://blog. It is important that you wait - prematurely going to the next step might cause problems! (setup-automatic-https)= + ### Set up automatic HTTPS JupyterHub uses [Let's Encrypt](https://letsencrypt.org/) to automatically create @@ -55,7 +57,8 @@ changes to your `config.yaml` file: 2. Apply the config changes by running `helm upgrade ...` 3. Wait for about a minute, now your hub should be HTTPS enabled! -*** +--- + **NOTE:** If the proxy service is of type `LoadBalancer`, which it is by default, then a specific static IP address can be requested (if available) instead of a dynamically acquired one. 
@@ -64,15 +67,17 @@ This ensures the same IP address for multiple deployments. The IP can be provided like: ```yaml - proxy: - service: - loadBalancerIP: xxx.xxx.xxx.xxx +proxy: + service: + loadBalancerIP: xxx.xxx.xxx.xxx ``` More info about this can be found on the [Configuration Reference](helm-chart-configuration-reference) page. -*** + +--- (setup-manual-https)= + ### Set up manual HTTPS If you have your own HTTPS certificates & want to use those instead of the automatically provisioned Let's Encrypt ones, that's also possible. Note that this is considered an advanced option, so we recommend not doing it unless you have good reasons. @@ -99,9 +104,8 @@ There are two ways to specify your manual certificate, directly in the config.ya -----END CERTIFICATE----- ``` -2. Apply the config changes by running helm upgrade .... -3. Wait for about a minute, now your hub should be HTTPS enabled! - +2. Apply the config changes by running helm upgrade .... +3. Wait for about a minute, now your hub should be HTTPS enabled! #### Specify certificate through Secret resource @@ -111,16 +115,16 @@ There are two ways to specify your manual certificate, directly in the config.ya 2. Add your domain and the name of your `secret` to your config.yaml. - ```yaml - proxy: - https: - enabled: true - hosts: - - - type: secret - secret: - name: example-tls - ``` + ```yaml + proxy: + https: + enabled: true + hosts: + - + type: secret + secret: + name: example-tls + ``` 3. Apply the config changes by running helm upgrade .... 4. Wait for about a minute, now your hub should be HTTPS enabled! @@ -170,7 +174,6 @@ Helm 3 supports the security, identity, and authorization features of modern Kub Read more about organizing cluster access using kubeconfig files in the [Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/). 
- ## Delete the Kubernetes Dashboard The [Kubernetes Dashboard](https://github.com/kubernetes/dashboard) gets created by default in many installations. Although the Dashboard contains useful information, the Dashboard also poses a security risk. We **recommend** deleting it and not using it for the time being until the Dashboard becomes properly securable. @@ -188,6 +191,7 @@ kubectl --namespace=kube-system delete rc kubernetes-dashboard ``` (rbac)= + ## Use Role Based Access Control (RBAC) Kubernetes supports, and often requires, using [Role Based Access Control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) @@ -307,7 +311,7 @@ You can enable or disable enforcement of each network policy in config.yaml: ```yaml hub: networkPolicy: - enabled: true # or false to disable + enabled: true # or false to disable proxy: networkPolicy: enabled: true @@ -349,10 +353,10 @@ Here is an example set of labels granting access to all jupyterhub components metadata: name: my-service labels: - hub.jupyter.org/network-access-hub: "true" # access the hub api - hub.jupyter.org/network-access-proxy-http: "true" # access proxy public http endpoint - hub.jupyter.org/network-access-proxy-api: "true" # access proxy api - hub.jupyter.org/network-access-singleuser: "true" # access single-user servers directly + hub.jupyter.org/network-access-hub: "true" # access the hub api + hub.jupyter.org/network-access-proxy-http: "true" # access proxy public http endpoint + hub.jupyter.org/network-access-proxy-api: "true" # access proxy api + hub.jupyter.org/network-access-singleuser: "true" # access single-user servers directly ``` You can also add additional `ingress` rules to each network policy in your `config.yaml`. 
diff --git a/doc/source/administrator/troubleshooting.md b/doc/source/administrator/troubleshooting.md index 9cd11ac886..9b9194f14d 100644 --- a/doc/source/administrator/troubleshooting.md +++ b/doc/source/administrator/troubleshooting.md @@ -24,5 +24,3 @@ managing cost with JupyterHub, see {ref}`cost`. Each Helm Chart is packaged with a specific version of JupyterHub (and other software as well). See see the [Helm Chart repository](https://github.com/jupyterhub/helm-chart#release-notes) for information about the versions of relevant software packages. - - diff --git a/doc/source/administrator/upgrading.md b/doc/source/administrator/upgrading.md index de0a64075c..01bd385bad 100644 --- a/doc/source/administrator/upgrading.md +++ b/doc/source/administrator/upgrading.md @@ -93,10 +93,10 @@ will be performed automatically when you do a `helm upgrade`. db: upgrade: true ``` + 4. Do a [`helm upgrade`](#upgrade-command). This should perform the database upgrade needed. 5. Remove the lines added in step 3, and do another [`helm upgrade`](#upgrade-command). - ### Custom Docker Images: JupyterHub version match If you are using a custom built image, make sure that the version of the diff --git a/doc/source/index.md b/doc/source/index.md index 2a119fd445..81a8578662 100644 --- a/doc/source/index.md +++ b/doc/source/index.md @@ -40,20 +40,20 @@ the Helm references in this documentation are Helm v3. This guide will help you deploy and customize your own JupyterHub on a cloud. While doing this, you will gain valuable experience with: -- **A cloud provider** such as Google Cloud, Microsoft Azure, Amazon - EC2, IBM Cloud\... -- **Kubernetes** to manage resources on the cloud -- **Helm v3** to configure and control the packaged JupyterHub - installation -- **JupyterHub** to give users access to a Jupyter computing - environment -- **A terminal interface** on some operating system +- **A cloud provider** such as Google Cloud, Microsoft Azure, Amazon + EC2, IBM Cloud\... 
+- **Kubernetes** to manage resources on the cloud +- **Helm v3** to configure and control the packaged JupyterHub + installation +- **JupyterHub** to give users access to a Jupyter computing + environment +- **A terminal interface** on some operating system It\'s also possible you end up getting some experience with: -- **Docker** to build customized image for the users -- **Domain registration** to make the hub available at - +- **Docker** to build customized image for the users +- **Domain registration** to make the hub available at + ```{admonition} Note For a more elaborate introduction to the tools and services that @@ -73,7 +73,7 @@ kubernetes/index ## Setup JupyterHub -This tutorial starts from *Step Zero: Your Kubernetes cluster* and +This tutorial starts from _Step Zero: Your Kubernetes cluster_ and describes the steps needed for you to create a complete initial JupyterHub deployment. Please ensure you have a working installation of Kubernetes and Helm before proceeding with this section. @@ -141,7 +141,7 @@ This guide and the associated helm chart would not be possible without the amazing institutional support from the following organizations (and the organizations that support them!) 
-- [UC Berkeley Data Science Division](https://data.berkeley.edu/) -- [Berkeley Institute for Data Science](https://bids.berkeley.edu/) -- [Cal Poly, San Luis Obispo](https://www.calpoly.edu/) -- [Simula Research Institute](https://www.simula.no/) +- [UC Berkeley Data Science Division](https://data.berkeley.edu/) +- [Berkeley Institute for Data Science](https://bids.berkeley.edu/) +- [Cal Poly, San Luis Obispo](https://www.calpoly.edu/) +- [Simula Research Institute](https://www.simula.no/) diff --git a/doc/source/jupyterhub/customizing/extending-jupyterhub.md b/doc/source/jupyterhub/customizing/extending-jupyterhub.md index 586b405366..4a9658e8d1 100644 --- a/doc/source/jupyterhub/customizing/extending-jupyterhub.md +++ b/doc/source/jupyterhub/customizing/extending-jupyterhub.md @@ -18,7 +18,7 @@ The general method to modify your Kubernetes deployment is to: ``` RELEASE=jhub NAMESPACE=jhub - + helm upgrade --cleanup-on-fail \ $RELEASE jupyterhub/jupyterhub \ --namespace $NAMESPACE \ @@ -27,12 +27,13 @@ The general method to modify your Kubernetes deployment is to: ``` Note that `helm list` should display `` if you forgot it. -3. Verify that the *hub* and *proxy* pods entered the `Running` state after + +3. Verify that the _hub_ and _proxy_ pods entered the `Running` state after the upgrade completed. ``` NAMESPACE=jhub - + kubectl get pod --namespace $NAMESPACE ``` diff --git a/doc/source/jupyterhub/customizing/user-environment.md b/doc/source/jupyterhub/customizing/user-environment.md index 362b7c347b..63f9e7bf95 100644 --- a/doc/source/jupyterhub/customizing/user-environment.md +++ b/doc/source/jupyterhub/customizing/user-environment.md @@ -6,7 +6,7 @@ This page contains instructions for common ways to enhance the user experience. For a list of all the configurable Helm chart options, see the {ref}`helm-chart-configuration-reference`. 
-The *user environment* is the set of software packages, environment variables, +The _user environment_ is the set of software packages, environment variables, and various files that are present when the user logs into JupyterHub. The user may also see different tools that provide interfaces to perform specialized tasks, such as JupyterLab, RStudio, RISE and others. @@ -36,7 +36,6 @@ image containing useful tools and libraries for datascience, complete these step 1. Modify your `config.yaml` file to specify the image. For example: ```yaml - singleuser: image: # Get the latest image tag at: @@ -45,7 +44,6 @@ image containing useful tools and libraries for datascience, complete these step # https://github.com/jupyter/docker-stacks/tree/master/datascience-notebook/Dockerfile name: jupyter/datascience-notebook tag: 177037d09156 - ``` ```{note} @@ -55,13 +53,15 @@ image containing useful tools and libraries for datascience, complete these step `latest` as it might cause a several minute delay, confusion, or failures for users when a new version of the image is released. ``` + 2. Apply the changes by following the directions listed in {ref}`apply the changes `. - If you have configured *prePuller.hook.enabled*, all the nodes in your + If you have configured _prePuller.hook.enabled_, all the nodes in your cluster will pull the image before the hub is upgraded to let users use the image. The image pulling may take several minutes to complete, depending on the size of the image. + 3. Restart your server from JupyterHub control panel if you are already logged in. ```{note} @@ -99,7 +99,7 @@ If you are missing something in the image that you would like all users to have, we recommend that you build a new image on top of an existing Docker image from jupyter/docker-stacks. -Below is an example {term}`Dockerfile` building on top of the *minimal-notebook* +Below is an example {term}`Dockerfile` building on top of the _minimal-notebook_ image. 
This file can be built to a {term}`Docker image`, and pushed to a {term}`image registry`, and finally configured in {term}`config.yaml` to be used by the Helm chart. @@ -125,8 +125,7 @@ details on this. ## Set environment variables -One way to affect your user's environment is by setting {term}`environment -variables`. While you can set them up in your Docker image if you build it +One way to affect your user's environment is by setting {term}`environment variables`. While you can set them up in your Docker image if you build it yourself, it is often easier to configure your Helm chart through values provided in your {term}`config.yaml`. @@ -163,7 +162,7 @@ directory. In practice this means that everything a user writes to the home directory (`/home/jovyan`) will remain, and everything else will be reset in between server restarts. -A server can be shut down by *culling*. By default, JupyterHub's culling service +A server can be shut down by _culling_. By default, JupyterHub's culling service is configured to cull a users server that has been inactive for one hour. Note that JupyterLab will autosave files, and as long as the file was within the users home directory no work is lost. @@ -201,15 +200,15 @@ multiple commands. ```yaml singleuser: - lifecycleHooks: - postStart: - exec: - command: - - "sh" - - "-c" - - > - cp -r /tmp/foo /home/jovyan; - cp -r /tmp/bar /home/jovyan + lifecycleHooks: + postStart: + exec: + command: + - "sh" + - "-c" + - > + cp -r /tmp/foo /home/jovyan; + cp -r /tmp/bar /home/jovyan ``` Keep in mind that commands will be run **each time** a user starts @@ -222,7 +221,7 @@ your user folders with a git repository. We recommend using the tool [nbgitpuller](https://github.com/jupyterhub/nbgitpuller) to synchronize a folder in your user's filesystem with a `git` repository whenever a user -starts their server. This synchronization can also be triggered by +starts their server. 
This synchronization can also be triggered by letting a user visit a link like `https://your-domain.com/hub/user-redirect/git-pull?repo=https://github.com/data-8/materials-fa18` (e.g., as alternative start url). @@ -237,7 +236,13 @@ singleuser: lifecycleHooks: postStart: exec: - command: ["gitpuller", "https://github.com/data-8/materials-fa17", "master", "materials-fa"] + command: + [ + "gitpuller", + "https://github.com/data-8/materials-fa17", + "master", + "materials-fa", + ] ``` This will synchronize the master branch of the repository to a folder called diff --git a/doc/source/jupyterhub/customizing/user-management.md b/doc/source/jupyterhub/customizing/user-management.md index 62a73b6b11..8c30483348 100644 --- a/doc/source/jupyterhub/customizing/user-management.md +++ b/doc/source/jupyterhub/customizing/user-management.md @@ -4,14 +4,16 @@ This section describes management of users and their permissions on JupyterHub. (culling-user-pods)= + ## Culling user pods + JupyterHub will automatically delete any user pods that have no activity for a period of time. This helps free up computational resources and keeps costs down if you are using an autoscaling cluster. When these users navigate back to your JupyterHub, they will have to start their server again, and the state of their previous session (variables they've created, any in-memory data, etc) -will be lost. This is known as *culling*. +will be lost. This is known as _culling_. ```{note} In JupyterHub, "inactivity" is defined as no response from the user's @@ -29,7 +31,7 @@ cull: By default, JupyterHub will run the culling process every ten minutes and will cull any user pods that have been inactive for more than one hour. 
-You can configure this behavior in your ``config.yaml`` file with the following +You can configure this behavior in your `config.yaml` file with the following fields: ```yaml diff --git a/doc/source/jupyterhub/customizing/user-resources.md b/doc/source/jupyterhub/customizing/user-resources.md index 3d3c4a228f..ff6eb7ab50 100644 --- a/doc/source/jupyterhub/customizing/user-resources.md +++ b/doc/source/jupyterhub/customizing/user-resources.md @@ -24,23 +24,23 @@ groups improves the user experience for all Hub users. ## Set user memory and CPU guarantees / limits Each user on your JupyterHub gets a slice of memory and CPU to use. There are -two ways to specify how much users get to use: resource *guarantees* and -resource *limits*. +two ways to specify how much users get to use: resource _guarantees_ and +resource _limits_. -A resource *guarantee* means that all users will have *at least* this resource +A resource _guarantee_ means that all users will have _at least_ this resource available at all times, but they may be given more resources if they're -available. For example, if users are *guaranteed* 1G of RAM, users can +available. For example, if users are _guaranteed_ 1G of RAM, users can technically use more than 1G of RAM if these resources aren't being used by other users. -A resource *limit* sets a hard limit on the resources available. In the example +A resource _limit_ sets a hard limit on the resources available. In the example above, if there were a 1G memory limit, it would mean that users could use no more than 1G of RAM, no matter what other resources are being used on the machines. -By default, each user is *guaranteed* 1G of RAM. All users have *at least* 1G, +By default, each user is _guaranteed_ 1G of RAM. All users have _at least_ 1G, but they can technically use more if it is available. 
You can easily change the -amount of these resources, and whether they are a *guarantee* or a *limit*, by +amount of these resources, and whether they are a _guarantee_ or a _limit_, by changing your `config.yaml` file. This is done with the following structure. ```yaml @@ -116,14 +116,14 @@ The following configuration will increase the SHM allocation by mounting a ```yaml singleuser: - storage: - extraVolumes: - - name: shm-volume - emptyDir: - medium: Memory - extraVolumeMounts: - - name: shm-volume - mountPath: /dev/shm + storage: + extraVolumes: + - name: shm-volume + emptyDir: + medium: Memory + extraVolumeMounts: + - name: shm-volume + mountPath: /dev/shm ``` The volume `shm-volume` will be created when the user's pod is created, diff --git a/doc/source/jupyterhub/customizing/user-storage.md b/doc/source/jupyterhub/customizing/user-storage.md index 9ade6b712f..7ea48fad15 100644 --- a/doc/source/jupyterhub/customizing/user-storage.md +++ b/doc/source/jupyterhub/customizing/user-storage.md @@ -1,4 +1,5 @@ (user-storage)= + # Customizing User Storage For the purposes of this guide, we'll describe "storage" as @@ -13,8 +14,8 @@ JupyterHub uses Kubernetes to manage user storage. There are two primary Kubernetes objects involved in allocating storage to pods: -* A `PersistentVolumeClaim` (`PVC`) specifies what kind of storage is required. Its configuration is specified in your `config.yaml` file. -* A `PersistentVolume` (`PV`) is the actual volume where the user's data resides. It is created by Kubernetes using details in a `PVC`. +- A `PersistentVolumeClaim` (`PVC`) specifies what kind of storage is required. Its configuration is specified in your `config.yaml` file. +- A `PersistentVolume` (`PV`) is the actual volume where the user's data resides. It is created by Kubernetes using details in a `PVC`. 
As Kubernetes objects, they can be queried with the standard `kubectl` commands (e.g., `kubectl --namespace= get pvc`) @@ -40,7 +41,7 @@ under-the-hood and automatically when a user logs in. deleted unless the `PersistentVolumeClaim` is explicitly deleted by the JupyterHub administrator. When a user shuts down their server, their user pod is deleted and their volume is -detached from the pod, *but the `PVC` and `PV` objects still exist*. +detached from the pod, _but the `PVC` and `PV` objects still exist_. In the future, when the user logs back in, JupyterHub will detect that the user has a pre-existing `PVC` and will simply attach it to their new pod, rather than creating a new `PVC`. @@ -77,7 +78,7 @@ demonstrate here how to configure those. Note that new `PVC`s for pre-existing users will **not** be created unless the old ones are destroyed. If you update your -users' `PVC` config via `config.yaml`, then any **new** users will +users' `PVC` config via `config.yaml`, then any **new** users will have the new `PVC` created for them, but **old** users will not. To force an upgrade of the storage type for old users, you will need to manually delete their `PVC` (e.g. @@ -148,8 +149,8 @@ from the commandline. The [Kubernetes Docs](https://kubernetes.io/docs/concepts/ have more information on what the various fields mean. The most important field is `parameters.type`, which specifies the type of storage you wish to use. The two options are: -* `pd-ssd` makes `StorageClass` provision SSDs. -* `pd-standard` will provision non-SSD disks. +- `pd-ssd` makes `StorageClass` provision SSDs. +- `pd-standard` will provision non-SSD disks. Once you have created this `StorageClass`, you can configure your JupyterHub's `PVC` template with the following in your `config.yaml`: @@ -196,7 +197,6 @@ singleuser: Next {ref}`apply the changes `. - After the changes are applied, new users will no longer be allocated a persistent `$HOME` directory. 
Any currently running users will still have access to their storage until their server is restarted. You might have to diff --git a/doc/source/jupyterhub/installation.md b/doc/source/jupyterhub/installation.md index 3e0093d356..57c1f7900c 100644 --- a/doc/source/jupyterhub/installation.md +++ b/doc/source/jupyterhub/installation.md @@ -2,8 +2,7 @@ # Installing JupyterHub -Now that we have a {doc}`Kubernetes cluster ` and {doc}`Helm -` setup, we can proceed by using Helm to install JupyterHub +Now that we have a {doc}`Kubernetes cluster ` and {doc}`Helm ` setup, we can proceed by using Helm to install JupyterHub and related {term}`Kubernetes resources ` using a {term}`Helm chart`. @@ -14,25 +13,25 @@ configuration file that we will refer to as `config.yaml`. It will contain the m {term}`Helm values` to be provided to a JupyterHub {term}`Helm chart` developed specifically together with this guide. -Helm charts contains {term}`templates -` that with provided values will render to {term}`Kubernetes -resources ` to be installed in a Kubernetes cluster. This +Helm charts contains {term}`templates ` that with provided values will render to {term}`Kubernetes resources ` to be installed in a Kubernetes cluster. This config file will provide the values to be used by our Helm chart. 1. Generate a random hex string representing 32 bytes to use as a security token. Run this command in a terminal and copy the output: ```{code-block} bash - + openssl rand -hex 32 ``` + 2. Create and start editing a file called `config.yaml`. In the code snippet below we start the widely available [nano editor](https://en.wikipedia.org/wiki/GNU_nano), but any editor will do. ``` nano config.yaml ``` + 3. Write the following into the `config.yaml` file but instead of writing `` paste the generated hex string you copied in step 1. @@ -43,6 +42,7 @@ config file will provide the values to be used by our Helm chart. It is common practice for Helm and Kubernetes YAML files to indent using two spaces. 
+ 4. Save the `config.yaml` file. In the nano editor this is done by pressing **CTRL+X** or **CMD+X** followed by a confirmation to save the changes. @@ -70,6 +70,7 @@ security issue. ...Successfully got an update from the "jupyterhub" chart repository Update Complete. ⎈ Happy Helming!⎈ ``` + 2. Now install the chart configured by your `config.yaml` by running this command from the directory that contains your `config.yaml`: @@ -78,7 +79,7 @@ security issue. # free to use different values. RELEASE=jhub NAMESPACE=jhub - + helm upgrade --cleanup-on-fail \ --install $RELEASE jupyterhub/jupyterhub \ --namespace $NAMESPACE \ @@ -102,23 +103,22 @@ security issue. * This step may take a moment, during which time there will be no output to your terminal. JupyterHub is being installed in the background. * If you get a `release named already exists` error, - then you should delete the release by running `helm delete - `. Then reinstall by repeating this step. If it + then you should delete the release by running `helm delete `. Then reinstall by repeating this step. If it persists, also do `kubectl delete namespace ` and try again. - * In general, if something goes *wrong* with the install step, delete the + * In general, if something goes _wrong_ with the install step, delete the Helm release by running `helm delete ` before re-running the install command. * If you're pulling from a large Docker image you may get a `Error: timed out waiting for the condition` error, add a - `--timeout=ms` parameter to the `helm - install` command. - * The `--version` parameter corresponds to the *version of the Helm - chart*, not the version of JupyterHub. Each version of the JupyterHub + `--timeout=ms` parameter to the `helm install` command. + * The `--version` parameter corresponds to the _version of the Helm + chart_, not the version of JupyterHub. Each version of the JupyterHub Helm chart is paired with a specific version of JupyterHub. 
E.g., `0.7.0` of the Helm chart runs JupyterHub `0.9.2`. For a list of which JupyterHub version is installed in each version of the Z2JH Helm Chart, see the [Helm Chart repository](https://github.com/jupyterhub/helm-chart#release-notes). + 3. While Step 2 is running, you can see the pods being created by entering in a different terminal: @@ -132,13 +132,15 @@ security issue. ``` kubectl config set-context $(kubectl config current-context) --namespace ${NAMESPACE:-jhub} ``` -4. Wait for the *hub* and *proxy* pod to enter the `Running` state. + +4. Wait for the _hub_ and _proxy_ pod to enter the `Running` state. ``` NAME READY STATUS RESTARTS AGE hub-5d4ffd57cf-k68z8 1/1 Running 0 37s proxy-7cb9bc4cc-9bdlp 1/1 Running 0 37s ``` + 5. Find the IP we can use to access the JupyterHub. Run the following command until the `EXTERNAL-IP` of the `proxy-public` [service](https://kubernetes.io/docs/concepts/services-networking/service/) is available like in the example output. @@ -161,16 +163,15 @@ security issue. kubectl describe service proxy-public --namespace jhub ``` -7. To use JupyterHub, enter the external IP for the `proxy-public` service in - to a browser. JupyterHub is running with a default *dummy* authenticator so +6. To use JupyterHub, enter the external IP for the `proxy-public` service in + to a browser. JupyterHub is running with a default _dummy_ authenticator so entering any username and password combination will let you enter the hub. -Congratulations! Now that you have basic JupyterHub running, you can {ref}`extend it -` and {ref}`optimize it ` in many +Congratulations! Now that you have basic JupyterHub running, you can {ref}`extend it ` and {ref}`optimize it ` in many ways to meet your needs. Some examples of customizations are: -* Configure the login to use the account that makes sense to you (Google, GitHub, etc.). -* Use a suitable pre-built image for the user container or build your own. -* Host it on . 
+- Configure the login to use the account that makes sense to you (Google, GitHub, etc.). +- Use a suitable pre-built image for the user container or build your own. +- Host it on . diff --git a/doc/source/jupyterhub/uninstall.md b/doc/source/jupyterhub/uninstall.md index bb2a5965e0..2d387e90c0 100644 --- a/doc/source/jupyterhub/uninstall.md +++ b/doc/source/jupyterhub/uninstall.md @@ -29,12 +29,12 @@ before doing the cloud provider specific setup. ```bash helm delete ``` - + `` is the name provided to `helm upgrade` when initially setting up the hub. If you had forgotten what you used, you can run `helm list` to find all the release names in your cluster. You can also see the `namespace` value here that will be used in the next step. - + 2. Next, delete the Kubernetes namespace the hub was installed in. This deletes any disks that may have been created to store user's data, and any IP addresses that may have been provisioned. @@ -59,6 +59,7 @@ before doing the cloud provider specific setup. ``` gcloud container clusters delete --zone= ``` + 3. Double check to make sure all the resources are now deleted, since anything you have not deleted will cost you money! You can check the [web console](https://console.cloud.google.com) (make sure you are in the right project and account) to verify that everything @@ -94,6 +95,7 @@ before doing the cloud provider specific setup. Be careful to delete the correct Resource Group, as doing so will irreversibly delete all resources within the group! + 3. Double check to make sure all the resources are now deleted, since anything you have not deleted will cost you money! You can check the [web portal](https://portal.azure.com) (check the "Resource Groups" page) to verify that everything has been deleted. @@ -110,10 +112,10 @@ before doing the cloud provider specific setup. 
```bash kops delete cluster --yes - + # Leave CI host exit - + # Terminate CI host aws ec2 stop-instances --instance-ids aws ec2 terminate-instances --instance-ids diff --git a/doc/source/kubernetes/amazon/efs_storage.md b/doc/source/kubernetes/amazon/efs_storage.md index fff61908a6..e2a9e793f0 100644 --- a/doc/source/kubernetes/amazon/efs_storage.md +++ b/doc/source/kubernetes/amazon/efs_storage.md @@ -1,32 +1,32 @@ --- orphan: true - --- (amazon-efs)= # Setting up EFS storage on AWS -ElasticFileSystem is distributed file system which speaks the NFS protocol. It is rumored to be a GlusterFS fork behind the scenes at AWS. +ElasticFileSystem is distributed file system which speaks the NFS protocol. It is rumored to be a GlusterFS fork behind the scenes at AWS. Drawbacks: -* Setting permissions on persistent volumes is not nailed down in the Kubernetes spec yet. This adds some complications we will discuss later. -* A crafty user may be able to contact the EFS server directly and read other user's files depending on how the system is setup. +- Setting permissions on persistent volumes is not nailed down in the Kubernetes spec yet. This adds some complications we will discuss later. +- A crafty user may be able to contact the EFS server directly and read other user's files depending on how the system is setup. Procedure: 1. Setting up an EFS volume - Go through the EFS setup wizard in AWS (in the future this part may be scripted). The new EFS volume must be in - the same VPC as your cluster. This can be changed in the AWS settings after it has been created. + Go through the EFS setup wizard in AWS (in the future this part may be scripted). The new EFS volume must be in + the same VPC as your cluster. This can be changed in the AWS settings after it has been created. - Next, create a new security group for NFS traffic (target other instances in that group). Add a rule for incoming NFS traffic to the node security group and to the master security group. 
Change the EFS volume to use that security group. + Next, create a new security group for NFS traffic (target other instances in that group). Add a rule for incoming NFS traffic to the node security group and to the master security group. Change the EFS volume to use that security group. To verify that your EFS volume is working correctly, ssh into one of the master nodes and su to root. Next, follow the steps on the EFS console page for mounting your NFS volume. The DNS entry may take a few minutes to show up. Once the mount succeeds, unmount it and disconnect from the admin node. + 2. Configuring Kubernetes to understand your EFS volume Create test_efs.yaml: @@ -62,22 +62,22 @@ Procedure: storage: 11Gi ``` - The sizes in these files are misleading. There is no quota enforced with EFS. In the + The sizes in these files are misleading. There is no quota enforced with EFS. In the future we want to set the efs PersistentVolume size to something ridiculously large - like 8EiB and the PersistentVolumeClaim to 10GB. As far as we know at the moment, these sizes don't matter. + like 8EiB and the PersistentVolumeClaim to 10GB. As far as we know at the moment, these sizes don't matter. - A PersistentVolume defines a service which can perform a mount inside of a container. The + A PersistentVolume defines a service which can perform a mount inside of a container. The PersistentVolumeClaim is a way of reserving a portion of the PersistentVolume and potentially locking access to it. - The storageClassName setting looks innocuous, but it is incredibly critical. The only non storage - class PV in the cluster is the one we defined above. In the future we should tag different PV's + The storageClassName setting looks innocuous, but it is incredibly critical. The only non storage + class PV in the cluster is the one we defined above. In the future we should tag different PV's and use tag filters in the PVC instead of relying on a default of "". 
- We are going to configure jupyterhub to use the same "static" claim among all of the containers. This + We are going to configure jupyterhub to use the same "static" claim among all of the containers. This means that all of our users will be using the same EFS share which should be able to scale as high as we need. - This part is a little different than the standard guide. We need to create these PV's and PVC's in the + This part is a little different than the standard guide. We need to create these PV's and PVC's in the namespace that our app will live in. Choose a namespace (this will be the same as the namespace you will use in the helm install step later on) @@ -89,7 +89,8 @@ Procedure: kubectl --namespace= apply -f test_efs_claim.yaml ``` - I don't know if the PV needs to be in the namespace, but the arg does not seem to hurt anything. The PVC must be in the namespace or stuff will break in weird ways. + I don't know if the PV needs to be in the namespace, but the arg does not seem to hurt anything. The PVC must be in the namespace or stuff will break in weird ways. + 3. Configuring your application to use EFS as it's backing storage We now add the following to config.yaml: @@ -103,32 +104,32 @@ Procedure: type: "static" static: pvcName: "efs-persist" - subPath: 'home/{username}' + subPath: "home/{username}" extraEnv: - CHOWN_HOME: 'yes' + CHOWN_HOME: "yes" uid: 0 fsGid: 0 cmd: "start-singleuser.sh" ``` The image setting overrides the default pinned jh base image since it has not yet been updated - to include the CHOWN_HOME setting. This will be fixed in Z2JH 0.7. + to include the CHOWN_HOME setting. This will be fixed in Z2JH 0.7. type static tells jh not to use a storage class and instead use a PVC defined below. pvcName matches the claim name we specified before - subPath tells where on the supplied storage the mount point should be. In this case it will + subPath tells where on the supplied storage the mount point should be. 
In this case it will be "$EFS_ROOT/home/{username}" It turns out there is a bug in jupyterhub where the default subPath does not work, and setting the subPath to "{username}" breaks in the same way. The extraEnv section set's environmental variables before trying to start jupyterhub inside of the user's - container. CHOWN_HOME is needed to force the ownership change of the home directory. + container. CHOWN_HOME is needed to force the ownership change of the home directory. Kubernetes is still conflicted if a uid and a gid should be passed in to change how the directory is mounted - inside of the container. What we do for now is auto-chown the directory before jupyterhub has been started. + inside of the container. What we do for now is auto-chown the directory before jupyterhub has been started. - The UID/fsGID is necessary to force the container to run the start-singleuser.sh as root. Once + The UID/fsGID is necessary to force the container to run the start-singleuser.sh as root. Once start-singleuser.sh has properly changed the ownership of the directory, it su's to the jupyterhub user. diff --git a/doc/source/kubernetes/amazon/step-zero-aws-eks.md b/doc/source/kubernetes/amazon/step-zero-aws-eks.md index 5dabcc9dd3..31bdbb4a11 100644 --- a/doc/source/kubernetes/amazon/step-zero-aws-eks.md +++ b/doc/source/kubernetes/amazon/step-zero-aws-eks.md @@ -11,53 +11,55 @@ This guide uses AWS to set up a cluster. This mirrors the steps found at [Gettin 1. Create a IAM Role for EKS Service Role. It should have the following policies - * AmazonEKSClusterPolicy - * AmazonEKSServicePolicy - * AmazonEC2ContainerRegistryReadOnly + - AmazonEKSClusterPolicy + - AmazonEKSServicePolicy + - AmazonEC2ContainerRegistryReadOnly (From the user interface, select EKS as the service, then follow the default steps) + 2. Create a VPC if you don't already have one. - This step has a lot of variability so it is left to the user. 
However, one deployment can be found at [Getting Started with Amazon EKS][getting started with amazon eks], under *Create your Amazon EKS Cluster VPC* + This step has a lot of variability so it is left to the user. However, one deployment can be found at [Getting Started with Amazon EKS][getting started with amazon eks], under _Create your Amazon EKS Cluster VPC_ 3. Create a Security Group for the EKS Control Plane to use You do not need to set any permissions on this. The steps below will automatically define access control between the EKS Control Plane and the individual nodes 4. Create your EKS cluster (using the user interface) Use the IAM Role in step 1 and Security Group defined in step 3. The cluster name is going to be used throughout. We'll use `Z2JHKubernetesCluster` as an example. 5. Install **kubectl** and **aws-iam-authenticator** - Refer to [Getting Started with Amazon EKS][getting started with amazon eks] on *Configure kubectl for Amazon EKS* -6. Configure *kubeconfig* - Also see [Getting Started with Amazon EKS][getting started with amazon eks] *Step 2: Configure kubectl for Amazon EKS* + Refer to [Getting Started with Amazon EKS][getting started with amazon eks] on _Configure kubectl for Amazon EKS_ +6. Configure _kubeconfig_ + Also see [Getting Started with Amazon EKS][getting started with amazon eks] _Step 2: Configure kubectl for Amazon EKS_ From the user interface on AWS you can retrieve the `endpoint-url`, `base64-encoded-ca-cert`. `cluster-name` is the name given in step 4. 
If you are using profiles in your AWS configuration, you can uncomment the `env` block and specify your profile as `aws-profile`.: ```yaml apiVersion: v1 clusters: - - cluster: - server: - certificate-authority-data: - name: kubernetes - contexts: - - context: - cluster: kubernetes - user: aws - name: aws - current-context: aws - kind: Config - preferences: {} - users: - - name: aws - user: - exec: - apiVersion: client.authentication.k8s.io/v1alpha1 - command: aws-iam-authenticator - args: - - "token" - - "-i" - - "" - # env: - # - name: AWS_PROFILE - # value: "" + - cluster: + server: + certificate-authority-data: + name: kubernetes + contexts: + - context: + cluster: kubernetes + user: aws + name: aws + current-context: aws + kind: Config + preferences: {} + users: + - name: aws + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + command: aws-iam-authenticator + args: + - "token" + - "-i" + - "" + # env: + # - name: AWS_PROFILE + # value: "" ``` + 7. Verify kubectl works ``` @@ -65,16 +67,19 @@ This guide uses AWS to set up a cluster. This mirrors the steps found at [Gettin ``` should return `kubernetes` and `ClusterIP` + 8. Create the nodes using CloudFormation - See [Getting Started with Amazon EKS][getting started with amazon eks] *Step 3: Launch and Configure Amazon EKS Worker Nodes* - + See [Getting Started with Amazon EKS][getting started with amazon eks] _Step 3: Launch and Configure Amazon EKS Worker Nodes_ + **Warning** if you are endeavoring to deploy on a private network, the cloudformation template creates a public IP for each worker node though there is no route to get there if you specified only private subnets. Regardless, if you wish to correct this, you can edit the cloudformation template by changing `Resources.NodeLaunchConfig.Properties.AssociatePublicIpAddress` from `'true'` to `'false'` + 9. Create a AWS authentication ConfigMap This is necessary for the workers to find the master plane. 
- - See [Getting Started with Amazon EKS][getting started with amazon eks] *Step 3: Launch and Configure Amazon EKS Worker Nodes* + + See [Getting Started with Amazon EKS][getting started with amazon eks] _Step 3: Launch and Configure Amazon EKS Worker Nodes_ + 10. Preparing authenticator for Helm There might be a better way to configure this @@ -84,7 +89,7 @@ This guide uses AWS to set up a cluster. This mirrors the steps found at [Gettin ``` kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous ``` - + [getting started with amazon eks]: https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html [selected regions]: https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/ diff --git a/doc/source/kubernetes/amazon/step-zero-aws.md b/doc/source/kubernetes/amazon/step-zero-aws.md index 88b44f40b1..a0f4378cf6 100644 --- a/doc/source/kubernetes/amazon/step-zero-aws.md +++ b/doc/source/kubernetes/amazon/step-zero-aws.md @@ -6,7 +6,7 @@ AWS does not have native support for Kubernetes, however there are many organizations that have put together their own solutions and guides for setting up Kubernetes on AWS. -This guide uses kops to setup a cluster on AWS. This should be seen as a rough +This guide uses kops to setup a cluster on AWS. This should be seen as a rough template you will use to setup and shape your cluster. ## The Procedure @@ -16,12 +16,13 @@ template you will use to setup and shape your cluster. This role will be used to give your CI host permission to create and destroy resources on AWS - * AmazonEC2FullAccess - * IAMFullAccess - * AmazonS3FullAccess - * AmazonVPCFullAccess - * Route53FullAccess (Optional) -2. Create a new instance to use as your CI host. This node will deal with + - AmazonEC2FullAccess + - IAMFullAccess + - AmazonS3FullAccess + - AmazonVPCFullAccess + - Route53FullAccess (Optional) + +2. Create a new instance to use as your CI host. 
This node will deal with provisioning and tearing down the cluster. This instance can be small (t2.micro for example). @@ -29,45 +30,52 @@ template you will use to setup and shape your cluster. When creating it, assign the IAM role created in step 1. Once created, download ssh keys. + 3. SSH to your CI host 4. Install kops and kubectl on your CI host - * Follow the instructions here: + - Follow the instructions here: + 5. Choose a cluster name: Since we are not using pre-configured DNS we will use the suffix - ".k8s.local". Per the docs, if the DNS name ends in .k8s.local the cluster + ".k8s.local". Per the docs, if the DNS name ends in .k8s.local the cluster will use internal hosted DNS. ``` export NAME=.k8s.local ``` + 6. Setup an ssh keypair to use with the cluster: ``` ssh-keygen ``` + 7. Create an S3 bucket to store your cluster configuration - Since we are on AWS we can use a S3 backing store. It is recommended to + Since we are on AWS we can use a S3 backing store. It is recommended to enabling versioning on the S3 bucket. We don't need to pass this into the - KOPS commands. It is automatically detected by the kops tool as an env + KOPS commands. It is automatically detected by the kops tool as an env variable. ``` export KOPS_STATE_STORE=s3:// ``` + 8. Set the region to deploy in: ``` export REGION=`curl -s http://169.254.169.254/latest/dynamic/instance-identity/document|grep region|awk -F\" '{print $4}'` ``` + 9. Install the AWS CLI: ``` sudo apt-get update sudo apt-get install awscli ``` + 10. Set the availability zones for the nodes For this guide we will be allowing nodes to be deployed in all AZs: @@ -75,6 +83,7 @@ template you will use to setup and shape your cluster. ``` export ZONES=$(aws ec2 describe-availability-zones --region $REGION | grep ZoneName | awk '{print $2}' | tr -d '"') ``` + 11. Create the cluster For a basic setup run the following (All sizes measured in GB): @@ -97,9 +106,9 @@ template you will use to setup and shape your cluster. 
--networking weave \ ``` - This creates a cluster where all of the masters and nodes are in private subnets and don't have external IP addresses. A mis-configured security group or insecure ssh configuration is less likely to compromise the cluster. - In order to SSH into your cluster you will need to set up a bastion node. Make sure you do that step below. - If you have the default number of elastic IPs (10) you may need to put in a request to AWS support to bump up that limit. The alternative is reducing the number of zones specified. + This creates a cluster where all of the masters and nodes are in private subnets and don't have external IP addresses. A mis-configured security group or insecure ssh configuration is less likely to compromise the cluster. + In order to SSH into your cluster you will need to set up a bastion node. Make sure you do that step below. + If you have the default number of elastic IPs (10) you may need to put in a request to AWS support to bump up that limit. The alternative is reducing the number of zones specified. More reading on this subject: https://github.com/kubernetes/kops/blob/master/docs/networking.md @@ -127,6 +136,7 @@ template you will use to setup and shape your cluster. Consider [setting a cloud budget](https://aws.amazon.com/aws-cost-management/aws-budgets/) for your AWS account in order to make sure you don't accidentally spend more than you wish to. + 12. Wait for the cluster to start-up Running the `kops validate cluster` command will tell us what the current state of setup is. @@ -142,6 +152,7 @@ template you will use to setup and shape your cluster. can be used to automate the waiting process. If at any point you wish to destroy your cluster after this step, run `kops delete cluster $NAME --yes` + 13. Confirm that `kubectl` is connected to your Kubernetes cluster. Run: @@ -154,24 +165,26 @@ template you will use to setup and shape your cluster. 
If you want to use kubectl and helm locally: - * run the following on CI host: `kops export kubecfg` - * copy the contents of `~/.kube/config` to the same place on your local system + - run the following on CI host: `kops export kubecfg` + - copy the contents of `~/.kube/config` to the same place on your local system If you wish to put the kube config file in a different location, you will need to run: ``` export KUBECONFIG= ``` + 14. Configure ssh bastion (Skip this step if you did not go with the **--topology private** option above!) - Ideally we would simply be passing the `--bastion` flag into the kops command above. However that flag is not functioning as intended at the moment. + Ideally we would simply be passing the `--bastion` flag into the kops command above. However that flag is not functioning as intended at the moment. Instead we need to follow this guide: At this point there are a few public endpoints left open which need to be addressed - * Bastion ELB security group defaults to access from 0.0.0.0 - * API ELB security group defaults to access from 0.0.0.0 + - Bastion ELB security group defaults to access from 0.0.0.0 + - API ELB security group defaults to access from 0.0.0.0 + 15. Enable dynamic storage on your Kubernetes cluster. Create a file, `storageclass.yml` on your local computer, and enter @@ -244,11 +257,12 @@ Then perform the following steps: 1. Verify weave is running: - ``` + ``` kubectl --namespace kube-system get pods ``` You should see several pods of the form `weave-net-abcde` + 2. Create Kubernetes secret with a private password of sufficient strength. A random 128 bytes is used in this example: ``` @@ -257,6 +271,7 @@ Then perform the following steps: ``` It is important that the secret name and its value (taken from the filename) are the same. If they do not match you may get a `ConfigError` + 3. 
Patch Weave with the password: ``` @@ -268,6 +283,7 @@ Then perform the following steps: ``` kubectl patch --namespace=kube-system daemonset/weave-net --type json -p '[ { "op": "remove", "path": "/spec/template/spec/containers/0/env/0"} ]' ``` + 4. Check to see that the pods are restarted. To expedite the process you can delete the old pods. 5. You can verify encryption is turned on with the following command: diff --git a/doc/source/kubernetes/digital-ocean/step-zero-digital-ocean.md b/doc/source/kubernetes/digital-ocean/step-zero-digital-ocean.md index 30aedc2b9d..96db97c976 100644 --- a/doc/source/kubernetes/digital-ocean/step-zero-digital-ocean.md +++ b/doc/source/kubernetes/digital-ocean/step-zero-digital-ocean.md @@ -12,19 +12,20 @@ If you prefer to use the Digital Ocean portal see the [Digital Ocean Get Started 1. **Install command-line tools locally**. You'll need at least v1.13.0. You can either follow the [installation instructions](https://github.com/digitalocean/doctl/blob/master/README.md) or use the commands below: - + ``` wget https://github.com/digitalocean/doctl/releases/download/v1.13.0/doctl-1.13.0-linux-amd64.tar.gz tar -xvf doctl-1.13.0-linux-amd64.tar.gz sudo mv doctl /usr/bin/ ``` - + 2. Create an API token on the Digital Ocean portal. Navigate to API then Generate New Token. 3. Connect your local CLI with your account: ``` doctl auth init ``` + 2. Create your cluster. Digital Ocean's use of doctl for kubernetes support is in beta so you'll need to run the following (add it to your `.bashrc` if you want to make this change permanent). @@ -32,6 +33,7 @@ If you prefer to use the Digital Ocean portal see the [Digital Ocean Get Started export DIGITALOCEAN_ENABLE_BETA=1 doctl k8s cluster create jupyter-kubernetes --region lon1 --version 1.18.8-do.0 --node-pool="name=worker-pool;count=3" ``` + 3. Export your cluster config. You can change the default location from $HOME/.kube by setting the KUBECONFIG environment variable. 
@@ -39,6 +41,7 @@ If you prefer to use the Digital Ocean portal see the [Digital Ocean Get Started mkdir -p ~/.kube doctl k8s cluster kubeconfig show bindertime-k8s > ~/.kube/config ``` + 4. Create an ssh key to secure your cluster. ``` @@ -52,6 +55,7 @@ If you prefer to use the Digital Ocean portal see the [Digital Ocean Get Started This command will also print out something to your terminal screen. You don't need to do anything with this text. + 5. Check if your cluster is fully functional ``` diff --git a/doc/source/kubernetes/google/step-zero-gcp.md b/doc/source/kubernetes/google/step-zero-gcp.md index bd027bc296..683bdc3e7d 100644 --- a/doc/source/kubernetes/google/step-zero-gcp.md +++ b/doc/source/kubernetes/google/step-zero-gcp.md @@ -16,6 +16,7 @@ your google cloud account. for your Google Cloud account in order to make sure you don't accidentally spend more than you wish to. ``` + 2. Go to and enable the [Kubernetes Engine API](https://console.cloud.google.com/apis/api/container.googleapis.com/overview). 3. Choose a terminal. @@ -26,7 +27,7 @@ your google cloud account. 1. **Use a web based terminal:** - Start *Google Cloud Shell* from [console.cloud.google.com](https://console.cloud.google.com) by clicking the button shown below. + Start _Google Cloud Shell_ from [console.cloud.google.com](https://console.cloud.google.com) by clicking the button shown below. You are now in control of a virtual machine with various tools preinstalled. If you save something in a user folder they will remain available to you if you return at a later stage. Additional documentation @@ -35,22 +36,24 @@ your google cloud account. ```{image} ../../_static/images/google/start_interactive_cli.png :align: center ``` + 2. **Use your own computer's terminal:** 1. Download and install the `gcloud` command line tool at its [install page](https://cloud.google.com/sdk/docs/install). It will help you create and communicate with a Kubernetes cluster. - 2. 
Install `kubectl` (reads *kube control*), it is a tool for controlling + 2. Install `kubectl` (reads _kube control_), it is a tool for controlling Kubernetes clusters in general. From your terminal, enter: ``` gcloud components install kubectl ``` + 4. Create a managed Kubernetes cluster and a default node pool. Ask Google Cloud to create a managed Kubernetes cluster and a default [node pool](https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools) - to get nodes from. *Nodes* represents hardware and a *node pool* will + to get nodes from. _Nodes_ represents hardware and a _node pool_ will keep track of how much of a certain type of hardware that you would like. ``` @@ -62,20 +65,21 @@ your google cloud account. ``` - * Replace `` with a name that can be used to refer to this cluster + - Replace `` with a name that can be used to refer to this cluster in the future. - * `--machine-type` specifies the amount of CPU and RAM in each node within + - `--machine-type` specifies the amount of CPU and RAM in each node within this default node pool. There is a [variety of types](https://cloud.google.com/compute/docs/machine-types) to choose from. - * `--num-nodes` specifies how many nodes to spin up. You can change this + - `--num-nodes` specifies how many nodes to spin up. You can change this later through the cloud console or using the `gcloud` command line tool. - * `--zone` specifies the data center zone where your cluster will be created. + - `--zone` specifies the data center zone where your cluster will be created. You can pick something from [this list](https://cloud.google.com/compute/docs/regions-zones/#available) that is not too far away from your users. - * A region in GCP is a geographical region with at least three zones, where each zone is representing a datacenter with servers etc. - * A regional cluster creates pods across zones in a region(three by default), distributing Kubernetes resources across multiple zones in the region. 
This is different from the default cluster, which has all its resources within a single zone(as shown above). - * A regional cluster has Highly Available (HA) kubernetes api-servers, this allows jupyterhub which uses them to have no downtime during upgrades of kubernetes itself. - * They also increase control plane uptime to 99.95%. - * To avoid tripling the number of nodes while still having HA kubernetes, the `--node-locations` flag can be used to specify a single zone to use. + - A region in GCP is a geographical region with at least three zones, where each zone is representing a datacenter with servers etc. + - A regional cluster creates pods across zones in a region(three by default), distributing Kubernetes resources across multiple zones in the region. This is different from the default cluster, which has all its resources within a single zone(as shown above). + - A regional cluster has Highly Available (HA) kubernetes api-servers, this allows jupyterhub which uses them to have no downtime during upgrades of kubernetes itself. + - They also increase control plane uptime to 99.95%. + - To avoid tripling the number of nodes while still having HA kubernetes, the `--node-locations` flag can be used to specify a single zone to use. + 5. To test if your cluster is initialized, run: ``` @@ -84,6 +88,7 @@ your google cloud account. The response should list two running nodes (or however many nodes you set with `--num-nodes` above). + 6. Give your account permissions to perform all administrative actions needed. ``` @@ -95,8 +100,8 @@ your google cloud account. Replace `` with the exact email of the Google account you used to sign up for Google Cloud. - Did you enter your email correctly? If not, you can run `kubectl delete - clusterrolebinding cluster-admin-binding` and do it again. + Did you enter your email correctly? If not, you can run `kubectl delete clusterrolebinding cluster-admin-binding` and do it again. + 7. 
[optional] Create a node pool for users This is an optional step, for those who want to separate @@ -112,8 +117,8 @@ your google cloud account. about 0.2 CPU will be requested by system pods. It is a suitable choice for a free account that has a limit on a total of 8 CPU cores. - Note that the node pool is *tainted*. Only user pods that are configured - with a *toleration* for this taint can schedule on the node pool's nodes. + Note that the node pool is _tainted_. Only user pods that are configured + with a _toleration_ for this taint can schedule on the node pool's nodes. This is done in order to ensure the autoscaler will be able to scale down when the user pods have stopped. @@ -134,6 +139,7 @@ your google cloud account. preemptible node recommendation not included pending handling of evictions in jupyterhub/kubespawner#223 --> + ```{note} Consider adding the ``--preemptible`` flag to reduce the cost significantly. You can `compare the prices here diff --git a/doc/source/kubernetes/ibm/step-zero-ibm.md b/doc/source/kubernetes/ibm/step-zero-ibm.md index 37e9ef6884..5ed4ac6fa5 100644 --- a/doc/source/kubernetes/ibm/step-zero-ibm.md +++ b/doc/source/kubernetes/ibm/step-zero-ibm.md @@ -7,8 +7,8 @@ It should provide you with enough knowledge to create a cluster, deploy your app Before you begin: -* Understand the [Kubernetes basics][https://kubernetes.io/docs/tutorials/kubernetes-basics/]. -* Install the IBM Cloud Developer Tools +- Understand the [Kubernetes basics][https://kubernetes.io/docs/tutorials/kubernetes-basics/]. +- Install the IBM Cloud Developer Tools - Install the [IBM Cloud CLI](https://cloud.ibm.com/docs/cli?topic=cloud-cli-getting-started). - `curl -sL https://ibm.biz/idt-installer | bash` - Verify your installation @@ -27,21 +27,26 @@ Procedure: 1. 
Create a Kubernetes cluster Kubernetes Service delivers powerful tools by combining Docker and Kubernetes technologies, an intuitive user experience, and built-in security and isolation to automate the deployment, operation, scaling, and monitoring of containerized apps in a cluster of computing hosts. - To set up the Kubernetes cluster: - 1. Create a Kubernetes cluster from the [IBM Cloud catalog](https://cloud.ibm.com/kubernetes/catalog/create)). - 2. When configuring the new cluster, select the **Cluster type** and click **Create Cluster** to provision a Kubernetes cluster. - 2.1 In the case of a free cluster you will see something similar to: - ```{image} ../../_static/images/ibm/create-free-kubernetes-cluster-ibm-cloud.png - :align: center - ``` - - 2.2 In the case of a paid cluster you will see something similar to: - ```{image} ../../_static/images/ibm/create-paid-kubernetes-cluster-ibm-cloud.png - :align: center - ``` - 3. Check the status of your **Cluster** and **Worker Nodes** and wait for them to be **ready**. - - Or, if you prefer, create the cluster using the [IBM Cloud CLI tools](https://cloud.ibm.com/docs/containers?topic=containers-cs_cli_install)) + To set up the Kubernetes cluster: + + 1. Create a Kubernetes cluster from the [IBM Cloud catalog](https://cloud.ibm.com/kubernetes/catalog/create)). + 2. When configuring the new cluster, select the **Cluster type** and click **Create Cluster** to provision a Kubernetes cluster. + 2.1 In the case of a free cluster you will see something similar to: + + ```{image} ../../_static/images/ibm/create-free-kubernetes-cluster-ibm-cloud.png + :align: center + ``` + + 2.2 In the case of a paid cluster you will see something similar to: + + ```{image} ../../_static/images/ibm/create-paid-kubernetes-cluster-ibm-cloud.png + :align: center + ``` + + 3. Check the status of your **Cluster** and **Worker Nodes** and wait for them to be **ready**. 
+ + Or, if you prefer, create the cluster using the [IBM Cloud CLI tools](https://cloud.ibm.com/docs/containers?topic=containers-cs_cli_install)) + 2. Configure kubectl [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) is a CLI tool to interact with a Kubernetes cluster. In this occasion, you will use it to point forward to the created Kubernetes cluster. @@ -50,17 +55,20 @@ Procedure: ``` ibmcloud cs cluster-config ``` - 3. Copy and paste the **export** command to set the KUBECONFIG environment variable as directed. The command should be something similar to: + 3. Copy and paste the **export** command to set the KUBECONFIG environment variable as directed. The command should be something similar to: + ``` export KUBECONFIG=/Users/user/.bluemix/plugins/container-service/clusters/JupyterHub/kube-config-***-JupyterHub.yml ``` - To verify whether the KUBECONFIG environment variable is set correctly or not, run the following command: + To verify whether the KUBECONFIG environment variable is set correctly or not, run the following command: ``` echo $KUBECONFIG ``` - 4. Check that the `kubectl` command is correctly configured + + 4. Check that the `kubectl` command is correctly configured + ``` kubectl cluster-info ``` @@ -71,6 +79,7 @@ Procedure: Hooray! You have your Kubernetes cluster running; it's time to begin {ref}`setup-helm`. -More info and readings: +More info and readings: + - - diff --git a/doc/source/kubernetes/microsoft/step-zero-azure-autoscale.md b/doc/source/kubernetes/microsoft/step-zero-azure-autoscale.md index 2a8c39586b..83937512d5 100644 --- a/doc/source/kubernetes/microsoft/step-zero-azure-autoscale.md +++ b/doc/source/kubernetes/microsoft/step-zero-azure-autoscale.md @@ -15,7 +15,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta the Azure interactive shell, the other is to install the Azure command-line tools locally. Instructions for each are below. - * **Using the Azure interactive shell**. 
The [Azure Portal](https://portal.azure.com) + - **Using the Azure interactive shell**. The [Azure Portal](https://portal.azure.com) contains an interactive shell that you can use to communicate with your Kubernetes cluster. To access this shell, go to [portal.azure.com](https://portal.azure.com) and click on the button below. @@ -31,19 +31,20 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta account where your shell filesystem will live. ``` - * **Install command-line tools locally**. You can access the Azure CLI via + - **Install command-line tools locally**. You can access the Azure CLI via a package that you can install locally. To do so, first follow the [installation instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) in the Azure documentation. Then run the following command to connect your local CLI with your account: - ``` + ``` az login ``` You'll need to open a browser and follow the instructions in your terminal to log in. + 2. Activate the correct subscription. Azure uses the concept of **subscriptions** to manage spending. You can get a list of subscriptions your account has access to by running: @@ -59,6 +60,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta ``` az account set --subscription ``` + 3. Setup the CLI for Autoscaling features. First install the [aks-preview](https://github.com/Azure/azure-cli-extensions/tree/master/src/aks-preview) CLI extension. This will grant access to new commands. @@ -89,10 +91,11 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta ``` az provider register --namespace Microsoft.ContainerService ``` + 4. Create a resource group. Azure uses the concept of **resource groups** to group related resources together. We need to create a resource group in a given data center location. We will create - computational resources *within* this resource group. 
+ computational resources _within_ this resource group. ``` az group create \ @@ -103,20 +106,21 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--name` specifies the name of your resource group. We recommend using something + - `--name` specifies the name of your resource group. We recommend using something that uniquely identifies this hub. For example, if you are creating a resource group for UC Berkeley's 2018 Spring Data100 Course, you may give it a `` of `ucb_2018sp_data100_hub`. - * `--location` specifies the location of the data center you want your resource to be in. + - `--location` specifies the location of the data center you want your resource to be in. For options, see the [Azure list of locations that support AKS](https://docs.microsoft.com/en-us/azure/aks/quotas-skus-regions#region-availability). - * `--output table` specifies that the output should be in human readable + - `--output table` specifies that the output should be in human readable format, rather than the default JSON output. We shall use this with most commands when executing them by hand. Consider [setting a cloud budget](https://docs.microsoft.com/en-us/partner-center/set-an-azure-spending-budget-for-your-customers) for your Azure account in order to make sure you don't accidentally spend more than you wish to. + 5. Choose a cluster name. In the following steps we'll run commands that ask you to input a cluster @@ -131,6 +135,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta mkdir cd ``` + 6. Create an ssh key to secure your cluster. ``` @@ -144,6 +149,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta This command will also print out something to your terminal screen. You don't need to do anything with this text. + 7. Create a virtual network and sub-network. Kubernetes does not by default come with a controller that enforces `networkpolicy` resources. 
@@ -164,11 +170,11 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--resource-group` is the ResourceGroup you created - * `--name` is the name you want to assign to your virtual network, for example, `hub-vnet` - * `--address-prefixes` are the IP address prefixes for your virtual network - * `--subnet-name` is your desired name for your subnet, for example, `hub-subnet` - * `--subnet-prefixes` are the IP address prefixes in [CIDR format](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for the subnet + - `--resource-group` is the ResourceGroup you created + - `--name` is the name you want to assign to your virtual network, for example, `hub-vnet` + - `--address-prefixes` are the IP address prefixes for your virtual network + - `--subnet-name` is your desired name for your subnet, for example, `hub-subnet` + - `--subnet-prefixes` are the IP address prefixes in [CIDR format](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for the subnet We will now retrieve the application IDs of the VNet and subnet we just created and save them to bash variables. @@ -203,6 +209,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta ``` You will need Owner role on your subscription for this step to succeed. + 8. Create an AKS cluster. 
The following command will request a Kubernetes cluster within the resource @@ -232,29 +239,30 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--name` is the name you want to use to refer to your cluster - * `--resource-group` is the ResourceGroup you created - * `--ssh-key-value` is the ssh public key created - * `--node-count` is the number of nodes you want in your Kubernetes cluster - * `--node-vm-size` is the size of the nodes you want to use, which varies based on + - `--name` is the name you want to use to refer to your cluster + - `--resource-group` is the ResourceGroup you created + - `--ssh-key-value` is the ssh public key created + - `--node-count` is the number of nodes you want in your Kubernetes cluster + - `--node-vm-size` is the size of the nodes you want to use, which varies based on what you are using your cluster for and how much RAM/CPU each of your users need. There is a [list of all possible node sizes](https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs) for you to choose from, but not all might be available in your location. If you get an error whilst creating the cluster you can try changing either the region or the node size. - * `--enable-vmss` deploys the cluster as a scale set. - * `--enable-cluster-autoscaler` installs a [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) onto the cluster (though counterintuitively, does not enable it!). - * `--min-count`/`--max-count` are the minimum/maximum number of nodes in the cluster at any time. - * `--kubernetes-version` installs a specific version of Kubernetes onto the cluster. To autoscale, we require `>= v 1.12.4`, though it's recommended to use the most recent version available (you can find out what the most recent version of kubernetes available is by running `az aks get-versions --location `). 
- * `--service-principal` is the application ID of the service principal we created - * `--client-secret` is the password for the service principal we created - * `--dns-service-ip` is an IP address assigned to the [Kubernetes DNS service](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) - * `--docker-bridge-address` is a specific IP address and netmask for the Docker bridge, using standard CIDR notation - * `--network-plugin` is the Kubernetes network plugin to use. In this example, we have used Azure's own implementation. - * `--network-policy` is the Kubernetes network policy to use. In this example, we have used Azure's own implementation. - * `--service-cidr` is a CIDR notation IP range from which to assign service cluster IPs - * `vnet-subnet-id` is the application ID of the subnet we created + - `--enable-vmss` deploys the cluster as a scale set. + - `--enable-cluster-autoscaler` installs a [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) onto the cluster (though counterintuitively, does not enable it!). + - `--min-count`/`--max-count` are the minimum/maximum number of nodes in the cluster at any time. + - `--kubernetes-version` installs a specific version of Kubernetes onto the cluster. To autoscale, we require `>= v 1.12.4`, though it's recommended to use the most recent version available (you can find out what the most recent version of kubernetes available is by running `az aks get-versions --location `). 
+ - `--service-principal` is the application ID of the service principal we created + - `--client-secret` is the password for the service principal we created + - `--dns-service-ip` is an IP address assigned to the [Kubernetes DNS service](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) + - `--docker-bridge-address` is a specific IP address and netmask for the Docker bridge, using standard CIDR notation + - `--network-plugin` is the Kubernetes network plugin to use. In this example, we have used Azure's own implementation. + - `--network-policy` is the Kubernetes network policy to use. In this example, we have used Azure's own implementation. + - `--service-cidr` is a CIDR notation IP range from which to assign service cluster IPs + - `vnet-subnet-id` is the application ID of the subnet we created This should take a few minutes and provide you with a working Kubernetes cluster! + 9. If you're using the Azure CLI locally, install [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), a tool for accessing the Kubernetes API from the commandline: @@ -263,6 +271,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta ``` Note: kubectl is already installed in Azure Cloud Shell. + 10. Get credentials from Azure for `kubectl` to work: ``` @@ -274,10 +283,11 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--name` is the name you gave your cluster - * `--resource-group` is the ResourceGroup you created + - `--name` is the name you gave your cluster + - `--resource-group` is the ResourceGroup you created This automatically updates your Kubernetes client configuration file. + 11. Check if your cluster is fully functional ``` @@ -287,6 +297,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta The response should list three running nodes and their Kubernetes versions! 
Each node should have the status of `Ready`, note that this may take a few moments. + 12. Enabling Autoscaling We now move to the Azure Portal to enable autoscaling and set rules to manage the Cluster Autoscaler. @@ -316,8 +327,8 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta Click the blue "Enable autoscaling" button and an autogenerated form for a scale condition will appear. We will add two new rules to this condition: - * Increase the instance count by 1 when the average CPU usage over 10 minutes is greater than 70% - * Decrease the instance count by 1 when the average CPU usage over 10 minutes is less than 5% + - Increase the instance count by 1 when the average CPU usage over 10 minutes is greater than 70% + - Decrease the instance count by 1 when the average CPU usage over 10 minutes is less than 5% ```{image} ../../_static/images/azure/scale_condition.png :align: center diff --git a/doc/source/kubernetes/microsoft/step-zero-azure.md b/doc/source/kubernetes/microsoft/step-zero-azure.md index 7a04bec46b..aca090d839 100644 --- a/doc/source/kubernetes/microsoft/step-zero-azure.md +++ b/doc/source/kubernetes/microsoft/step-zero-azure.md @@ -11,7 +11,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta the Azure interactive shell, the other is to install the Azure command-line tools locally. Instructions for each are below. - * **Using the Azure interactive shell**. The [Azure Portal](https://portal.azure.com) + - **Using the Azure interactive shell**. The [Azure Portal](https://portal.azure.com) contains an interactive shell that you can use to communicate with your Kubernetes cluster. To access this shell, go to [portal.azure.com](https://portal.azure.com) and click on the button below. 
@@ -26,8 +26,8 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta * The first time you do this, you'll be asked to create a storage account where your shell filesystem will live. ``` - - * **Install command-line tools locally**. You can access the Azure CLI via + + - **Install command-line tools locally**. You can access the Azure CLI via a package that you can install locally. To do so, first follow the [installation instructions](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) in the @@ -40,6 +40,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta You'll need to open a browser and follow the instructions in your terminal to log in. + 2. Activate the correct subscription. Azure uses the concept of **subscriptions** to manage spending. You can get a list of subscriptions your account has access to by running: @@ -55,10 +56,11 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta ``` az account set --subscription ``` + 3. Create a resource group. Azure uses the concept of **resource groups** to group related resources together. We need to create a resource group in a given data center location. We will create - computational resources *within* this resource group. + computational resources _within_ this resource group. ``` az group create \ @@ -69,20 +71,21 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--name` specifies the name of your resource group. We recommend using something + - `--name` specifies the name of your resource group. We recommend using something that uniquely identifies this hub. For example, if you are creating a resource group for UC Berkeley's 2018 Spring Data100 Course, you may give it a `` of `ucb_2018sp_data100_hub`. - * `--location` specifies the location of the data center you want your resource to be in. 
+ - `--location` specifies the location of the data center you want your resource to be in. In this case, we used the `centralus` location. For other options, see the [Azure list of locations that support AKS](https://docs.microsoft.com/en-us/azure/aks/quotas-skus-regions#region-availability). - * `--output table` specifies that the output should be in human readable + - `--output table` specifies that the output should be in human readable format, rather than the default JSON output. We shall use this with most commands when executing them by hand. Consider [setting a cloud budget](https://docs.microsoft.com/en-us/partner-center/set-an-azure-spending-budget-for-your-customers) for your Azure account in order to make sure you don't accidentally spend more than you wish to. + 4. Choose a cluster name. In the following steps we'll run commands that ask you to input a cluster @@ -97,6 +100,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta mkdir cd ``` + 5. Create an ssh key to secure your cluster. ``` @@ -110,6 +114,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta This command will also print out something to your terminal screen. You don't need to do anything with this text. + 6. Create a virtual network and sub-network. Kubernetes does not by default come with a controller that enforces `networkpolicy` resources. 
@@ -130,11 +135,11 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--resource-group` is the ResourceGroup you created - * `--name` is the name you want to assign to your virtual network, for example, `hub-vnet` - * `--address-prefixes` are the IP address prefixes for your virtual network - * `--subnet-name` is your desired name for your subnet, for example, `hub-subnet` - * `--subnet-prefixes` are the IP address prefixes in [CIDR format](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for the subnet + - `--resource-group` is the ResourceGroup you created + - `--name` is the name you want to assign to your virtual network, for example, `hub-vnet` + - `--address-prefixes` are the IP address prefixes for your virtual network + - `--subnet-name` is your desired name for your subnet, for example, `hub-subnet` + - `--subnet-prefixes` are the IP address prefixes in [CIDR format](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing) for the subnet We will now retrieve the application IDs of the VNet and subnet we just created and save them to bash variables. @@ -169,6 +174,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta ``` You will need Owner role on your subscription for this step to succeed. + 7. Create an AKS cluster. At this stage, you may wish to think about customising your deployment. 
The @@ -200,26 +206,27 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--name` is the name you want to use to refer to your cluster - * `--resource-group` is the ResourceGroup you created - * `--ssh-key-value` is the ssh public key created - * `--node-count` is the number of nodes you want in your Kubernetes cluster - * `--node-vm-size` is the size of the nodes you want to use, which varies based on + - `--name` is the name you want to use to refer to your cluster + - `--resource-group` is the ResourceGroup you created + - `--ssh-key-value` is the ssh public key created + - `--node-count` is the number of nodes you want in your Kubernetes cluster + - `--node-vm-size` is the size of the nodes you want to use, which varies based on what you are using your cluster for and how much RAM/CPU each of your users need. There is a [list of all possible node sizes](https://docs.microsoft.com/en-us/azure/cloud-services/cloud-services-sizes-specs) for you to choose from, but not all might be available in your location. If you get an error whilst creating the cluster you can try changing either the region or the node size. - * `--service-principal` is the application ID of the service principal we created - * `--client-secret` is the password for the service principal we created - * `--dns-service-ip` is an IP address assigned to the [Kubernetes DNS service](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) - * `--docker-bridge-address` is a specific IP address and netmask for the Docker bridge, using standard CIDR notation - * `--network-plugin` is the Kubernetes network plugin to use. In this example, we have used Azure's own implementation. - * `--network-policy` is the Kubernetes network policy to use. In this example, we have used Azure's own implementation. 
- * `--service-cidr` is a CIDR notation IP range from which to assign service cluster IPs - * `vnet-subnet-id` is the application ID of the subnet we created - * This command will install the default version of Kubernetes. You can pass `--kubernetes-version` to install a different version. + - `--service-principal` is the application ID of the service principal we created + - `--client-secret` is the password for the service principal we created + - `--dns-service-ip` is an IP address assigned to the [Kubernetes DNS service](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) + - `--docker-bridge-address` is a specific IP address and netmask for the Docker bridge, using standard CIDR notation + - `--network-plugin` is the Kubernetes network plugin to use. In this example, we have used Azure's own implementation. + - `--network-policy` is the Kubernetes network policy to use. In this example, we have used Azure's own implementation. + - `--service-cidr` is a CIDR notation IP range from which to assign service cluster IPs + - `vnet-subnet-id` is the application ID of the subnet we created + - This command will install the default version of Kubernetes. You can pass `--kubernetes-version` to install a different version. This should take a few minutes and provide you with a working Kubernetes cluster! + 8. If you're using the Azure CLI locally, install [kubectl](https://kubernetes.io/docs/reference/kubectl/overview/), a tool for accessing the Kubernetes API from the commandline: @@ -228,6 +235,7 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta ``` Note: kubectl is already installed in Azure Cloud Shell. + 9. 
Get credentials from Azure for `kubectl` to work: ``` @@ -239,10 +247,11 @@ If you prefer to use the Azure portal see the [Azure Kubernetes Service quicksta where: - * `--name` is the name you gave your cluster - * `--resource-group` is the ResourceGroup you created + - `--name` is the name you gave your cluster + - `--resource-group` is the ResourceGroup you created This automatically updates your Kubernetes client configuration file. + 10. Check if your cluster is fully functional ``` diff --git a/doc/source/kubernetes/ovh/step-zero-ovh.md b/doc/source/kubernetes/ovh/step-zero-ovh.md index 4fcadd6c29..c8fe419f3c 100644 --- a/doc/source/kubernetes/ovh/step-zero-ovh.md +++ b/doc/source/kubernetes/ovh/step-zero-ovh.md @@ -10,7 +10,6 @@ This page describes how to create a Kubernetes cluster using the OVH Control Pan and how to access the cluster using the command line with `kubectl`. 1. Log in to the [OVH Control Panel](https://www.ovh.com/auth/). - ```{note} You first need to create an OVH account if you don't have one already. ``` @@ -38,31 +37,28 @@ and how to access the cluster using the command line with `kubectl`. ``` 8. Click on **Send** 9. Once the cluster is ready, click on **Nodes** to add 2 nodes: - ```{image} ../../_static/images/ovh/add-nodes.png :alt: Add nodes to the cluster ``` - You can start with the **b2-7** flavor, or choosing a different flavor based on your requirements. 10. Download the `kubeconfig` file and store it under `~/.kube/config` on your machine. - - ```{image} ../../_static/images/ovh/kubeconfig.png - :alt: Download the kubeconfig - ``` + ```{image} ../../_static/images/ovh/kubeconfig.png + :alt: Download the kubeconfig + ``` 11. To test if your cluster is initialized, run: - ``` - kubectl get node - ``` + ``` + kubectl get node + ``` - The response should list two running nodes (or however many nodes you - set with `--num-nodes` above). 
+ The response should list two running nodes (or however many nodes you + set with `--num-nodes` above). - ```{note} - Check out the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/tools/install-kubectl) - to install `kubectl`. - ``` + ```{note} + Check out the [Kubernetes Documentation](https://kubernetes.io/docs/tasks/tools/install-kubectl) + to install `kubectl`. + ``` Congrats! Now that you have your Kubernetes cluster running, it's time to begin {ref}`setup-helm`. diff --git a/doc/source/kubernetes/setup-helm.md b/doc/source/kubernetes/setup-helm.md index 6123412355..659ab2ea11 100644 --- a/doc/source/kubernetes/setup-helm.md +++ b/doc/source/kubernetes/setup-helm.md @@ -4,7 +4,7 @@ [Helm](https://helm.sh/), the package manager for Kubernetes, is a useful tool for: installing, upgrading and managing applications on a Kubernetes cluster. -Helm packages are called *charts*. +Helm packages are called _charts_. We will be installing and managing JupyterHub on our Kubernetes cluster using a Helm chart. Charts are abstractions describing how to install packages onto a Kubernetes @@ -31,8 +31,8 @@ simplest way to install Helm is to run Helm's installer script in a terminal: curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash ``` -* The minimum supported version of Helm in Z2JH is `3.2.0`. -* Helm 3 uses the same security mechanisms as other Kubernetes clients such as `kubectl`. +- The minimum supported version of Helm in Z2JH is `3.2.0`. +- Helm 3 uses the same security mechanisms as other Kubernetes clients such as `kubectl`. ## Verify diff --git a/doc/source/kubernetes/setup-helm2.md b/doc/source/kubernetes/setup-helm2.md index 1cd085c543..ace9136f83 100644 --- a/doc/source/kubernetes/setup-helm2.md +++ b/doc/source/kubernetes/setup-helm2.md @@ -1,6 +1,5 @@ --- orphan: true - --- (setup-helm2)= @@ -15,7 +14,7 @@ clusters. 
Helm 2 is deprecated since of November 2019, and [Helm](https://helm.sh/), the package manager for Kubernetes, is a useful tool for: installing, upgrading and managing applications on a Kubernetes cluster. -Helm packages are called *charts*. +Helm packages are called _charts_. We will be installing and managing JupyterHub on our Kubernetes cluster using a Helm chart. @@ -27,7 +26,7 @@ the package. Helm has two parts: a client (`helm`) and a server (`tiller`). Tiller runs inside of your Kubernetes cluster as a pod in the kube-system namespace. Tiller -manages both, the *releases* (installations) and *revisions* (versions) of charts deployed +manages both, the _releases_ (installations) and _revisions_ (versions) of charts deployed on the cluster. When you run `helm` commands, your local Helm client sends instructions to `tiller` in the cluster that in turn make the requested changes. @@ -60,6 +59,7 @@ cluster: ``` kubectl --namespace kube-system create serviceaccount tiller ``` + 2. Give the `ServiceAccount` full permissions to manage the cluster. If you know your kubernetes cluster does not have RBAC enabled, you **must** skip this step. @@ -70,6 +70,7 @@ cluster: ``` See [our RBAC documentation](rbac) for more information. + 3. Initialize `helm` and `tiller`. ``` @@ -105,7 +106,7 @@ kubectl patch deployment tiller-deploy --namespace=kube-system --type=json --pat `tiller` s port is exposed in the cluster without authentication and if you probe this port directly (i.e. by bypassing `helm`) then `tiller` s permissions can be exploited. This step forces `tiller` to listen to commands from localhost (i.e. -`helm`) *only* so that e.g. other pods inside the cluster cannot ask `tiller` to +`helm`) _only_ so that e.g. other pods inside the cluster cannot ask `tiller` to install a new chart granting them arbitrary, elevated RBAC privileges and exploit them. 
[More details here.](https://engineering.bitnami.com/articles/helm-security.html) diff --git a/doc/source/repo2docker.md b/doc/source/repo2docker.md index 02754789f2..6951b77f51 100644 --- a/doc/source/repo2docker.md +++ b/doc/source/repo2docker.md @@ -1,6 +1,5 @@ --- orphan: true - --- ```{eval-rst} -.. glossary:: +.. glossary:: `admin user `_ A user who can access the JupyterHub admin panel. They can start/stop user diff --git a/doc/source/resources/reference-docs.md b/doc/source/resources/reference-docs.md index 273fefafb3..9c3f91ceda 100644 --- a/doc/source/resources/reference-docs.md +++ b/doc/source/resources/reference-docs.md @@ -2,10 +2,10 @@ # Related Projects -* The [JupyterHub Documentation](https://jupyterhub.readthedocs.io/en/latest/) +- The [JupyterHub Documentation](https://jupyterhub.readthedocs.io/en/latest/) provides information about JupyterHub itself (not the Kubernetes deployment). -* [Binder](https://mybinder.org) allows users to create sharable computational +- [Binder](https://mybinder.org) allows users to create sharable computational environments on-the-fly. It makes heavy use of JupyterHub. -* The [2016 JupyterHub Workshop](https://github.com/jupyter-resources/jupyterhub-2016-workshop) +- The [2016 JupyterHub Workshop](https://github.com/jupyter-resources/jupyterhub-2016-workshop) was an informal gathering to share experience in deploying JupyterHub for various use-cases, including teaching and high-performance computing. diff --git a/doc/source/resources/tools.md b/doc/source/resources/tools.md index 4c82c2c5c5..7234434629 100644 --- a/doc/source/resources/tools.md +++ b/doc/source/resources/tools.md @@ -104,7 +104,7 @@ processes in order to ensure that they remain running if needed. ### Pods -Pods are essentially a collection of one or more *containers* that +Pods are essentially a collection of one or more _containers_ that run together. You can think of them as a way of combining containers that, as a group, accomplish some goal. 
@@ -112,10 +112,10 @@ For example, say you want to create a web server that is open to the world, but you also want authentication so that only a select group of users can access it. You could use a single pod with two containers. -* One that does the authentication. It would have something like Apache +- One that does the authentication. It would have something like Apache specified in its container image, and would be connected to the outside world. -* One that receives information from the authentication container, and +- One that receives information from the authentication container, and does something fancy with it (maybe it runs a python process). This is useful because it lets you compartmentalize the components of the @@ -228,8 +228,8 @@ Finally, the output of JupyterHub is a user pod, which specifies the computational environment in which a single user will operate. So essentially a JupyterHub is a collection of: -* Pods that contain the JupyterHub Machinery -* A bunch of user pods that are constantly being created or destroyed. +- Pods that contain the JupyterHub Machinery +- A bunch of user pods that are constantly being created or destroyed. Below we'll describe the primary JupyterHub pods. @@ -239,9 +239,9 @@ This is the user-facing pod. It provides the IP address that people will go to in order to access JupyterHub. When a new users goes to this pod, it will decide whether to: -* send that user to the Hub pod, which will create a container for that +- send that user to the Hub pod, which will create a container for that user, or -* if that user's container already exists, send them directly to that +- if that user's container already exists, send them directly to that container instead. 
Information about the user's identity is stored as a cookie on their diff --git a/images/image-awaiter/README.md b/images/image-awaiter/README.md index f3ac407a94..8eaafa8ccb 100644 --- a/images/image-awaiter/README.md +++ b/images/image-awaiter/README.md @@ -9,14 +9,14 @@ pods are ready, and exits when they are. ## Why would one use it? Because it can delay the hub to be upgraded before the relevant images are made -available, and that can for large images cut down startup time from almost ten +available, and that can for large images cut down startup time from almost ten minutes to a few seconds. ## FAQ ### What technical knowledge is needed to understand this? -You need to know about [Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) and [Kubernetes DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), about [Helm and helm hooks](https://github.com/kubernetes/helm/blob/master/docs/charts_hooks.md), +You need to know about [Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) and [Kubernetes DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/), about [Helm and helm hooks](https://github.com/kubernetes/helm/blob/master/docs/charts_hooks.md), and about the programming language Go. ### Why is this project in Go? Isn't the Jupyter Infrastructure ecosystem mostly Python? diff --git a/images/singleuser-sample/README.md b/images/singleuser-sample/README.md index ce3f3670a9..859ced1b01 100644 --- a/images/singleuser-sample/README.md +++ b/images/singleuser-sample/README.md @@ -8,7 +8,7 @@ which also contains many other images suitable for use with the Helm chart. To help you choose another one see [the docker-stacks documentation on selecting a user image](http://jupyter-docker-stacks.readthedocs.io/en/latest/using/selecting.html). 
-For a brief introduction to *Dockerfiles*, *images* and *containers*, see [the +For a brief introduction to _Dockerfiles_, _images_ and _containers_, see [the guide's summary about container technology.](https://z2jh.jupyter.org/en/latest/tools.html#container-technology). ## Basic usage @@ -26,6 +26,7 @@ docker run -it --rm -p 8888:8888 -e JUPYTER_ENABLE_LAB=true jupyterhub/k8s-si This image available tags can be found [here](https://hub.docker.com/r/jupyterhub/k8s-singleuser-sample/tags/). ## In the base-notebook image + - Ubuntu Linux - v18.04 aka. Bionic - JupyterHub - required by with Helm chart since KubeSpawner requires it - [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) and [JupyterLab-Hub extension](https://jupyterlab.readthedocs.io/en/stable/user/jupyterhub.html) - to activate it over the classical UI by default, see [the guide's instructions](https://z2jh.jupyter.org/en/latest/user-environment.html#use-jupyterlab-by-default).