diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index f516e56ded..5bf500ba12 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -5,6 +5,10 @@ updates:
     directory: "/"
     schedule:
       interval: "weekly"
+    groups:
+      actions:
+        patterns:
+          - "*"
   # Python
   - package-ecosystem: "pip"
     directory: "/"
diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml
index 8763635490..f20f0d9ef3 100644
--- a/.github/workflows/downstream.yml
+++ b/.github/workflows/downstream.yml
@@ -122,7 +122,7 @@ jobs:
         uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1
         with:
           package_name: jupytext
-          test_command: pip install pytest-jupyter[server] gitpython pre-commit && python -m ipykernel install --name jupytext-dev --user && pytest -vv -raXxs -W default --durations 10 --color=yes --ignore=tests/test_doc_files_are_notebooks.py --ignore=tests/test_changelog.py
+          test_command: pip install pytest-jupyter[server] gitpython pre-commit && python -m ipykernel install --name jupytext-dev --user && pytest -vv -raXxs -W default --durations 10 --color=yes

   downstream_check: # This job does nothing and is only used for the branch protection
     if: always()
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1fce7856ec..cb949b907d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -21,7 +21,7 @@ repos:
       - id: trailing-whitespace

   - repo: https://github.com/python-jsonschema/check-jsonschema
-    rev: 0.27.2
+    rev: 0.27.3
     hooks:
       - id: check-github-workflows
@@ -33,17 +33,11 @@
         [mdformat-gfm, mdformat-frontmatter, mdformat-footnote]

   - repo: https://github.com/pre-commit/mirrors-prettier
-    rev: "v3.1.0"
+    rev: "v4.0.0-alpha.8"
     hooks:
       - id: prettier
         types_or: [yaml, html, json]

-  - repo: https://github.com/adamchainz/blacken-docs
-    rev: "1.16.0"
-    hooks:
-      - id: blacken-docs
-        additional_dependencies: [black==23.7.0]
-
   - repo: https://github.com/codespell-project/codespell
     rev: "v2.2.6"
     hooks:
@@ -58,7 +52,7 @@ repos:
       - id: rst-inline-touching-normal

   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.7.1"
+    rev: "v1.8.0"
    hooks:
      - id: mypy
        files: jupyter_server
@@ -67,7 +61,7 @@
        ["traitlets>=5.13", "jupyter_core>=5.5", "jupyter_client>=8.5"]

  - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.1.6
+    rev: v0.1.9
    hooks:
      - id: ruff
        types_or: [python, jupyter]
@@ -76,7 +70,7 @@
        types_or: [python, jupyter]

  - repo: https://github.com/scientific-python/cookie
-    rev: "2023.11.17"
+    rev: "2023.12.21"
    hooks:
      - id: sp-repo-review
        additional_dependencies: ["repo-review[cli]"]
diff --git a/.readthedocs.yaml b/.readthedocs.yaml
index 208e5c37e2..2ce0728970 100644
--- a/.readthedocs.yaml
+++ b/.readthedocs.yaml
@@ -12,3 +12,8 @@ python:
      path: .
      extra_requirements:
        - docs
+formats:
+  - epub
+  - htmlzip
+  # TODO: evaluate, see https://github.com/jupyter-server/jupyter_server/issues/1378
+  # - pdf
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fd6d970440..6ba32a69df 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,65 @@
 All notable changes to this project will be documented in this file.
+## 2.12.4
+
+([Full Changelog](https://github.com/jupyter-server/jupyter_server/compare/v2.12.3...7bb21b45392c889b5c87eb0d1b48662a497ba15a))
+
+### Bugs fixed
+
+- Fix log arguments for gateway client error [#1385](https://github.com/jupyter-server/jupyter_server/pull/1385) ([@minrk](https://github.com/minrk))
+
+### Contributors to this release
+
+([GitHub contributors page for this release](https://github.com/jupyter-server/jupyter_server/graphs/contributors?from=2024-01-09&to=2024-01-11&type=c))
+
+[@minrk](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Aminrk+updated%3A2024-01-09..2024-01-11&type=Issues)
+
+
+## 2.12.3
+
+([Full Changelog](https://github.com/jupyter-server/jupyter_server/compare/v2.12.2...99b9126853b69aafb700b4c92b50b83b7ca00e32))
+
+### Bugs fixed
+
+- Import User unconditionally [#1384](https://github.com/jupyter-server/jupyter_server/pull/1384) ([@yuvipanda](https://github.com/yuvipanda))
+
+### Maintenance and upkeep improvements
+
+- Simplify the jupytext downstream test [#1383](https://github.com/jupyter-server/jupyter_server/pull/1383) ([@mwouts](https://github.com/mwouts))
+- Fix test param for pytest-xdist [#1382](https://github.com/jupyter-server/jupyter_server/pull/1382) ([@tornaria](https://github.com/tornaria))
+
+### Contributors to this release
+
+([GitHub contributors page for this release](https://github.com/jupyter-server/jupyter_server/graphs/contributors?from=2024-01-04&to=2024-01-09&type=c))
+
+[@mwouts](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Amwouts+updated%3A2024-01-04..2024-01-09&type=Issues) | [@tornaria](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Atornaria+updated%3A2024-01-04..2024-01-09&type=Issues) | [@welcome](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Awelcome+updated%3A2024-01-04..2024-01-09&type=Issues) | [@yuvipanda](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Ayuvipanda+updated%3A2024-01-04..2024-01-09&type=Issues)
+
+## 2.12.2
+
+([Full Changelog](https://github.com/jupyter-server/jupyter_server/compare/v2.12.1...49915685531ce90baae9d2a4d6baa9c533beffcc))
+
+### Bugs fixed
+
+- Fix a typo in error message [#1381](https://github.com/jupyter-server/jupyter_server/pull/1381) ([@krassowski](https://github.com/krassowski))
+- Force legacy ws subprotocol when using gateway [#1311](https://github.com/jupyter-server/jupyter_server/pull/1311) ([@epignot](https://github.com/epignot))
+
+### Maintenance and upkeep improvements
+
+- Update pre-commit deps [#1380](https://github.com/jupyter-server/jupyter_server/pull/1380) ([@blink1073](https://github.com/blink1073))
+- Use ruff docstring-code-format [#1377](https://github.com/jupyter-server/jupyter_server/pull/1377) ([@blink1073](https://github.com/blink1073))
+
+### Documentation improvements
+
+- Enable htmlzip and epub on readthedocs [#1379](https://github.com/jupyter-server/jupyter_server/pull/1379) ([@bollwyvl](https://github.com/bollwyvl))
+
+### Contributors to this release
+
+([GitHub contributors page for this release](https://github.com/jupyter-server/jupyter_server/graphs/contributors?from=2023-12-06&to=2024-01-04&type=c))
+
+[@blink1073](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Ablink1073+updated%3A2023-12-06..2024-01-04&type=Issues) | [@bollwyvl](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Abollwyvl+updated%3A2023-12-06..2024-01-04&type=Issues) | [@epignot](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Aepignot+updated%3A2023-12-06..2024-01-04&type=Issues) | [@krassowski](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Akrassowski+updated%3A2023-12-06..2024-01-04&type=Issues)
+
 ## 2.12.1

 ([Full Changelog](https://github.com/jupyter-server/jupyter_server/compare/v2.12.0...a59beb9b7bf3decc00af782821561435f47bbb16))
@@ -19,8 +78,6 @@ All notable changes to this project will be documented in this file.
 [@minrk](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3Aminrk+updated%3A2023-12-05..2023-12-06&type=Issues) | [@Zsailer](https://github.com/search?q=repo%3Ajupyter-server%2Fjupyter_server+involves%3AZsailer+updated%3A2023-12-05..2023-12-06&type=Issues)
-
-
 ## 2.12.0

 ([Full Changelog](https://github.com/jupyter-server/jupyter_server/compare/v2.11.2...3bd347b6f2ead5897a18c6171db1174eaaf6176d))
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 0000000000..a70d8488c1
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,8 @@
+# This is a comment.
+# Each line is a file pattern followed by one or more owners.
+# These owners will be the default owners for everything in
+# the repo. Unless a later match takes precedence,
+# “team-big-data” team members will be requested for
+# review when someone opens a pull request.
+*
+@Spotinst/team-big-data
diff --git a/examples/simple/pyproject.toml b/examples/simple/pyproject.toml
index 694b7a4f50..38ae8e71a7 100644
--- a/examples/simple/pyproject.toml
+++ b/examples/simple/pyproject.toml
@@ -31,6 +31,9 @@ source = "nodejs"
 [tool.hatch.build.targets.wheel.shared-data]
 "etc/jupyter/jupyter_server_config.d" = "etc/jupyter/jupyter_server_config.d"

+[tool.hatch.build.targets.wheel]
+packages = ["simple_ext1", "simple_ext2", "simple_ext11"]
+
 [tool.hatch.build.hooks.jupyter-builder]
 dependencies = [
   "hatch-jupyter-builder>=0.8.2",
diff --git a/jupyter_server/_version.py b/jupyter_server/_version.py
index 45cb8501b5..57321c2483 100644
--- a/jupyter_server/_version.py
+++ b/jupyter_server/_version.py
@@ -6,7 +6,7 @@ from typing import List

 # Version string must appear intact for automatic versioning
-__version__ = "2.12.1"
+__version__ = "2.12.4"

 # Build up version_info tuple for backwards compatibility
 pattern = r"(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)"
diff --git a/jupyter_server/auth/security.py b/jupyter_server/auth/security.py
index a5ae185f1e..ede64db522 100644
--- a/jupyter_server/auth/security.py
+++ b/jupyter_server/auth/security.py
@@ -41,7 +41,7 @@ def passwd(passphrase=None, algorithm="argon2"):

     Examples
     --------
-    >>> passwd('mypassword')  # doctest: +ELLIPSIS
+    >>> passwd("mypassword")  # doctest: +ELLIPSIS
     'argon2:...'

     """
@@ -93,15 +93,14 @@ def passwd_check(hashed_passphrase, passphrase):

     Examples
     --------
-    >>> myhash = passwd('mypassword')
-    >>> passwd_check(myhash, 'mypassword')
+    >>> myhash = passwd("mypassword")
+    >>> passwd_check(myhash, "mypassword")
     True

-    >>> passwd_check(myhash, 'otherpassword')
+    >>> passwd_check(myhash, "otherpassword")
     False

-    >>> passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a',
-    ...
'mypassword') + >>> passwd_check("sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a", "mypassword") True """ if hashed_passphrase.startswith("argon2:"): diff --git a/jupyter_server/base/handlers.py b/jupyter_server/base/handlers.py index 47abb1cd5d..f35166b09e 100644 --- a/jupyter_server/base/handlers.py +++ b/jupyter_server/base/handlers.py @@ -33,6 +33,7 @@ from jupyter_server._sysinfo import get_sys_info from jupyter_server._tz import utcnow from jupyter_server.auth.decorator import authorized +from jupyter_server.auth.identity import User from jupyter_server.i18n import combine_translations from jupyter_server.services.security import csp_report_uri from jupyter_server.utils import ( @@ -50,7 +51,7 @@ from tornado.concurrent import Future from jupyter_server.auth.authorizer import Authorizer - from jupyter_server.auth.identity import IdentityProvider, User + from jupyter_server.auth.identity import IdentityProvider from jupyter_server.serverapp import ServerApp from jupyter_server.services.config.manager import ConfigManager from jupyter_server.services.contents.manager import ContentsManager diff --git a/jupyter_server/gateway/connections.py b/jupyter_server/gateway/connections.py index 028a0f8f4e..ab794bf7c7 100644 --- a/jupyter_server/gateway/connections.py +++ b/jupyter_server/gateway/connections.py @@ -13,7 +13,7 @@ from tornado.escape import json_decode, url_escape, utf8 from tornado.httpclient import HTTPRequest from tornado.ioloop import IOLoop -from traitlets import Bool, Instance, Int +from traitlets import Bool, Instance, Int, Unicode from ..services.kernels.connection.base import BaseKernelWebsocketConnection from ..utils import url_path_join @@ -31,6 +31,11 @@ class GatewayWebSocketConnection(BaseKernelWebsocketConnection): retry = Int(0) + # When opening ws connection to gateway, server already negotiated subprotocol with notebook client. + # Same protocol must be used for client and gateway, so legacy ws subprotocol for client is enforced here. + + kernel_ws_protocol = Unicode("", allow_none=True, config=True) + async def connect(self): """Connect to the socket.""" # websocket is initialized before connection diff --git a/jupyter_server/gateway/gateway_client.py b/jupyter_server/gateway/gateway_client.py index 437d54d227..a1ca0057fe 100644 --- a/jupyter_server/gateway/gateway_client.py +++ b/jupyter_server/gateway/gateway_client.py @@ -757,11 +757,12 @@ async def _is_retryable(self, method: str, exception: Exception) -> bool: async def gateway_request(endpoint: str, **kwargs: ty.Any) -> HTTPResponse: """Make an async request to kernel gateway endpoint, returns a response""" - kwargs = GatewayClient.instance().load_connection_args(**kwargs) + gateway_client = GatewayClient.instance() + kwargs = gateway_client.load_connection_args(**kwargs) rhc = RetryableHTTPClient() try: response = await rhc.fetch(endpoint, **kwargs) - GatewayClient.instance().emit( + gateway_client.emit( data={STATUS_KEY: SUCCESS_STATUS, STATUS_CODE_KEY: 200, MESSAGE_KEY: "success"} ) # Trap a set of common exceptions so that we can inform the user that their Gateway url is incorrect @@ -769,10 +770,12 @@ async def gateway_request(endpoint: str, **kwargs: ty.Any) -> HTTPResponse: # NOTE: We do this here since this handler is called during the server's startup and subsequent refreshes # of the tree view. 
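The kernel_ws_protocol trait added to GatewayWebSocketConnection above is the #1311 fix: the server has already negotiated a websocket subprotocol with the browser, so the leg to the gateway must stick to the matching legacy protocol instead of renegotiating. A sketch of how a deployment might tune this traitlets option; the config file name and the behavior of None are assumptions for illustration, not part of this diff:

    # jupyter_server_config.py -- hypothetical deployment config
    c = get_config()  # noqa: F821  (injected by traitlets when this file is loaded)

    # The new default "" pins the legacy kernel websocket subprotocol, which is
    # what the gateway side expects; None would presumably defer to negotiation.
    c.GatewayWebSocketConnection.kernel_ws_protocol = ""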
except HTTPClientError as e: - GatewayClient.instance().emit( + gateway_client.emit( data={STATUS_KEY: ERROR_STATUS, STATUS_CODE_KEY: e.code, MESSAGE_KEY: str(e.message)} ) - error_reason = f"Exception while attempting to connect to Gateway server url '{GatewayClient.instance().url}'" + error_reason = ( + f"Exception while attempting to connect to Gateway server url '{gateway_client.url}'" + ) error_message = e.message if e.response: try: @@ -788,38 +791,39 @@ async def gateway_request(endpoint: str, **kwargs: ty.Any) -> HTTPResponse: "Ensure gateway url is valid and the Gateway instance is running.", ) from e except ConnectionError as e: - GatewayClient.instance().emit( + gateway_client.emit( data={STATUS_KEY: ERROR_STATUS, STATUS_CODE_KEY: 503, MESSAGE_KEY: str(e)} ) raise web.HTTPError( 503, - f"ConnectionError was received from Gateway server url '{GatewayClient.instance().url}'. " + f"ConnectionError was received from Gateway server url '{gateway_client.url}'. " "Check to be sure the Gateway instance is running.", ) from e except gaierror as e: - GatewayClient.instance().emit( + gateway_client.emit( data={STATUS_KEY: ERROR_STATUS, STATUS_CODE_KEY: 404, MESSAGE_KEY: str(e)} ) raise web.HTTPError( 404, - f"The Gateway server specified in the gateway_url '{GatewayClient.instance().url}' doesn't " + f"The Gateway server specified in the gateway_url '{gateway_client.url}' doesn't " f"appear to be valid. Ensure gateway url is valid and the Gateway instance is running.", ) from e except Exception as e: - GatewayClient.instance().emit( + gateway_client.emit( data={STATUS_KEY: ERROR_STATUS, STATUS_CODE_KEY: 505, MESSAGE_KEY: str(e)} ) logging.getLogger("ServerApp").error( - f"Exception while trying to launch kernel via Gateway URL {GatewayClient.instance().url} , {e}", + "Exception while trying to launch kernel via Gateway URL %s: %s", + gateway_client.url, e, ) raise e - if GatewayClient.instance().accept_cookies: + if gateway_client.accept_cookies: # Update cookies on GatewayClient from server if configured. 
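The rewritten ServerApp error log just above is the #1385 fix: the old call passed a fully formatted f-string plus a trailing `e` argument, leaving logging with an argument but no placeholder to put it in. With a %-style format string the arguments line up, and interpolation is deferred until the record is actually emitted. A standalone illustration (logger name reused from the diff, values invented):

    import logging

    logger = logging.getLogger("ServerApp")
    url, err = "http://gateway:8888", ConnectionError("boom")

    # Buggy shape: the message is already formatted, so the stray argument has no
    # placeholder; the handler reports "not all arguments converted" at emit time.
    # logger.error(f"Exception while trying to launch kernel via Gateway URL {url} , {err}", err)

    # Fixed shape: placeholders and arguments match, and formatting is lazy.
    logger.error("Exception while trying to launch kernel via Gateway URL %s: %s", url, err)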
cookie_values = response.headers.get("Set-Cookie") if cookie_values: cookie: SimpleCookie = SimpleCookie() cookie.load(cookie_values) - GatewayClient.instance().update_cookies(cookie) + gateway_client.update_cookies(cookie) return response diff --git a/jupyter_server/gateway/managers.py b/jupyter_server/gateway/managers.py index 4a89cfb158..cd0b27b50d 100644 --- a/jupyter_server/gateway/managers.py +++ b/jupyter_server/gateway/managers.py @@ -25,9 +25,9 @@ from .._tz import UTC, utcnow from ..services.kernels.kernelmanager import ( - AsyncMappingKernelManager, - ServerKernelManager, - emit_kernel_action_event, + AsyncMappingKernelManager, + ServerKernelManager, + emit_kernel_action_event, ) from ..services.sessions.sessionmanager import SessionManager from ..utils import url_path_join @@ -35,844 +35,844 @@ class GatewayMappingKernelManager(AsyncMappingKernelManager): - """Kernel manager that supports remote kernels hosted by Jupyter Kernel or Enterprise Gateway.""" + """Kernel manager that supports remote kernels hosted by Jupyter Kernel or Enterprise Gateway.""" - # We'll maintain our own set of kernel ids - _kernels: dict[str, GatewayKernelManager] = {} # type:ignore[assignment] + # We'll maintain our own set of kernel ids + _kernels: dict[str, GatewayKernelManager] = {} # type:ignore[assignment] - @default("kernel_manager_class") - def _default_kernel_manager_class(self): - return "jupyter_server.gateway.managers.GatewayKernelManager" + @default("kernel_manager_class") + def _default_kernel_manager_class(self): + return "jupyter_server.gateway.managers.GatewayKernelManager" - @default("shared_context") - def _default_shared_context(self): - return False # no need to share zmq contexts + @default("shared_context") + def _default_shared_context(self): + return False # no need to share zmq contexts - def __init__(self, **kwargs): - """Initialize a gateway mapping kernel manager.""" - super().__init__(**kwargs) - self.kernels_url = url_path_join( - GatewayClient.instance().url or "", GatewayClient.instance().kernels_endpoint or "" - ) - - def remove_kernel(self, kernel_id): - """Complete override since we want to be more tolerant of missing keys""" - try: - return self._kernels.pop(kernel_id) - except KeyError: - pass - - async def start_kernel(self, *, kernel_id=None, path=None, **kwargs): - """Start a kernel for a session and return its kernel_id. - - Parameters - ---------- - kernel_id : uuid - The uuid to associate the new kernel with. If this - is not None, this kernel will be persistent whenever it is - requested. - path : API path - The API path (unicode, '/' delimited) for the cwd. - Will be transformed to an OS path relative to root_dir. 
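For the cookie forwarding above: SimpleCookie.load parses a raw Set-Cookie header value into morsels that update_cookies can then store on the GatewayClient. The parsing step in isolation, with an invented header value:

    from http.cookies import SimpleCookie

    cookie: SimpleCookie = SimpleCookie()
    cookie.load('session="abc123"; Path=/; HttpOnly')  # e.g. response.headers.get("Set-Cookie")
    print(cookie["session"].value)  # abc123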
- """ - self.log.info(f"Request start kernel: kernel_id={kernel_id}, path='{path}'") - - if kernel_id is None and path is not None: - kwargs["cwd"] = self.cwd_for_path(path) + def __init__(self, **kwargs): + """Initialize a gateway mapping kernel manager.""" + super().__init__(**kwargs) + self.kernels_url = url_path_join( + GatewayClient.instance().url or "", GatewayClient.instance().kernels_endpoint or "" + ) - km = self.kernel_manager_factory(parent=self, log=self.log) - await km.start_kernel(kernel_id=kernel_id, **kwargs) - kernel_id = km.kernel_id - self._kernels[kernel_id] = km - # Initialize culling if not already - if not self._initialized_culler: - self.initialize_culler() + def remove_kernel(self, kernel_id): + """Complete override since we want to be more tolerant of missing keys""" + try: + return self._kernels.pop(kernel_id) + except KeyError: + pass + + async def start_kernel(self, *, kernel_id=None, path=None, **kwargs): + """Start a kernel for a session and return its kernel_id. + + Parameters + ---------- + kernel_id : uuid + The uuid to associate the new kernel with. If this + is not None, this kernel will be persistent whenever it is + requested. + path : API path + The API path (unicode, '/' delimited) for the cwd. + Will be transformed to an OS path relative to root_dir. + """ + self.log.info(f"Request start kernel: kernel_id={kernel_id}, path='{path}'") + + if kernel_id is None and path is not None: + kwargs["cwd"] = self.cwd_for_path(path) + + km = self.kernel_manager_factory(parent=self, log=self.log) + await km.start_kernel(kernel_id=kernel_id, **kwargs) + kernel_id = km.kernel_id + self._kernels[kernel_id] = km + # Initialize culling if not already + if not self._initialized_culler: + self.initialize_culler() + + return kernel_id + + async def kernel_model(self, kernel_id): + """Return a dictionary of kernel information described in the + JSON standard model. + + Parameters + ---------- + kernel_id : uuid + The uuid of the kernel. + """ + model = None + km = self.get_kernel(str(kernel_id)) + if km: # type:ignore[truthy-bool] + model = km.kernel # type:ignore[attr-defined] + return model + + async def list_kernels(self, **kwargs): + """Get a list of running kernels from the Gateway server. + + We'll use this opportunity to refresh the models in each of + the kernels we're managing. + """ + self.log.debug(f"Request list kernels: {self.kernels_url}") + response = await gateway_request(self.kernels_url, method="GET") + kernels = json_decode(response.body) + # Refresh our models to those we know about, and filter + # the return value with only our kernels. + kernel_models = {} + for model in kernels: + kid = model["id"] + if kid in self._kernels: + await self._kernels[kid].refresh_model(model) + kernel_models[kid] = model + # Remove any of our kernels that may have been culled on the gateway server + our_kernels = self._kernels.copy() + culled_ids = [] + for kid, _ in our_kernels.items(): + if kid not in kernel_models: + # The upstream kernel was not reported in the list of kernels. + self.log.warning( + f"Kernel {kid} not present in the list of kernels - possibly culled on Gateway server." + ) + try: + # Try to directly refresh the model for this specific kernel in case + # the upstream list of kernels was erroneously incomplete. + # + # That might happen if the case of a proxy that manages multiple + # backends where there could be transient connectivity issues with + # a single backend. 
+ # + # Alternatively, it could happen if there is simply a bug in the + # upstream gateway server. + # + # Either way, including this check improves our reliability in the + # face of such scenarios. + model = await self._kernels[kid].refresh_model() + except web.HTTPError: + model = None + if model: + kernel_models[kid] = model + else: + self.log.warning( + f"Kernel {kid} no longer active - probably culled on Gateway server." + ) + self._kernels.pop(kid, None) + culled_ids.append(kid) # TODO: Figure out what do with these. + return list(kernel_models.values()) + + async def shutdown_kernel(self, kernel_id, now=False, restart=False): + """Shutdown a kernel by its kernel uuid. + + Parameters + ========== + kernel_id : uuid + The id of the kernel to shutdown. + now : bool + Shutdown the kernel immediately (True) or gracefully (False) + restart : bool + The purpose of this shutdown is to restart the kernel (True) + """ + km = self.get_kernel(kernel_id) + await ensure_async(km.shutdown_kernel(now=now, restart=restart)) + self.remove_kernel(kernel_id) + + async def restart_kernel(self, kernel_id, now=False, **kwargs): + """Restart a kernel by its kernel uuid. + + Parameters + ========== + kernel_id : uuid + The id of the kernel to restart. + """ + km = self.get_kernel(kernel_id) + await ensure_async(km.restart_kernel(now=now, **kwargs)) + + async def interrupt_kernel(self, kernel_id, **kwargs): + """Interrupt a kernel by its kernel uuid. + + Parameters + ========== + kernel_id : uuid + The id of the kernel to interrupt. + """ + km = self.get_kernel(kernel_id) + await ensure_async(km.interrupt_kernel()) + + async def shutdown_all(self, now=False): + """Shutdown all kernels.""" + kids = list(self._kernels) + for kernel_id in kids: + km = self.get_kernel(kernel_id) + await ensure_async(km.shutdown_kernel(now=now)) + self.remove_kernel(kernel_id) + + async def cull_kernels(self): + """Override cull_kernels, so we can be sure their state is current.""" + await self.list_kernels() + await super().cull_kernels() - return kernel_id - async def kernel_model(self, kernel_id): - """Return a dictionary of kernel information described in the - JSON standard model. +class GatewayKernelSpecManager(KernelSpecManager): + """A gateway kernel spec manager.""" - Parameters - ---------- - kernel_id : uuid - The uuid of the kernel. - """ - model = None - km = self.get_kernel(str(kernel_id)) - if km: # type:ignore[truthy-bool] - model = km.kernel # type:ignore[attr-defined] - return model + def __init__(self, **kwargs): + """Initialize a gateway kernel spec manager.""" + super().__init__(**kwargs) + base_endpoint = url_path_join( + GatewayClient.instance().url or "", GatewayClient.instance().kernelspecs_endpoint + ) - async def list_kernels(self, **kwargs): - """Get a list of running kernels from the Gateway server. + self.base_endpoint = GatewayKernelSpecManager._get_endpoint_for_user_filter(base_endpoint) + self.base_resource_endpoint = url_path_join( + GatewayClient.instance().url or "", + GatewayClient.instance().kernelspecs_resource_endpoint, + ) - We'll use this opportunity to refresh the models in each of - the kernels we're managing. - """ - self.log.debug(f"Request list kernels: {self.kernels_url}") - response = await gateway_request(self.kernels_url, method="GET") - kernels = json_decode(response.body) - # Refresh our models to those we know about, and filter - # the return value with only our kernels. 
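The list_kernels override above reconciles the server's local _kernels map against whatever the gateway reports, re-checking each missing kernel once before dropping it. The core pattern, reduced to plain dictionaries with invented names:

    local = {"k1": object(), "k2": object()}  # kernels this server is tracking
    remote = {"k1": {"id": "k1"}}             # models the gateway just returned

    for kid in list(local):
        if kid not in remote:
            # The real code first retries a direct fetch (refresh_model) in case
            # the remote listing was transiently incomplete, and only then culls.
            local.pop(kid, None)

    assert set(local) == {"k1"}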
- kernel_models = {} - for model in kernels: - kid = model["id"] - if kid in self._kernels: - await self._kernels[kid].refresh_model(model) - kernel_models[kid] = model - # Remove any of our kernels that may have been culled on the gateway server - our_kernels = self._kernels.copy() - culled_ids = [] - for kid, _ in our_kernels.items(): - if kid not in kernel_models: - # The upstream kernel was not reported in the list of kernels. - self.log.warning( - f"Kernel {kid} not present in the list of kernels - possibly culled on Gateway server." + @staticmethod + def _get_endpoint_for_user_filter(default_endpoint): + """Get the endpoint for a user filter.""" + kernel_user = os.environ.get("KERNEL_USERNAME") + if kernel_user: + return "?user=".join([default_endpoint, kernel_user]) + return default_endpoint + + def _replace_path_kernelspec_resources(self, kernel_specs): + """Helper method that replaces any gateway base_url with the server's base_url + This enables clients to properly route through jupyter_server to a gateway + for kernel resources such as logo files + """ + if not self.parent: + return {} + kernelspecs = kernel_specs["kernelspecs"] + for kernel_name in kernelspecs: + resources = kernelspecs[kernel_name]["resources"] + for resource_name in resources: + original_path = resources[resource_name] + split_eg_base_url = str.rsplit(original_path, sep="/kernelspecs/", maxsplit=1) + if len(split_eg_base_url) > 1: + new_path = url_path_join( + self.parent.base_url, "kernelspecs", split_eg_base_url[1] + ) + kernel_specs["kernelspecs"][kernel_name]["resources"][resource_name] = new_path + if original_path != new_path: + self.log.debug( + f"Replaced original kernel resource path {original_path} with new " + f"path {kernel_specs['kernelspecs'][kernel_name]['resources'][resource_name]}" + ) + return kernel_specs + + def _get_kernelspecs_endpoint_url(self, kernel_name=None): + """Builds a url for the kernels endpoint + Parameters + ---------- + kernel_name : kernel name (optional) + """ + if kernel_name: + return url_path_join(self.base_endpoint, url_escape(kernel_name)) + + return self.base_endpoint + + async def get_all_specs(self): + """Get all of the kernel specs for the gateway.""" + fetched_kspecs = await self.list_kernel_specs() + + # get the default kernel name and compare to that of this server. + # If different log a warning and reset the default. However, the + # caller of this method will still return this server's value until + # the next fetch of kernelspecs - at which time they'll match. + if not self.parent: + return {} + km = self.parent.kernel_manager + remote_default_kernel_name = fetched_kspecs.get("default") + if remote_default_kernel_name != km.default_kernel_name: + self.log.info( + f"Default kernel name on Gateway server ({remote_default_kernel_name}) differs from " + f"Notebook server ({km.default_kernel_name}). Updating to Gateway server's value." + ) + km.default_kernel_name = remote_default_kernel_name + + remote_kspecs = fetched_kspecs.get("kernelspecs") + return remote_kspecs + + async def list_kernel_specs(self): + """Get a list of kernel specs.""" + kernel_spec_url = self._get_kernelspecs_endpoint_url() + self.log.debug(f"Request list kernel specs at: {kernel_spec_url}") + response = await gateway_request(kernel_spec_url, method="GET") + kernel_specs = json_decode(response.body) + kernel_specs = self._replace_path_kernelspec_resources(kernel_specs) + return kernel_specs + + async def get_kernel_spec(self, kernel_name, **kwargs): + """Get kernel spec for kernel_name. 
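_replace_path_kernelspec_resources above rewrites gateway-absolute resource URLs so that browsers fetch logos and other kernelspec assets through this server's base_url. The rewrite step on its own, with invented URLs:

    from jupyter_server.utils import url_path_join

    original = "http://gateway:8888/kernelspecs/python3/logo-64x64.png"
    tail = original.rsplit("/kernelspecs/", maxsplit=1)[1]
    rewritten = url_path_join("/user/base/", "kernelspecs", tail)
    print(rewritten)  # /user/base/kernelspecs/python3/logo-64x64.png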
+ + Parameters + ---------- + kernel_name : str + The name of the kernel. + """ + kernel_spec_url = self._get_kernelspecs_endpoint_url(kernel_name=str(kernel_name)) + self.log.debug(f"Request kernel spec at: {kernel_spec_url}") + try: + response = await gateway_request(kernel_spec_url, method="GET") + except web.HTTPError as error: + if error.status_code == 404: + # Convert not found to KeyError since that's what the Notebook handler expects + # message is not used, but might as well make it useful for troubleshooting + msg = f"kernelspec {kernel_name} not found on Gateway server at: {GatewayClient.instance().url}" + raise KeyError(msg) from None + else: + raise + else: + kernel_spec = json_decode(response.body) + + return kernel_spec + + async def get_kernel_spec_resource(self, kernel_name, path): + """Get kernel spec for kernel_name. + + Parameters + ---------- + kernel_name : str + The name of the kernel. + path : str + The name of the desired resource + """ + kernel_spec_resource_url = url_path_join( + self.base_resource_endpoint, str(kernel_name), str(path) ) + self.log.debug(f"Request kernel spec resource '{path}' at: {kernel_spec_resource_url}") try: - # Try to directly refresh the model for this specific kernel in case - # the upstream list of kernels was erroneously incomplete. - # - # That might happen if the case of a proxy that manages multiple - # backends where there could be transient connectivity issues with - # a single backend. - # - # Alternatively, it could happen if there is simply a bug in the - # upstream gateway server. - # - # Either way, including this check improves our reliability in the - # face of such scenarios. - model = await self._kernels[kid].refresh_model() - except web.HTTPError: - model = None - if model: - kernel_models[kid] = model + response = await gateway_request(kernel_spec_resource_url, method="GET") + except web.HTTPError as error: + if error.status_code == 404: + kernel_spec_resource = None + else: + raise else: - self.log.warning( - f"Kernel {kid} no longer active - probably culled on Gateway server." - ) - self._kernels.pop(kid, None) - culled_ids.append(kid) # TODO: Figure out what do with these. - return list(kernel_models.values()) - - async def shutdown_kernel(self, kernel_id, now=False, restart=False): - """Shutdown a kernel by its kernel uuid. - - Parameters - ========== - kernel_id : uuid - The id of the kernel to shutdown. - now : bool - Shutdown the kernel immediately (True) or gracefully (False) - restart : bool - The purpose of this shutdown is to restart the kernel (True) - """ - km = self.get_kernel(kernel_id) - await ensure_async(km.shutdown_kernel(now=now, restart=restart)) - self.remove_kernel(kernel_id) + kernel_spec_resource = response.body + return kernel_spec_resource - async def restart_kernel(self, kernel_id, now=False, **kwargs): - """Restart a kernel by its kernel uuid. - Parameters - ========== - kernel_id : uuid - The id of the kernel to restart. - """ - km = self.get_kernel(kernel_id) - await ensure_async(km.restart_kernel(now=now, **kwargs)) +class GatewaySessionManager(SessionManager): + """A gateway session manager.""" - async def interrupt_kernel(self, kernel_id, **kwargs): - """Interrupt a kernel by its kernel uuid. + kernel_manager = Instance("jupyter_server.gateway.managers.GatewayMappingKernelManager") - Parameters - ========== - kernel_id : uuid - The id of the kernel to interrupt. 
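The 404-to-KeyError translation in get_kernel_spec above matters because the kernelspec handlers treat KeyError as "spec not found" and answer with a proper 404, while any other exception would surface as a 500. The shape of that translation in isolation; a synthetic HTTPError stands in for a failed gateway request:

    from tornado import web

    def fetch_spec(kernel_name: str) -> None:
        try:
            raise web.HTTPError(404)  # stand-in for a failed gateway request
        except web.HTTPError as error:
            if error.status_code == 404:
                msg = f"kernelspec {kernel_name} not found on Gateway server"
                raise KeyError(msg) from None
            raise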
- """ - km = self.get_kernel(kernel_id) - await ensure_async(km.interrupt_kernel()) + async def kernel_culled(self, kernel_id: str) -> bool: # typing: ignore + """Checks if the kernel is still considered alive and returns true if it's not found.""" + km: Optional[GatewayKernelManager] = None + try: + # Since we keep the models up-to-date via client polling, use that state to determine + # if this kernel no longer exists on the gateway server rather than perform a redundant + # fetch operation - especially since this is called at approximately the same interval. + # This has the effect of reducing GET /api/kernels requests against the gateway server + # by 50%! + # Note that should the redundant polling be consolidated, or replaced with an event-based + # notification model, this will need to be revisited. + km = self.kernel_manager.get_kernel(kernel_id) + except Exception: + # Let exceptions here reflect culled kernel + pass + return km is None - async def shutdown_all(self, now=False): - """Shutdown all kernels.""" - kids = list(self._kernels) - for kernel_id in kids: - km = self.get_kernel(kernel_id) - await ensure_async(km.shutdown_kernel(now=now)) - self.remove_kernel(kernel_id) - async def cull_kernels(self): - """Override cull_kernels, so we can be sure their state is current.""" - await self.list_kernels() - await super().cull_kernels() +class GatewayKernelManager(ServerKernelManager): + """Manages a single kernel remotely via a Gateway Server.""" + kernel_id: Optional[str] = None # type:ignore[assignment] + kernel = None -class GatewayKernelSpecManager(KernelSpecManager): - """A gateway kernel spec manager.""" + @default("cache_ports") + def _default_cache_ports(self): + return False # no need to cache ports here - def __init__(self, **kwargs): - """Initialize a gateway kernel spec manager.""" - super().__init__(**kwargs) - base_endpoint = url_path_join( - GatewayClient.instance().url or "", GatewayClient.instance().kernelspecs_endpoint + def __init__(self, **kwargs): + """Initialize the gateway kernel manager.""" + super().__init__(**kwargs) + self.kernels_url = url_path_join( + GatewayClient.instance().url or "", GatewayClient.instance().kernels_endpoint + ) + self.kernel_url: str + self.kernel = self.kernel_id = None + # simulate busy/activity markers: + self.execution_state = "starting" + self.last_activity = utcnow() + + @property + def has_kernel(self): + """Has a kernel been started that we are managing.""" + return self.kernel is not None + + client_class = DottedObjectName("jupyter_server.gateway.managers.GatewayKernelClient") + client_factory = Type(klass="jupyter_server.gateway.managers.GatewayKernelClient") + + # -------------------------------------------------------------------------- + # create a Client connected to our Kernel + # -------------------------------------------------------------------------- + + def client(self, **kwargs): + """Create a client configured to connect to our kernel""" + kw: dict[str, Any] = {} + kw.update(self.get_connection_info(session=True)) + kw.update( + { + "connection_file": self.connection_file, + "parent": self, + } + ) + kw["kernel_id"] = self.kernel_id + + # add kwargs last, for manual overrides + kw.update(kwargs) + return self.client_factory(**kw) + + async def refresh_model(self, model=None): + """Refresh the kernel model. + + Parameters + ---------- + model : dict + The model from which to refresh the kernel. If None, the kernel + model is fetched from the Gateway server. 
+ """ + if model is None: + self.log.debug("Request kernel at: %s" % self.kernel_url) + try: + response = await gateway_request(self.kernel_url, method="GET") + + except web.HTTPError as error: + if error.status_code == 404: + self.log.warning("Kernel not found at: %s" % self.kernel_url) + model = None + else: + raise + else: + model = json_decode(response.body) + self.log.debug("Kernel retrieved: %s" % model) + + if model: # Update activity markers + self.last_activity = datetime.datetime.strptime( + model["last_activity"], "%Y-%m-%dT%H:%M:%S.%fZ" + ).replace(tzinfo=UTC) + self.execution_state = model["execution_state"] + if isinstance(self.parent, AsyncMappingKernelManager): + # Update connections only if there's a mapping kernel manager parent for + # this kernel manager. The current kernel manager instance may not have + # a parent instance if, say, a server extension is using another application + # (e.g., papermill) that uses a KernelManager instance directly. + self.parent._kernel_connections[self.kernel_id] = int(model["connections"]) # type:ignore[index] + + self.kernel = model + return model + + # -------------------------------------------------------------------------- + # Kernel management + # -------------------------------------------------------------------------- + + @emit_kernel_action_event( + success_msg="Kernel {kernel_id} was started.", ) - - self.base_endpoint = GatewayKernelSpecManager._get_endpoint_for_user_filter(base_endpoint) - self.base_resource_endpoint = url_path_join( - GatewayClient.instance().url or "", - GatewayClient.instance().kernelspecs_resource_endpoint, - ) - - @staticmethod - def _get_endpoint_for_user_filter(default_endpoint): - """Get the endpoint for a user filter.""" - kernel_user = os.environ.get("KERNEL_USERNAME") - if kernel_user: - return "?user=".join([default_endpoint, kernel_user]) - return default_endpoint - - def _replace_path_kernelspec_resources(self, kernel_specs): - """Helper method that replaces any gateway base_url with the server's base_url - This enables clients to properly route through jupyter_server to a gateway - for kernel resources such as logo files - """ - if not self.parent: - return {} - kernelspecs = kernel_specs["kernelspecs"] - for kernel_name in kernelspecs: - resources = kernelspecs[kernel_name]["resources"] - for resource_name in resources: - original_path = resources[resource_name] - split_eg_base_url = str.rsplit(original_path, sep="/kernelspecs/", maxsplit=1) - if len(split_eg_base_url) > 1: - new_path = url_path_join( - self.parent.base_url, "kernelspecs", split_eg_base_url[1] - ) - kernel_specs["kernelspecs"][kernel_name]["resources"][resource_name] = new_path - if original_path != new_path: - self.log.debug( - f"Replaced original kernel resource path {original_path} with new " - f"path {kernel_specs['kernelspecs'][kernel_name]['resources'][resource_name]}" + async def start_kernel(self, **kwargs): + """Starts a kernel via HTTP in an asynchronous manner. + + Parameters + ---------- + `**kwargs` : optional + keyword arguments that are passed down to build the kernel_cmd + and launching the kernel (e.g. Popen kwargs). + """ + kernel_id = kwargs.get("kernel_id") + + if kernel_id is None: + kernel_name = kwargs.get("kernel_name", "python3") + self.log.debug("Request new kernel at: %s" % self.kernels_url) + + # Let KERNEL_USERNAME take precedent over http_user config option. 
+ if os.environ.get("KERNEL_USERNAME") is None and GatewayClient.instance().http_user: + os.environ["KERNEL_USERNAME"] = GatewayClient.instance().http_user or "" + + payload_envs = os.environ.copy() + payload_envs.update(kwargs.get("env", {})) # Add any env entries in this request + + # Build the actual env payload, filtering allowed_envs and those starting with 'KERNEL_' + kernel_env = { + k: v + for (k, v) in payload_envs.items() + if k.startswith("KERNEL_") or k in GatewayClient.instance().allowed_envs.split(",") + } + + # Convey the full path to where this notebook file is located. + if kwargs.get("cwd") is not None and kernel_env.get("KERNEL_WORKING_DIR") is None: + kernel_env["KERNEL_WORKING_DIR"] = kwargs["cwd"] + + json_body = json_encode({"name": kernel_name, "env": kernel_env}) + + response = await gateway_request( + self.kernels_url, + method="POST", + headers={"Content-Type": "application/json"}, + body=json_body, ) - return kernel_specs - - def _get_kernelspecs_endpoint_url(self, kernel_name=None): - """Builds a url for the kernels endpoint - Parameters - ---------- - kernel_name : kernel name (optional) - """ - if kernel_name: - return url_path_join(self.base_endpoint, url_escape(kernel_name)) - - return self.base_endpoint - - async def get_all_specs(self): - """Get all of the kernel specs for the gateway.""" - fetched_kspecs = await self.list_kernel_specs() - - # get the default kernel name and compare to that of this server. - # If different log a warning and reset the default. However, the - # caller of this method will still return this server's value until - # the next fetch of kernelspecs - at which time they'll match. - if not self.parent: - return {} - km = self.parent.kernel_manager - remote_default_kernel_name = fetched_kspecs.get("default") - if remote_default_kernel_name != km.default_kernel_name: - self.log.info( - f"Default kernel name on Gateway server ({remote_default_kernel_name}) differs from " - f"Notebook server ({km.default_kernel_name}). Updating to Gateway server's value." - ) - km.default_kernel_name = remote_default_kernel_name - - remote_kspecs = fetched_kspecs.get("kernelspecs") - return remote_kspecs - - async def list_kernel_specs(self): - """Get a list of kernel specs.""" - kernel_spec_url = self._get_kernelspecs_endpoint_url() - self.log.debug(f"Request list kernel specs at: {kernel_spec_url}") - response = await gateway_request(kernel_spec_url, method="GET") - kernel_specs = json_decode(response.body) - kernel_specs = self._replace_path_kernelspec_resources(kernel_specs) - return kernel_specs - - async def get_kernel_spec(self, kernel_name, **kwargs): - """Get kernel spec for kernel_name. - - Parameters - ---------- - kernel_name : str - The name of the kernel. - """ - kernel_spec_url = self._get_kernelspecs_endpoint_url(kernel_name=str(kernel_name)) - self.log.debug(f"Request kernel spec at: {kernel_spec_url}") - try: - response = await gateway_request(kernel_spec_url, method="GET") - except web.HTTPError as error: - if error.status_code == 404: - # Convert not found to KeyError since that's what the Notebook handler expects - # message is not used, but might as well make it useful for troubleshooting - msg = f"kernelspec {kernel_name} not found on Gateway server at: {GatewayClient.instance().url}" - raise KeyError(msg) from None - else: - raise - else: - kernel_spec = json_decode(response.body) - - return kernel_spec - - async def get_kernel_spec_resource(self, kernel_name, path): - """Get kernel spec for kernel_name. 
- - Parameters - ---------- - kernel_name : str - The name of the kernel. - path : str - The name of the desired resource - """ - kernel_spec_resource_url = url_path_join( - self.base_resource_endpoint, str(kernel_name), str(path) - ) - self.log.debug(f"Request kernel spec resource '{path}' at: {kernel_spec_resource_url}") - try: - response = await gateway_request(kernel_spec_resource_url, method="GET") - except web.HTTPError as error: - if error.status_code == 404: - kernel_spec_resource = None - else: - raise - else: - kernel_spec_resource = response.body - return kernel_spec_resource - - -class GatewaySessionManager(SessionManager): - """A gateway session manager.""" - - kernel_manager = Instance("jupyter_server.gateway.managers.GatewayMappingKernelManager") - - async def kernel_culled(self, kernel_id: str) -> bool: # typing: ignore - """Checks if the kernel is still considered alive and returns true if it's not found.""" - km: Optional[GatewayKernelManager] = None - try: - # Since we keep the models up-to-date via client polling, use that state to determine - # if this kernel no longer exists on the gateway server rather than perform a redundant - # fetch operation - especially since this is called at approximately the same interval. - # This has the effect of reducing GET /api/kernels requests against the gateway server - # by 50%! - # Note that should the redundant polling be consolidated, or replaced with an event-based - # notification model, this will need to be revisited. - km = self.kernel_manager.get_kernel(kernel_id) - except Exception: - # Let exceptions here reflect culled kernel - pass - return km is None - - -class GatewayKernelManager(ServerKernelManager): - """Manages a single kernel remotely via a Gateway Server.""" - - kernel_id: Optional[str] = None # type:ignore[assignment] - kernel = None - - @default("cache_ports") - def _default_cache_ports(self): - return False # no need to cache ports here + self.kernel = json_decode(response.body) + self.kernel_id = self.kernel["id"] + self.kernel_url = url_path_join(self.kernels_url, url_escape(str(self.kernel_id))) + self.log.info(f"GatewayKernelManager started kernel: {self.kernel_id}, args: {kwargs}") + else: + self.kernel_id = kernel_id + self.kernel_url = url_path_join(self.kernels_url, url_escape(str(self.kernel_id))) + self.kernel = await self.refresh_model() + self.log.info(f"GatewayKernelManager using existing kernel: {self.kernel_id}") - def __init__(self, **kwargs): - """Initialize the gateway kernel manager.""" - super().__init__(**kwargs) - self.kernels_url = url_path_join( - GatewayClient.instance().url or "", GatewayClient.instance().kernels_endpoint + @emit_kernel_action_event( + success_msg="Kernel {kernel_id} was shutdown.", ) - self.kernel_url: str - self.kernel = self.kernel_id = None - # simulate busy/activity markers: - self.execution_state = "starting" - self.last_activity = utcnow() - - @property - def has_kernel(self): - """Has a kernel been started that we are managing.""" - return self.kernel is not None - - client_class = DottedObjectName("jupyter_server.gateway.managers.GatewayKernelClient") - client_factory = Type(klass="jupyter_server.gateway.managers.GatewayKernelClient") - - # -------------------------------------------------------------------------- - # create a Client connected to our Kernel - # -------------------------------------------------------------------------- - - def client(self, **kwargs): - """Create a client configured to connect to our kernel""" - kw: dict[str, Any] = {} - 
kw.update(self.get_connection_info(session=True)) - kw.update( - { - "connection_file": self.connection_file, - "parent": self, - } + async def shutdown_kernel(self, now=False, restart=False): + """Attempts to stop the kernel process cleanly via HTTP.""" + + if self.has_kernel: + self.log.debug("Request shutdown kernel at: %s", self.kernel_url) + try: + response = await gateway_request(self.kernel_url, method="DELETE") + self.log.debug("Shutdown kernel response: %d %s", response.code, response.reason) + except web.HTTPError as error: + if error.status_code == 404: + self.log.debug("Shutdown kernel response: kernel not found (ignored)") + else: + raise + + @emit_kernel_action_event( + success_msg="Kernel {kernel_id} was restarted.", ) - kw["kernel_id"] = self.kernel_id + async def restart_kernel(self, **kw): + """Restarts a kernel via HTTP.""" + if self.has_kernel: + assert self.kernel_url is not None + kernel_url = self.kernel_url + "/restart" + self.log.debug("Request restart kernel at: %s", kernel_url) + response = await gateway_request( + kernel_url, + method="POST", + headers={"Content-Type": "application/json"}, + body=json_encode({}), + ) + self.log.debug("Restart kernel response: %d %s", response.code, response.reason) - # add kwargs last, for manual overrides - kw.update(kwargs) - return self.client_factory(**kw) + @emit_kernel_action_event( + success_msg="Kernel {kernel_id} was interrupted.", + ) + async def interrupt_kernel(self): + """Interrupts the kernel via an HTTP request.""" + if self.has_kernel: + assert self.kernel_url is not None + kernel_url = self.kernel_url + "/interrupt" + self.log.debug("Request interrupt kernel at: %s", kernel_url) + response = await gateway_request( + kernel_url, + method="POST", + headers={"Content-Type": "application/json"}, + body=json_encode({}), + ) + self.log.debug("Interrupt kernel response: %d %s", response.code, response.reason) - async def refresh_model(self, model=None): - """Refresh the kernel model. + async def is_alive(self): + """Is the kernel process still running?""" + if self.has_kernel: + # Go ahead and issue a request to get the kernel + self.kernel = await self.refresh_model() + self.log.debug(f"The kernel: {self.kernel} is alive.") + return True + else: # we don't have a kernel + self.log.debug(f"The kernel: {self.kernel} no longer exists.") + return False - Parameters - ---------- - model : dict - The model from which to refresh the kernel. If None, the kernel - model is fetched from the Gateway server. - """ - if model is None: - self.log.debug("Request kernel at: %s" % self.kernel_url) - try: - response = await gateway_request(self.kernel_url, method="GET") - - except web.HTTPError as error: - if error.status_code == 404: - self.log.warning("Kernel not found at: %s" % self.kernel_url) - model = None - else: - raise - else: - model = json_decode(response.body) - self.log.debug("Kernel retrieved: %s" % model) - - if model: # Update activity markers - self.last_activity = datetime.datetime.strptime( - model["last_activity"], "%Y-%m-%dT%H:%M:%S.%fZ" - ).replace(tzinfo=UTC) - self.execution_state = model["execution_state"] - if isinstance(self.parent, AsyncMappingKernelManager): - # Update connections only if there's a mapping kernel manager parent for - # this kernel manager. The current kernel manager instance may not have - # a parent instance if, say, a server extension is using another application - # (e.g., papermill) that uses a KernelManager instance directly. 
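The three lifecycle methods above map one-to-one onto the gateway's kernels API: DELETE the kernel URL to shut down, POST to /restart, and POST to /interrupt. A sketch with tornado's plain HTTP client standing in for gateway_request (URL invented):

    from tornado.httpclient import AsyncHTTPClient

    async def interrupt(kernel_url: str) -> None:
        # e.g. kernel_url = "http://gateway:8888/api/kernels/<kernel-id>"
        client = AsyncHTTPClient()
        await client.fetch(
            kernel_url + "/interrupt",
            method="POST",
            headers={"Content-Type": "application/json"},
            body="{}",
        )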
- self.parent._kernel_connections[self.kernel_id] = int(model["connections"]) # type:ignore[index] - - self.kernel = model - return model - - # -------------------------------------------------------------------------- - # Kernel management - # -------------------------------------------------------------------------- - - @emit_kernel_action_event( - success_msg="Kernel {kernel_id} was started.", - ) - async def start_kernel(self, **kwargs): - """Starts a kernel via HTTP in an asynchronous manner. - - Parameters - ---------- - `**kwargs` : optional - keyword arguments that are passed down to build the kernel_cmd - and launching the kernel (e.g. Popen kwargs). - """ - kernel_id = kwargs.get("kernel_id") - - if kernel_id is None: - kernel_name = kwargs.get("kernel_name", "python3") - self.log.debug("Request new kernel at: %s" % self.kernels_url) - - # Let KERNEL_USERNAME take precedent over http_user config option. - if os.environ.get("KERNEL_USERNAME") is None and GatewayClient.instance().http_user: - os.environ["KERNEL_USERNAME"] = GatewayClient.instance().http_user or "" - - payload_envs = os.environ.copy() - payload_envs.update(kwargs.get("env", {})) # Add any env entries in this request - - # Build the actual env payload, filtering allowed_envs and those starting with 'KERNEL_' - kernel_env = { - k: v - for (k, v) in payload_envs.items() - if k.startswith("KERNEL_") or k in GatewayClient.instance().allowed_envs.split(",") - } - - # Convey the full path to where this notebook file is located. - if kwargs.get("cwd") is not None and kernel_env.get("KERNEL_WORKING_DIR") is None: - kernel_env["KERNEL_WORKING_DIR"] = kwargs["cwd"] - - json_body = json_encode({"name": kernel_name, "env": kernel_env}) - - response = await gateway_request( - self.kernels_url, - method="POST", - headers={"Content-Type": "application/json"}, - body=json_body, - ) - self.kernel = json_decode(response.body) - self.kernel_id = self.kernel["id"] - self.kernel_url = url_path_join(self.kernels_url, url_escape(str(self.kernel_id))) - self.log.info(f"GatewayKernelManager started kernel: {self.kernel_id}, args: {kwargs}") - else: - self.kernel_id = kernel_id - self.kernel_url = url_path_join(self.kernels_url, url_escape(str(self.kernel_id))) - self.kernel = await self.refresh_model() - self.log.info(f"GatewayKernelManager using existing kernel: {self.kernel_id}") - - @emit_kernel_action_event( - success_msg="Kernel {kernel_id} was shutdown.", - ) - async def shutdown_kernel(self, now=False, restart=False): - """Attempts to stop the kernel process cleanly via HTTP.""" - - if self.has_kernel: - self.log.debug("Request shutdown kernel at: %s", self.kernel_url) - try: - response = await gateway_request(self.kernel_url, method="DELETE") - self.log.debug("Shutdown kernel response: %d %s", response.code, response.reason) - except web.HTTPError as error: - if error.status_code == 404: - self.log.debug("Shutdown kernel response: kernel not found (ignored)") - else: - raise - - @emit_kernel_action_event( - success_msg="Kernel {kernel_id} was restarted.", - ) - async def restart_kernel(self, **kw): - """Restarts a kernel via HTTP.""" - if self.has_kernel: - assert self.kernel_url is not None - kernel_url = self.kernel_url + "/restart" - self.log.debug("Request restart kernel at: %s", kernel_url) - response = await gateway_request( - kernel_url, - method="POST", - headers={"Content-Type": "application/json"}, - body=json_encode({}), - ) - self.log.debug("Restart kernel response: %d %s", response.code, response.reason) - - 
@emit_kernel_action_event( - success_msg="Kernel {kernel_id} was interrupted.", - ) - async def interrupt_kernel(self): - """Interrupts the kernel via an HTTP request.""" - if self.has_kernel: - assert self.kernel_url is not None - kernel_url = self.kernel_url + "/interrupt" - self.log.debug("Request interrupt kernel at: %s", kernel_url) - response = await gateway_request( - kernel_url, - method="POST", - headers={"Content-Type": "application/json"}, - body=json_encode({}), - ) - self.log.debug("Interrupt kernel response: %d %s", response.code, response.reason) - - async def is_alive(self): - """Is the kernel process still running?""" - if self.has_kernel: - # Go ahead and issue a request to get the kernel - self.kernel = await self.refresh_model() - self.log.debug(f"The kernel: {self.kernel} is alive.") - return True - else: # we don't have a kernel - self.log.debug(f"The kernel: {self.kernel} no longer exists.") - return False - - def cleanup_resources(self, restart=False): - """Clean up resources when the kernel is shut down""" + def cleanup_resources(self, restart=False): + """Clean up resources when the kernel is shut down"""   KernelManagerABC.register(GatewayKernelManager)   class ChannelQueue(Queue): # type:ignore[type-arg] - """A queue for a named channel.""" - - channel_name: Optional[str] = None - response_router_finished: bool - - def __init__(self, channel_name: str, channel_socket: websocket.WebSocket, log: Logger): - """Initialize a channel queue.""" - super().__init__() - self.channel_name = channel_name - self.channel_socket = channel_socket - self.log = log - self.response_router_finished = False - - async def _async_get(self, timeout=None): - """Asynchronously get from the queue.""" - if timeout is None: - timeout = float("inf") - elif timeout < 0: - msg = "'timeout' must be a non-negative number" - raise ValueError(msg) - end_time = monotonic() + timeout - - while True: - try: - return self.get(block=False) - except Empty: - if self.response_router_finished: - msg = "Response router had finished" - raise RuntimeError(msg) from None - if monotonic() > end_time: - raise - await asyncio.sleep(0) - - async def get_msg(self, *args: Any, **kwargs: Any) -> dict[str, Any]: - """Get a message from the queue.""" - timeout = kwargs.get("timeout", 1) - msg = await self._async_get(timeout=timeout) - self.log.debug( - "Received message on channel: {}, msg_id: {}, msg_type: {}".format( - self.channel_name, msg["msg_id"], msg["msg_type"] if msg else "null" - ) - ) - self.task_done() - return cast("dict[str, Any]", msg) - - def send(self, msg: dict[str, Any]) -> None: - """Send a message to the queue.""" - message = json.dumps(msg, default=ChannelQueue.serialize_datetime).replace("</", "<\\/") - self.log.debug( - "Sending message on channel: {}, msg_id: {}, msg_type: {}".format( - self.channel_name, msg["msg_id"], msg["msg_type"] if msg else "null" - ) - ) - self.channel_socket.send(message) - - @staticmethod - def serialize_datetime(dt): - """Serialize a datetime object.""" - if isinstance(dt, (datetime.date, datetime.datetime)): - return dt.timestamp() - return None - - def start(self) -> None: - """Start the queue.""" - - def stop(self) -> None: - """Stop the queue.""" - if not self.empty(): - # If unprocessed messages are detected, drain the queue collecting non-status - # messages. If any remain that are not 'shutdown_reply' and this is not iopub - # go ahead and issue a warning.
- msgs = [] - while self.qsize(): - msg = self.get_nowait() - if msg["msg_type"] != "status": - msgs.append(msg["msg_type"]) - if self.channel_name == "iopub" and "shutdown_reply" in msgs: - return - if len(msgs): - self.log.warning( - "Stopping channel '{}' with {} unprocessed non-status messages: {}.".format( - self.channel_name, len(msgs), msgs - ) - ) - - def is_alive(self) -> bool: - """Whether the queue is alive.""" - return self.channel_socket is not None + """A queue for a named channel.""" + + channel_name: Optional[str] = None + response_router_finished: bool + + def __init__(self, channel_name: str, channel_socket: websocket.WebSocket, log: Logger): + """Initialize a channel queue.""" + super().__init__() + self.channel_name = channel_name + self.channel_socket = channel_socket + self.log = log + self.response_router_finished = False + + async def _async_get(self, timeout=None): + """Asynchronously get from the queue.""" + if timeout is None: + timeout = float("inf") + elif timeout < 0: + msg = "'timeout' must be a non-negative number" + raise ValueError(msg) + end_time = monotonic() + timeout + + while True: + try: + return self.get(block=False) + except Empty: + if self.response_router_finished: + msg = "Response router had finished" + raise RuntimeError(msg) from None + if monotonic() > end_time: + raise + await asyncio.sleep(0) + + async def get_msg(self, *args: Any, **kwargs: Any) -> dict[str, Any]: + """Get a message from the queue.""" + timeout = kwargs.get("timeout", 1) + msg = await self._async_get(timeout=timeout) + self.log.debug( + "Received message on channel: {}, msg_id: {}, msg_type: {}".format( + self.channel_name, msg["msg_id"], msg["msg_type"] if msg else "null" + ) + ) + self.task_done() + return cast("dict[str, Any]", msg) + + def send(self, msg: dict[str, Any]) -> None: + """Send a message to the queue.""" + message = json.dumps(msg, default=ChannelQueue.serialize_datetime).replace("</", "<\\/") + self.log.debug( + "Sending message on channel: {}, msg_id: {}, msg_type: {}".format( + self.channel_name, msg["msg_id"], msg["msg_type"] if msg else "null" + ) + ) + self.channel_socket.send(message) + + @staticmethod + def serialize_datetime(dt): + """Serialize a datetime object.""" + if isinstance(dt, (datetime.date, datetime.datetime)): + return dt.timestamp() + return None + + def start(self) -> None: + """Start the queue.""" + + def stop(self) -> None: + """Stop the queue.""" + if not self.empty(): + # If unprocessed messages are detected, drain the queue collecting non-status + # messages. If any remain that are not 'shutdown_reply' and this is not iopub + # go ahead and issue a warning. + msgs = [] + while self.qsize(): + msg = self.get_nowait() + if msg["msg_type"] != "status": + msgs.append(msg["msg_type"]) + if self.channel_name == "iopub" and "shutdown_reply" in msgs: + return + if len(msgs): + self.log.warning( + "Stopping channel '{}' with {} unprocessed non-status messages: {}.".format( + self.channel_name, len(msgs), msgs + ) + ) + + def is_alive(self) -> bool: + """Whether the queue is alive.""" + return self.channel_socket is not None   class HBChannelQueue(ChannelQueue): - """A queue for the heartbeat channel.""" + """A queue for the heartbeat channel."""  - def is_beating(self) -> bool: - """Whether the channel is beating.""" - # Just use the is_alive status for now - return self.is_alive() + def is_beating(self) -> bool: + """Whether the channel is beating.""" + # Just use the is_alive status for now + return self.is_alive()   class GatewayKernelClient(AsyncKernelClient): - """Communicates with a single kernel indirectly via a websocket to a gateway server. - - There are five channels associated with each kernel: - - * shell: for request/reply calls to the kernel. - * iopub: for the kernel to publish results to frontends. - * hb: for monitoring the kernel's heartbeat. - * stdin: for frontends to reply to raw_input calls in the kernel.
 
 
 class GatewayKernelClient(AsyncKernelClient):
-    """Communicates with a single kernel indirectly via a websocket to a gateway server.
-
-    There are five channels associated with each kernel:
-
-    * shell: for request/reply calls to the kernel.
-    * iopub: for the kernel to publish results to frontends.
-    * hb: for monitoring the kernel's heartbeat.
-    * stdin: for frontends to reply to raw_input calls in the kernel.
-    * control: for kernel management calls to the kernel.
-
-    The messages that can be sent on these channels are exposed as methods of the
-    client (KernelClient.execute, complete, history, etc.). These methods only
-    send the message, they don't wait for a reply. To get results, use e.g.
-    :meth:`get_shell_msg` to fetch messages from the shell channel.
-    """
-
-    # flag for whether execute requests should be allowed to call raw_input:
-    allow_stdin = False
-    _channels_stopped: bool
-    _channel_queues: Optional[dict[str, ChannelQueue]]
-    _control_channel: Optional[ChannelQueue]  # type:ignore[assignment]
-    _hb_channel: Optional[ChannelQueue]  # type:ignore[assignment]
-    _stdin_channel: Optional[ChannelQueue]  # type:ignore[assignment]
-    _iopub_channel: Optional[ChannelQueue]  # type:ignore[assignment]
-    _shell_channel: Optional[ChannelQueue]  # type:ignore[assignment]
-
-    def __init__(self, kernel_id, **kwargs):
-        """Initialize a gateway kernel client."""
-        super().__init__(**kwargs)
-        self.kernel_id = kernel_id
-        self.channel_socket: Optional[websocket.WebSocket] = None
-        self.response_router: Optional[Thread] = None
-        self._channels_stopped = False
-        self._channel_queues = {}
-
-    # --------------------------------------------------------------------------
-    # Channel management methods
-    # --------------------------------------------------------------------------
-
-    async def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True):
-        """Starts the channels for this kernel.
-
-        For this class, we establish a websocket connection to the destination
-        and set up the channel-based queues on which applicable messages will
-        be posted.
-        """
+    """Communicates with a single kernel indirectly via a websocket to a gateway server.
+
-        ws_url = url_path_join(
-            GatewayClient.instance().ws_url or "",
-            GatewayClient.instance().kernels_endpoint,
-            url_escape(self.kernel_id),
-            "channels",
-        )
-        # Gather cert info in case where ssl is desired...
-        ssl_options = {
-            "ca_certs": GatewayClient.instance().ca_certs,
-            "certfile": GatewayClient.instance().client_cert,
-            "keyfile": GatewayClient.instance().client_key,
-        }
-
-        self.channel_socket = websocket.create_connection(
-            ws_url,
-            timeout=GatewayClient.instance().KERNEL_LAUNCH_TIMEOUT,
-            enable_multithread=True,
-            sslopt=ssl_options,
-        )
+    There are five channels associated with each kernel:
+
+    * shell: for request/reply calls to the kernel.
+    * iopub: for the kernel to publish results to frontends.
+    * hb: for monitoring the kernel's heartbeat.
+    * stdin: for frontends to reply to raw_input calls in the kernel.
+    * control: for kernel management calls to the kernel.
+
+    The messages that can be sent on these channels are exposed as methods of the
+    client (KernelClient.execute, complete, history, etc.). These methods only
+    send the message, they don't wait for a reply. To get results, use e.g.
+    :meth:`get_shell_msg` to fetch messages from the shell channel.
+    """
+
+    # flag for whether execute requests should be allowed to call raw_input:
+    allow_stdin = False
+    _channels_stopped: bool
+    _channel_queues: Optional[dict[str, ChannelQueue]]
+    _control_channel: Optional[ChannelQueue]  # type:ignore[assignment]
+    _hb_channel: Optional[ChannelQueue]  # type:ignore[assignment]
+    _stdin_channel: Optional[ChannelQueue]  # type:ignore[assignment]
+    _iopub_channel: Optional[ChannelQueue]  # type:ignore[assignment]
+    _shell_channel: Optional[ChannelQueue]  # type:ignore[assignment]
+
+    def __init__(self, kernel_id, **kwargs):
+        """Initialize a gateway kernel client."""
+        super().__init__(**kwargs)
+        self.kernel_id = kernel_id
+        self.channel_socket: Optional[websocket.WebSocket] = None
+        self.response_router: Optional[Thread] = None
+        self._channels_stopped = False
+        self._channel_queues = {}
+
+    # --------------------------------------------------------------------------
+    # Channel management methods
+    # --------------------------------------------------------------------------
+
+    async def start_channels(self, shell=True, iopub=True, stdin=True, hb=True, control=True):
+        """Starts the channels for this kernel.
+
+        For this class, we establish a websocket connection to the destination
+        and set up the channel-based queues on which applicable messages will
+        be posted.
+        """
+
+        ws_url = url_path_join(
+            GatewayClient.instance().ws_url or "",
+            GatewayClient.instance().kernels_endpoint,
+            url_escape(self.kernel_id),
+            "channels",
+        )
+        # Gather cert info in case where ssl is desired...
+        ssl_options = {
+            "ca_certs": GatewayClient.instance().ca_certs,
+            "certfile": GatewayClient.instance().client_cert,
+            "keyfile": GatewayClient.instance().client_key,
+        }
+
+        self.channel_socket = websocket.create_connection(
+            ws_url,
+            timeout=GatewayClient.instance().KERNEL_LAUNCH_TIMEOUT,
+            enable_multithread=True,
+            sslopt=ssl_options,
+        )
-
-        await ensure_async(
-            super().start_channels(shell=shell, iopub=iopub, stdin=stdin, hb=hb, control=control)
-        )
-
-        self.response_router = Thread(target=self._route_responses)
-        self.response_router.start()
-
-    def stop_channels(self):
-        """Stops all the running channels for this kernel.
-
-        For this class, we close the websocket connection and destroy the
-        channel-based queues.
-        """
-        super().stop_channels()
-        self._channels_stopped = True
-        self.log.debug("Closing websocket connection")
-
-        assert self.channel_socket is not None
-        self.channel_socket.close()
-        assert self.response_router is not None
-        self.response_router.join()
-
-        if self._channel_queues:
-            self._channel_queues.clear()
-            self._channel_queues = None
-
-    # Channels are implemented via a ChannelQueue that is used to send and receive messages
-
-    @property
-    def shell_channel(self):
-        """Get the shell channel object for this kernel."""
-        if self._shell_channel is None:
-            self.log.debug("creating shell channel queue")
-            assert self.channel_socket is not None
-            self._shell_channel = ChannelQueue("shell", self.channel_socket, self.log)
-            assert self._channel_queues is not None
-            self._channel_queues["shell"] = self._shell_channel
-        return self._shell_channel
-
-    @property
-    def iopub_channel(self):
-        """Get the iopub channel object for this kernel."""
-        if self._iopub_channel is None:
-            self.log.debug("creating iopub channel queue")
-            assert self.channel_socket is not None
-            self._iopub_channel = ChannelQueue("iopub", self.channel_socket, self.log)
-            assert self._channel_queues is not None
-            self._channel_queues["iopub"] = self._iopub_channel
-        return self._iopub_channel
-
-    @property
-    def stdin_channel(self):
-        """Get the stdin channel object for this kernel."""
-        if self._stdin_channel is None:
-            self.log.debug("creating stdin channel queue")
-            assert self.channel_socket is not None
-            self._stdin_channel = ChannelQueue("stdin", self.channel_socket, self.log)
-            assert self._channel_queues is not None
-            self._channel_queues["stdin"] = self._stdin_channel
-        return self._stdin_channel
-
-    @property
-    def hb_channel(self):
-        """Get the hb channel object for this kernel."""
-        if self._hb_channel is None:
-            self.log.debug("creating hb channel queue")
-            assert self.channel_socket is not None
-            self._hb_channel = HBChannelQueue("hb", self.channel_socket, self.log)
-            assert self._channel_queues is not None
-            self._channel_queues["hb"] = self._hb_channel
-        return self._hb_channel
-
-    @property
-    def control_channel(self):
-        """Get the control channel object for this kernel."""
-        if self._control_channel is None:
-            self.log.debug("creating control channel queue")
-            assert self.channel_socket is not None
-            self._control_channel = ChannelQueue("control", self.channel_socket, self.log)
-            assert self._channel_queues is not None
-            self._channel_queues["control"] = self._control_channel
-        return self._control_channel
-
-    def _route_responses(self):
-        """
-        Reads responses from the websocket and routes each to the appropriate channel queue based
-        on the message's channel. It does this for the duration of the class's lifetime until the
-        channels are stopped, at which time the socket is closed (unblocking the router) and
-        the thread terminates. If shutdown happens to occur while processing a response (unlikely),
-        termination takes place via the loop control boolean.
-        """
-        try:
-            while not self._channels_stopped:
-                assert self.channel_socket is not None
-                raw_message = self.channel_socket.recv()
-                if not raw_message:
-                    break
-                response_message = json_decode(utf8(raw_message))
-                channel = response_message["channel"]
-                assert self._channel_queues is not None
-                self._channel_queues[channel].put_nowait(response_message)
+        await ensure_async(
+            super().start_channels(shell=shell, iopub=iopub, stdin=stdin, hb=hb, control=control)
+        )
-
-        except websocket.WebSocketConnectionClosedException:
-            pass  # websocket closure most likely due to shut down
+        self.response_router = Thread(target=self._route_responses)
+        self.response_router.start()
-
-        except BaseException as be:
-            if not self._channels_stopped:
-                self.log.warning(f"Unexpected exception encountered ({be})")
+    def stop_channels(self):
+        """Stops all the running channels for this kernel.
-
-        # Notify channel queues that this thread had finished and no more messages are being received
-        assert self._channel_queues is not None
-        for channel_queue in self._channel_queues.values():
-            channel_queue.response_router_finished = True
+        For this class, we close the websocket connection and destroy the
+        channel-based queues.
+        """
+        super().stop_channels()
+        self._channels_stopped = True
+        self.log.debug("Closing websocket connection")
+
+        assert self.channel_socket is not None
+        self.channel_socket.close()
+        assert self.response_router is not None
+        self.response_router.join()
+
+        if self._channel_queues:
+            self._channel_queues.clear()
+            self._channel_queues = None
+
+    # Channels are implemented via a ChannelQueue that is used to send and receive messages
+
+    @property
+    def shell_channel(self):
+        """Get the shell channel object for this kernel."""
+        if self._shell_channel is None:
+            self.log.debug("creating shell channel queue")
+            assert self.channel_socket is not None
+            self._shell_channel = ChannelQueue("shell", self.channel_socket, self.log)
+            assert self._channel_queues is not None
+            self._channel_queues["shell"] = self._shell_channel
+        return self._shell_channel
+
+    @property
+    def iopub_channel(self):
+        """Get the iopub channel object for this kernel."""
+        if self._iopub_channel is None:
+            self.log.debug("creating iopub channel queue")
+            assert self.channel_socket is not None
+            self._iopub_channel = ChannelQueue("iopub", self.channel_socket, self.log)
+            assert self._channel_queues is not None
+            self._channel_queues["iopub"] = self._iopub_channel
+        return self._iopub_channel
+
+    @property
+    def stdin_channel(self):
+        """Get the stdin channel object for this kernel."""
+        if self._stdin_channel is None:
+            self.log.debug("creating stdin channel queue")
+            assert self.channel_socket is not None
+            self._stdin_channel = ChannelQueue("stdin", self.channel_socket, self.log)
+            assert self._channel_queues is not None
+            self._channel_queues["stdin"] = self._stdin_channel
+        return self._stdin_channel
+
+    @property
+    def hb_channel(self):
+        """Get the hb channel object for this kernel."""
+        if self._hb_channel is None:
+            self.log.debug("creating hb channel queue")
+            assert self.channel_socket is not None
+            self._hb_channel = HBChannelQueue("hb", self.channel_socket, self.log)
+            assert self._channel_queues is not None
+            self._channel_queues["hb"] = self._hb_channel
+        return self._hb_channel
+
+    @property
+    def control_channel(self):
+        """Get the control channel object for this kernel."""
+        if self._control_channel is None:
+            self.log.debug("creating control channel queue")
+            assert self.channel_socket is not None
+            self._control_channel = ChannelQueue("control", self.channel_socket, self.log)
+            assert self._channel_queues is not None
+            self._channel_queues["control"] = self._control_channel
+        return self._control_channel
+
+    def _route_responses(self):
+        """
+        Reads responses from the websocket and routes each to the appropriate channel queue based
+        on the message's channel. It does this for the duration of the class's lifetime until the
+        channels are stopped, at which time the socket is closed (unblocking the router) and
+        the thread terminates. If shutdown happens to occur while processing a response (unlikely),
+        termination takes place via the loop control boolean.
+        """
+        try:
+            while not self._channels_stopped:
+                assert self.channel_socket is not None
+                raw_message = self.channel_socket.recv()
+                if not raw_message:
+                    break
+                response_message = json_decode(utf8(raw_message))
+                channel = response_message["channel"]
+                assert self._channel_queues is not None
+                self._channel_queues[channel].put_nowait(response_message)
+
+        except websocket.WebSocketConnectionClosedException:
+            pass  # websocket closure most likely due to shut down
+
+        except BaseException as be:
+            if not self._channels_stopped:
+                self.log.warning(f"Unexpected exception encountered ({be})")
+
+        # Notify channel queues that this thread had finished and no more messages are being received
+        assert self._channel_queues is not None
+        for channel_queue in self._channel_queues.values():
+            channel_queue.response_router_finished = True
-        self.log.debug("Response router thread exiting...")
+        self.log.debug("Response router thread exiting...")
 
 
 KernelClientABC.register(GatewayKernelClient)
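Note: the client above multiplexes all five kernel channels over a single websocket; the
response-router thread reads each frame and drops it on the queue named by the message's
"channel" field. A standalone sketch of that dispatch step (a plain dict of queues stands
in for the real client state):

# Sketch of the dispatch inside GatewayKernelClient._route_responses.
import json
from queue import Queue

queues = {name: Queue() for name in ("shell", "iopub", "stdin", "hb", "control")}


def route(raw_message: str) -> None:
    """Put one decoded websocket frame on the queue its channel names."""
    response = json.loads(raw_message)
    queues[response["channel"]].put_nowait(response)


route(json.dumps({"channel": "iopub", "msg_type": "status"}))
print(queues["iopub"].get_nowait())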
diff --git a/jupyter_server/serverapp.py b/jupyter_server/serverapp.py
index f4c2e3c17f..9e4a57375d 100644
--- a/jupyter_server/serverapp.py
+++ b/jupyter_server/serverapp.py
@@ -43,51 +43,51 @@
 from tornado.netutil import bind_sockets
 
 if not sys.platform.startswith("win"):
-    from tornado.netutil import bind_unix_socket
+    from tornado.netutil import bind_unix_socket
 
 from traitlets import (
-    Any,
-    Bool,
-    Bytes,
-    Dict,
-    Float,
-    Instance,
-    Integer,
-    List,
-    TraitError,
-    Type,
-    Unicode,
-    Union,
-    default,
-    observe,
-    validate,
+    Any,
+    Bool,
+    Bytes,
+    Dict,
+    Float,
+    Instance,
+    Integer,
+    List,
+    TraitError,
+    Type,
+    Unicode,
+    Union,
+    default,
+    observe,
+    validate,
 )
 from traitlets.config import Config
 from traitlets.config.application import boolean_flag, catch_config_error
 
 from jupyter_server import (
-    DEFAULT_EVENTS_SCHEMA_PATH,
-    DEFAULT_JUPYTER_SERVER_PORT,
-    DEFAULT_STATIC_FILES_PATH,
-    DEFAULT_TEMPLATE_PATH_LIST,
-    JUPYTER_SERVER_EVENTS_URI,
-    __version__,
+    DEFAULT_EVENTS_SCHEMA_PATH,
+    DEFAULT_JUPYTER_SERVER_PORT,
+    DEFAULT_STATIC_FILES_PATH,
+    DEFAULT_TEMPLATE_PATH_LIST,
+    JUPYTER_SERVER_EVENTS_URI,
+    __version__,
 )
 from jupyter_server._sysinfo import get_sys_info
 from jupyter_server._tz import utcnow
 from jupyter_server.auth.authorizer import AllowAllAuthorizer, Authorizer
 from jupyter_server.auth.identity import (
-    IdentityProvider,
-    LegacyIdentityProvider,
-    PasswordIdentityProvider,
+    IdentityProvider,
+    LegacyIdentityProvider,
+    PasswordIdentityProvider,
 )
 from jupyter_server.auth.login import LoginHandler
 from jupyter_server.auth.logout import LogoutHandler
 from jupyter_server.base.handlers import (
-    FileFindHandler,
-    MainHandler,
-    RedirectWithParams,
-    Template404,
+    FileFindHandler,
+    MainHandler,
+    RedirectWithParams,
+    Template404,
 )
 from jupyter_server.extension.config import ExtensionConfigManager
 from jupyter_server.extension.manager import ExtensionManager
@@ -95,39 +95,39 @@
 from jupyter_server.gateway.connections import GatewayWebSocketConnection
 from jupyter_server.gateway.gateway_client import GatewayClient
 from jupyter_server.gateway.managers import (
-    GatewayKernelSpecManager,
-    GatewayMappingKernelManager,
-    GatewaySessionManager,
+    GatewayKernelSpecManager,
+    GatewayMappingKernelManager,
+    GatewaySessionManager,
 )
 from jupyter_server.log import log_request
 from jupyter_server.services.config import ConfigManager
 from jupyter_server.services.contents.filemanager import (
-    AsyncFileContentsManager,
-    FileContentsManager,
+    AsyncFileContentsManager,
+    FileContentsManager,
 )
 from jupyter_server.services.contents.largefilemanager import AsyncLargeFileManager
 from jupyter_server.services.contents.manager import AsyncContentsManager, ContentsManager
 from jupyter_server.services.kernels.connection.base import BaseKernelWebsocketConnection
 from jupyter_server.services.kernels.connection.channels import ZMQChannelsWebsocketConnection
 from jupyter_server.services.kernels.kernelmanager import (
-    AsyncMappingKernelManager,
-    MappingKernelManager,
+    AsyncMappingKernelManager,
+    MappingKernelManager,
 )
 from jupyter_server.services.sessions.sessionmanager import SessionManager
 from jupyter_server.utils import (
-    check_pid,
-    fetch,
-    unix_socket_in_use,
-    url_escape,
-    url_path_join,
-    urlencode_unix_socket_path,
+    check_pid,
+    fetch,
+    unix_socket_in_use,
+    url_escape,
+    url_path_join,
+    urlencode_unix_socket_path,
 )
 
 try:
-    import resource
+    import resource
 except ImportError:
-    # Windows
-    resource = None  # type:ignore[assignment]
+    # Windows
+    resource = None  # type:ignore[assignment]
 
 from jinja2 import Environment, FileSystemLoader
 from jupyter_core.paths import secure_write
 
@@ -140,17 +140,17 @@
 MIN_TORNADO = (6, 1, 0)
 
 try:
-    import tornado
+    import tornado
 
-    assert tornado.version_info >= MIN_TORNADO
+    assert tornado.version_info >= MIN_TORNADO
 except (ImportError, AttributeError, AssertionError) as e:  # pragma: no cover
-    raise ImportError(_i18n("The Jupyter Server requires tornado >=%s.%s.%s") % MIN_TORNADO) from e
+    raise ImportError(_i18n("The Jupyter Server requires tornado >=%s.%s.%s") % MIN_TORNADO) from e
 
 try:
-    import resource
+    import resource
 except ImportError:
-    # Windows
-    resource = None  # type:ignore[assignment]
+    # Windows
+    resource = None  # type:ignore[assignment]
 
 # -----------------------------------------------------------------------------
 # Module globals
 # -----------------------------------------------------------------------------
 
@@ -163,27 +163,27 @@
 """
 
 JUPYTER_SERVICE_HANDLERS = {
-    "auth": None,
-    "api": ["jupyter_server.services.api.handlers"],
-    "config": ["jupyter_server.services.config.handlers"],
-    "contents": ["jupyter_server.services.contents.handlers"],
-    "files": ["jupyter_server.files.handlers"],
-    "kernels": [
-        "jupyter_server.services.kernels.handlers",
-    ],
-    "kernelspecs": [
-        "jupyter_server.kernelspecs.handlers",
-        "jupyter_server.services.kernelspecs.handlers",
-    ],
-    "nbconvert": [
-        "jupyter_server.nbconvert.handlers",
-        "jupyter_server.services.nbconvert.handlers",
-    ],
-    "security": ["jupyter_server.services.security.handlers"],
-    "sessions": ["jupyter_server.services.sessions.handlers"],
-    "shutdown": ["jupyter_server.services.shutdown"],
-    "view": ["jupyter_server.view.handlers"],
-    "events": ["jupyter_server.services.events.handlers"],
+    "auth": None,
+    "api": ["jupyter_server.services.api.handlers"],
+    "config": ["jupyter_server.services.config.handlers"],
+    "contents": ["jupyter_server.services.contents.handlers"],
+    "files": ["jupyter_server.files.handlers"],
+    "kernels": [
+        "jupyter_server.services.kernels.handlers",
+    ],
+    "kernelspecs": [
+        "jupyter_server.kernelspecs.handlers",
+        "jupyter_server.services.kernelspecs.handlers",
+    ],
+    "nbconvert": [
+        "jupyter_server.nbconvert.handlers",
+        "jupyter_server.services.nbconvert.handlers",
+    ],
+    "security": ["jupyter_server.services.security.handlers"],
+    "sessions": ["jupyter_server.services.sessions.handlers"],
+    "shutdown": ["jupyter_server.services.shutdown"],
+    "view": ["jupyter_server.view.handlers"],
+    "events": ["jupyter_server.services.events.handlers"],
 }
 
 # Added for backwards compatibility from classic notebook server.
@@ -195,21 +195,21 @@
 
 def random_ports(port: int, n: int) -> t.Generator[int, None, None]:
-    """Generate a list of n random ports near the given port.
+    """Generate a list of n random ports near the given port.
 
-    The first 5 ports will be sequential, and the remaining n-5 will be
-    randomly selected in the range [port-2*n, port+2*n].
-    """
-    for i in range(min(5, n)):
-        yield port + i
-    for _ in range(n - 5):
-        yield max(1, port + random.randint(-2 * n, 2 * n))
+    The first 5 ports will be sequential, and the remaining n-5 will be
+    randomly selected in the range [port-2*n, port+2*n].
+    """
+    for i in range(min(5, n)):
+        yield port + i
+    for _ in range(n - 5):
+        yield max(1, port + random.randint(-2 * n, 2 * n))
 
 
 def load_handlers(name: str) -> t.Any:
-    """Load the (URL pattern, handler) tuples for each component."""
-    mod = __import__(name, fromlist=["default_handlers"])
-    return mod.default_handlers
+    """Load the (URL pattern, handler) tuples for each component."""
+    mod = __import__(name, fromlist=["default_handlers"])
+    return mod.default_handlers
 
 
 # -----------------------------------------------------------------------------
- " Specify an authorizer to avoid this message.", - RuntimeWarning, - stacklevel=2, - ) - authorizer = AllowAllAuthorizer(parent=jupyter_app, identity_provider=identity_provider) - - settings = self.init_settings( - jupyter_app, - kernel_manager, - contents_manager, - session_manager, - kernel_spec_manager, - config_manager, - event_logger, - extra_services, - log, - base_url, - default_url, - settings_overrides, - jinja_env_options, - authorizer=authorizer, - identity_provider=identity_provider, - kernel_websocket_connection_class=kernel_websocket_connection_class, - ) - handlers = self.init_handlers(default_services, settings) - - super().__init__(handlers, **settings) - - def init_settings( - self, - jupyter_app, - kernel_manager, - contents_manager, - session_manager, - kernel_spec_manager, - config_manager, - event_logger, - extra_services, - log, - base_url, - default_url, - settings_overrides, - jinja_env_options=None, - *, - authorizer=None, - identity_provider=None, - kernel_websocket_connection_class=None, - ): - """Initialize settings for the web application.""" - _template_path = settings_overrides.get( - "template_path", - jupyter_app.template_file_path, - ) - if isinstance(_template_path, str): - _template_path = (_template_path,) - template_path = [os.path.expanduser(path) for path in _template_path] - - jenv_opt: dict[str, t.Any] = {"autoescape": True} - jenv_opt.update(jinja_env_options if jinja_env_options else {}) - - env = Environment( - loader=FileSystemLoader(template_path), extensions=["jinja2.ext.i18n"], **jenv_opt - ) - sys_info = get_sys_info() - - base_dir = os.path.realpath(os.path.join(__file__, "..", "..")) - nbui = gettext.translation( - "nbui", - localedir=os.path.join(base_dir, "jupyter_server/i18n"), - fallback=True, - ) - env.install_gettext_translations(nbui, newstyle=False) - - if sys_info["commit_source"] == "repository": - # don't cache (rely on 304) when working from master - version_hash = "" - else: - # reset the cache on server restart - utc = datetime.timezone.utc - version_hash = datetime.datetime.now(tz=utc).strftime("%Y%m%d%H%M%S") - - now = utcnow() - - root_dir = contents_manager.root_dir - home = os.path.expanduser("~") - if root_dir.startswith(home + os.path.sep): - # collapse $HOME to ~ - root_dir = "~" + root_dir[len(home) :] - - settings = { - # basics - "log_function": log_request, - "base_url": base_url, - "default_url": default_url, - "template_path": template_path, - "static_path": jupyter_app.static_file_path, - "static_custom_path": jupyter_app.static_custom_path, - "static_handler_class": FileFindHandler, - "static_url_prefix": url_path_join(base_url, "/static/"), - "static_handler_args": { - # don't cache custom.js - "no_cache_paths": [url_path_join(base_url, "static", "custom")], - }, - "version_hash": version_hash, - # kernel message protocol over websocket - "kernel_ws_protocol": jupyter_app.kernel_ws_protocol, - # rate limits - "limit_rate": jupyter_app.limit_rate, - "iopub_msg_rate_limit": jupyter_app.iopub_msg_rate_limit, - "iopub_data_rate_limit": jupyter_app.iopub_data_rate_limit, - "rate_limit_window": jupyter_app.rate_limit_window, - # authentication - "cookie_secret": jupyter_app.cookie_secret, - "login_url": url_path_join(base_url, "/login"), - "xsrf_cookies": True, - "disable_check_xsrf": jupyter_app.disable_check_xsrf, - "allow_remote_access": jupyter_app.allow_remote_access, - "local_hostnames": jupyter_app.local_hostnames, - "authenticate_prometheus": jupyter_app.authenticate_prometheus, - # managers - 
"kernel_manager": kernel_manager, - "contents_manager": contents_manager, - "session_manager": session_manager, - "kernel_spec_manager": kernel_spec_manager, - "config_manager": config_manager, - "authorizer": authorizer, - "identity_provider": identity_provider, - "event_logger": event_logger, - "kernel_websocket_connection_class": kernel_websocket_connection_class, - # handlers - "extra_services": extra_services, - # Jupyter stuff - "started": now, - # place for extensions to register activity - # so that they can prevent idle-shutdown - "last_activity_times": {}, - "jinja_template_vars": jupyter_app.jinja_template_vars, - "websocket_url": jupyter_app.websocket_url, - "shutdown_button": jupyter_app.quit_button, - "config": jupyter_app.config, - "config_dir": jupyter_app.config_dir, - "allow_password_change": jupyter_app.allow_password_change, - "server_root_dir": root_dir, - "jinja2_env": env, - "serverapp": jupyter_app, - } + """A server web application.""" + + def __init__( + self, + jupyter_app, + default_services, + kernel_manager, + contents_manager, + session_manager, + kernel_spec_manager, + config_manager, + event_logger, + extra_services, + log, + base_url, + default_url, + settings_overrides, + jinja_env_options, + *, + authorizer=None, + identity_provider=None, + kernel_websocket_connection_class=None, + ): + """Initialize a server web application.""" + if identity_provider is None: + warnings.warn( + "identity_provider unspecified. Using default IdentityProvider." + " Specify an identity_provider to avoid this message.", + RuntimeWarning, + stacklevel=2, + ) + identity_provider = IdentityProvider(parent=jupyter_app) + + if authorizer is None: + warnings.warn( + "authorizer unspecified. Using permissive AllowAllAuthorizer." + " Specify an authorizer to avoid this message.", + RuntimeWarning, + stacklevel=2, + ) + authorizer = AllowAllAuthorizer(parent=jupyter_app, identity_provider=identity_provider) + + settings = self.init_settings( + jupyter_app, + kernel_manager, + contents_manager, + session_manager, + kernel_spec_manager, + config_manager, + event_logger, + extra_services, + log, + base_url, + default_url, + settings_overrides, + jinja_env_options, + authorizer=authorizer, + identity_provider=identity_provider, + kernel_websocket_connection_class=kernel_websocket_connection_class, + ) + handlers = self.init_handlers(default_services, settings) + + super().__init__(handlers, **settings) + + def init_settings( + self, + jupyter_app, + kernel_manager, + contents_manager, + session_manager, + kernel_spec_manager, + config_manager, + event_logger, + extra_services, + log, + base_url, + default_url, + settings_overrides, + jinja_env_options=None, + *, + authorizer=None, + identity_provider=None, + kernel_websocket_connection_class=None, + ): + """Initialize settings for the web application.""" + _template_path = settings_overrides.get( + "template_path", + jupyter_app.template_file_path, + ) + if isinstance(_template_path, str): + _template_path = (_template_path,) + template_path = [os.path.expanduser(path) for path in _template_path] - # allow custom overrides for the tornado web app. 
- settings.update(settings_overrides) + jenv_opt: dict[str, t.Any] = {"autoescape": True} + jenv_opt.update(jinja_env_options if jinja_env_options else {}) - if base_url and "xsrf_cookie_kwargs" not in settings: - # default: set xsrf cookie on base_url - settings["xsrf_cookie_kwargs"] = {"path": base_url} - return settings + env = Environment( + loader=FileSystemLoader(template_path), extensions=["jinja2.ext.i18n"], **jenv_opt + ) + sys_info = get_sys_info() - def init_handlers(self, default_services, settings): - """Load the (URL pattern, handler) tuples for each component.""" - # Order matters. The first handler to match the URL will handle the request. - handlers = [] - # load extra services specified by users before default handlers - for service in settings["extra_services"]: - handlers.extend(load_handlers(service)) - - # Load default services. Raise exception if service not - # found in JUPYTER_SERVICE_HANLDERS. - for service in default_services: - if service in JUPYTER_SERVICE_HANDLERS: - locations = JUPYTER_SERVICE_HANDLERS[service] - if locations is not None: - for loc in locations: - handlers.extend(load_handlers(loc)) - else: - msg = ( - f"{service} is not recognized as a jupyter_server " - "service. If this is a custom service, " - "try adding it to the " - "`extra_services` list." + base_dir = os.path.realpath(os.path.join(__file__, "..", "..")) + nbui = gettext.translation( + "nbui", + localedir=os.path.join(base_dir, "jupyter_server/i18n"), + fallback=True, ) - raise Exception(msg) - - # Add extra handlers from contents manager. - handlers.extend(settings["contents_manager"].get_extra_handlers()) - # And from identity provider - handlers.extend(settings["identity_provider"].get_handlers()) - - # register base handlers last - handlers.extend(load_handlers("jupyter_server.base.handlers")) - - if settings["default_url"] != settings["base_url"]: - # set the URL that will be redirected from `/` - handlers.append( - ( - r"/?", - RedirectWithParams, - { - "url": settings["default_url"], - "permanent": False, # want 302, not 301 - }, + env.install_gettext_translations(nbui, newstyle=False) + + if sys_info["commit_source"] == "repository": + # don't cache (rely on 304) when working from master + version_hash = "" + else: + # reset the cache on server restart + utc = datetime.timezone.utc + version_hash = datetime.datetime.now(tz=utc).strftime("%Y%m%d%H%M%S") + + now = utcnow() + + root_dir = contents_manager.root_dir + home = os.path.expanduser("~") + if root_dir.startswith(home + os.path.sep): + # collapse $HOME to ~ + root_dir = "~" + root_dir[len(home) :] + + settings = { + # basics + "log_function": log_request, + "base_url": base_url, + "default_url": default_url, + "template_path": template_path, + "static_path": jupyter_app.static_file_path, + "static_custom_path": jupyter_app.static_custom_path, + "static_handler_class": FileFindHandler, + "static_url_prefix": url_path_join(base_url, "/static/"), + "static_handler_args": { + # don't cache custom.js + "no_cache_paths": [url_path_join(base_url, "static", "custom")], + }, + "version_hash": version_hash, + # kernel message protocol over websocket + "kernel_ws_protocol": jupyter_app.kernel_ws_protocol, + # rate limits + "limit_rate": jupyter_app.limit_rate, + "iopub_msg_rate_limit": jupyter_app.iopub_msg_rate_limit, + "iopub_data_rate_limit": jupyter_app.iopub_data_rate_limit, + "rate_limit_window": jupyter_app.rate_limit_window, + # authentication + "cookie_secret": jupyter_app.cookie_secret, + "login_url": 
url_path_join(base_url, "/login"), + "xsrf_cookies": True, + "disable_check_xsrf": jupyter_app.disable_check_xsrf, + "allow_remote_access": jupyter_app.allow_remote_access, + "local_hostnames": jupyter_app.local_hostnames, + "authenticate_prometheus": jupyter_app.authenticate_prometheus, + # managers + "kernel_manager": kernel_manager, + "contents_manager": contents_manager, + "session_manager": session_manager, + "kernel_spec_manager": kernel_spec_manager, + "config_manager": config_manager, + "authorizer": authorizer, + "identity_provider": identity_provider, + "event_logger": event_logger, + "kernel_websocket_connection_class": kernel_websocket_connection_class, + # handlers + "extra_services": extra_services, + # Jupyter stuff + "started": now, + # place for extensions to register activity + # so that they can prevent idle-shutdown + "last_activity_times": {}, + "jinja_template_vars": jupyter_app.jinja_template_vars, + "websocket_url": jupyter_app.websocket_url, + "shutdown_button": jupyter_app.quit_button, + "config": jupyter_app.config, + "config_dir": jupyter_app.config_dir, + "allow_password_change": jupyter_app.allow_password_change, + "server_root_dir": root_dir, + "jinja2_env": env, + "serverapp": jupyter_app, + } + + # allow custom overrides for the tornado web app. + settings.update(settings_overrides) + + if base_url and "xsrf_cookie_kwargs" not in settings: + # default: set xsrf cookie on base_url + settings["xsrf_cookie_kwargs"] = {"path": base_url} + return settings + + def init_handlers(self, default_services, settings): + """Load the (URL pattern, handler) tuples for each component.""" + # Order matters. The first handler to match the URL will handle the request. + handlers = [] + # load extra services specified by users before default handlers + for service in settings["extra_services"]: + handlers.extend(load_handlers(service)) + + # Load default services. Raise exception if service not + # found in JUPYTER_SERVICE_HANLDERS. + for service in default_services: + if service in JUPYTER_SERVICE_HANDLERS: + locations = JUPYTER_SERVICE_HANDLERS[service] + if locations is not None: + for loc in locations: + handlers.extend(load_handlers(loc)) + else: + msg = ( + f"{service} is not recognized as a jupyter_server " + "service. If this is a custom service, " + "try adding it to the " + "`extra_services` list." + ) + raise Exception(msg) + + # Add extra handlers from contents manager. + handlers.extend(settings["contents_manager"].get_extra_handlers()) + # And from identity provider + handlers.extend(settings["identity_provider"].get_handlers()) + + # register base handlers last + handlers.extend(load_handlers("jupyter_server.base.handlers")) + + if settings["default_url"] != settings["base_url"]: + # set the URL that will be redirected from `/` + handlers.append( + ( + r"/?", + RedirectWithParams, + { + "url": settings["default_url"], + "permanent": False, # want 302, not 301 + }, + ) + ) + else: + handlers.append((r"/", MainHandler)) + + # prepend base_url onto the patterns that we match + new_handlers = [] + for handler in handlers: + pattern = url_path_join(settings["base_url"], handler[0]) + new_handler = (pattern, *list(handler[1:])) + new_handlers.append(new_handler) + # add 404 on the end, which will catch everything that falls through + new_handlers.append((r"(.*)", Template404)) + return new_handlers + + def last_activity(self): + """Get a UTC timestamp for when the server last did something. 
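Note: `init_handlers` registers routes in priority order (extra services, default services,
contents-manager and identity-provider extras, base handlers, then a catch-all 404), and
tornado uses the first pattern that matches. A toy illustration of that first-match rule
(handler names here are hypothetical):

# First-match routing: the trailing (.*) pattern only fires when nothing matched.
import re

handlers = [
    (r"/api/status", "status_handler"),  # hypothetical early registration
    (r"/?", "redirect_or_main_handler"),
    (r"(.*)", "template_404"),  # appended last as the catch-all
]


def resolve(path: str) -> str:
    for pattern, name in handlers:
        if re.fullmatch(pattern, path):
            return name
    raise LookupError(path)


print(resolve("/api/status"))    # -> status_handler
print(resolve("/no/such/page"))  # -> template_404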
+
+    def last_activity(self):
+        """Get a UTC timestamp for when the server last did something.
+
+        Includes: API activity, kernel activity, kernel shutdown, and terminal
+        activity.
+        """
+        sources = [
+            self.settings["started"],
+            self.settings["kernel_manager"].last_kernel_activity,
+        ]
+        # Any setting that ends with a key that ends with `_last_activity` is
+        # counted here. This provides a hook for extensions to add a last activity
+        # setting to the server.
+        sources.extend(
+            [val for key, val in self.settings.items() if key.endswith("_last_activity")]
+        )
+        sources.extend(self.settings["last_activity_times"].values())
+        return max(sources)
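Note: `last_activity` folds in any `settings` key ending in `_last_activity`, which gives
extensions a way to count as activity and defer idle shutdown. A hypothetical extension
hook (the key name is invented; `web_app` is assumed to be the running application):

# Hypothetical: refresh an extension's activity timestamp so last_activity() sees it.
from jupyter_server._tz import utcnow


def record_activity(web_app) -> None:
    web_app.settings["my_extension_last_activity"] = utcnow()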
 
 
 class JupyterPasswordApp(JupyterApp):
-    """Set a password for the Jupyter server.
+    """Set a password for the Jupyter server.
 
-    Setting a password secures the Jupyter server
-    and removes the need for token-based authentication.
-    """
+    Setting a password secures the Jupyter server
+    and removes the need for token-based authentication.
+    """
 
-    description: str = __doc__
+    description: str = __doc__
 
-    def _config_file_default(self):
-        """the default config file."""
-        return os.path.join(self.config_dir, "jupyter_server_config.json")
+    def _config_file_default(self):
+        """the default config file."""
+        return os.path.join(self.config_dir, "jupyter_server_config.json")
 
-    def start(self):
-        """Start the password app."""
-        from jupyter_server.auth.security import set_password
+    def start(self):
+        """Start the password app."""
+        from jupyter_server.auth.security import set_password
 
-        set_password(config_file=self.config_file)
-        self.log.info("Wrote hashed password to %s" % self.config_file)
+        set_password(config_file=self.config_file)
+        self.log.info("Wrote hashed password to %s" % self.config_file)
 
 
 def shutdown_server(server_info, timeout=5, log=None):
-    """Shutdown a Jupyter server in a separate process.
+    """Shutdown a Jupyter server in a separate process.
 
-    *server_info* should be a dictionary as produced by list_running_servers().
+    *server_info* should be a dictionary as produced by list_running_servers().
 
-    Will first try to request shutdown using /api/shutdown .
-    On Unix, if the server is still running after *timeout* seconds, it will
-    send SIGTERM. After another timeout, it escalates to SIGKILL.
-
-    Returns True if the server was stopped by any means, False if stopping it
-    failed (on Windows).
-    """
-    url = server_info["url"]
-    pid = server_info["pid"]
-    try:
-        shutdown_url = urljoin(url, "api/shutdown")
-        if log:
-            log.debug("POST request to %s", shutdown_url)
-        fetch(
-            shutdown_url,
-            method="POST",
-            body=b"",
-            headers={"Authorization": "token " + server_info["token"]},
-        )
-    except Exception as ex:
-        if not str(ex) == "Unknown URL scheme.":
-            raise ex
-        if log:
-            log.debug("Was not a HTTP scheme. Treating as socket instead.")
-            log.debug("POST request to %s", url)
-        fetch(
-            url,
-            method="POST",
-            body=b"",
-            headers={"Authorization": "token " + server_info["token"]},
-        )
-
-    # Poll to see if it shut down.
-    for _ in range(timeout * 10):
-        if not check_pid(pid):
-            if log:
-                log.debug("Server PID %s is gone", pid)
-            return True
-        time.sleep(0.1)
-
-    if sys.platform.startswith("win"):
-        return False
-
-    if log:
-        log.debug("SIGTERM to PID %s", pid)
-    os.kill(pid, signal.SIGTERM)
-
-    # Poll to see if it shut down.
-    for _ in range(timeout * 10):
-        if not check_pid(pid):
-            if log:
-                log.debug("Server PID %s is gone", pid)
-            return True
-        time.sleep(0.1)
-
-    if log:
-        log.debug("SIGKILL to PID %s", pid)
-    os.kill(pid, signal.SIGKILL)
-    return True  # SIGKILL cannot be caught
+    Will first try to request shutdown using /api/shutdown .
+    On Unix, if the server is still running after *timeout* seconds, it will
+    send SIGTERM. After another timeout, it escalates to SIGKILL.
+
+    Returns True if the server was stopped by any means, False if stopping it
+    failed (on Windows).
+    """
+    url = server_info["url"]
+    pid = server_info["pid"]
+    try:
+        shutdown_url = urljoin(url, "api/shutdown")
+        if log:
+            log.debug("POST request to %s", shutdown_url)
+        fetch(
+            shutdown_url,
+            method="POST",
+            body=b"",
+            headers={"Authorization": "token " + server_info["token"]},
+        )
+    except Exception as ex:
+        if not str(ex) == "Unknown URL scheme.":
+            raise ex
+        if log:
+            log.debug("Was not a HTTP scheme. Treating as socket instead.")
+            log.debug("POST request to %s", url)
+        fetch(
+            url,
+            method="POST",
+            body=b"",
+            headers={"Authorization": "token " + server_info["token"]},
+        )
+
+    # Poll to see if it shut down.
+    for _ in range(timeout * 10):
+        if not check_pid(pid):
+            if log:
+                log.debug("Server PID %s is gone", pid)
+            return True
+        time.sleep(0.1)
+
+    if sys.platform.startswith("win"):
+        return False
+
+    if log:
+        log.debug("SIGTERM to PID %s", pid)
+    os.kill(pid, signal.SIGTERM)
+
+    # Poll to see if it shut down.
+    for _ in range(timeout * 10):
+        if not check_pid(pid):
+            if log:
+                log.debug("Server PID %s is gone", pid)
+            return True
+        time.sleep(0.1)
+
+    if log:
+        log.debug("SIGKILL to PID %s", pid)
+    os.kill(pid, signal.SIGKILL)
+    return True  # SIGKILL cannot be caught
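Note: `shutdown_server` escalates from a POST to /api/shutdown, to SIGTERM, to SIGKILL. A
hedged usage sketch (assumes at least one server is running and its info file is readable):

# Stop the first running server found, reusing the escalation logic above.
import logging

from jupyter_server.serverapp import list_running_servers, shutdown_server

servers = list(list_running_servers())
if servers:
    stopped = shutdown_server(servers[0], timeout=5, log=logging.getLogger(__name__))
    print("stopped:", stopped)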
 
 
 class JupyterServerStopApp(JupyterApp):
-    """An application to stop a Jupyter server."""
+    """An application to stop a Jupyter server."""
 
-    version: str = __version__
-    description: str = "Stop currently running Jupyter server for a given port"
+    version: str = __version__
+    description: str = "Stop currently running Jupyter server for a given port"
 
-    port = Integer(
-        DEFAULT_JUPYTER_SERVER_PORT,
-        config=True,
-        help="Port of the server to be killed. Default %s" % DEFAULT_JUPYTER_SERVER_PORT,
-    )
-
-    sock = Unicode("", config=True, help="UNIX socket of the server to be killed.")
-
-    def parse_command_line(self, argv=None):
-        """Parse command line options."""
-        super().parse_command_line(argv)
-        if self.extra_args:
-            try:
-                self.port = int(self.extra_args[0])
-            except ValueError:
-                # self.extra_args[0] was not an int, so it must be a string (unix socket).
-                self.sock = self.extra_args[0]
-
-    def shutdown_server(self, server):
-        """Shut down a server."""
-        return shutdown_server(server, log=self.log)
-
-    def _shutdown_or_exit(self, target_endpoint, server):
-        """Handle a shutdown."""
-        self.log.info("Shutting down server on %s..." % target_endpoint)
-        if not self.shutdown_server(server):
-            sys.exit("Could not stop server on %s" % target_endpoint)
-
-    @staticmethod
-    def _maybe_remove_unix_socket(socket_path):
-        """Try to remove a socket path."""
-        try:
-            os.unlink(socket_path)
-        except OSError:
-            pass
-
-    def start(self):
-        """Start the server stop app."""
-        info = self.log.info
-        servers = list(list_running_servers(self.runtime_dir, log=self.log))
-        if not servers:
-            self.exit("There are no running servers (per %s)" % self.runtime_dir)
-        for server in servers:
-            if self.sock:
-                sock = server.get("sock", None)
-                if sock and sock == self.sock:
-                    self._shutdown_or_exit(sock, server)
-                    # Attempt to remove the UNIX socket after stopping.
-                    self._maybe_remove_unix_socket(sock)
-                    return
-            elif self.port:
-                port = server.get("port", None)
-                if port == self.port:
-                    self._shutdown_or_exit(port, server)
-                    return
-        current_endpoint = self.sock or self.port
-        info(f"There is currently no server running on {current_endpoint}")
-        info("Ports/sockets currently in use:")
-        for server in servers:
-            info("    - {}".format(server.get("sock") or server["port"]))
-        self.exit(1)
+    port = Integer(
+        DEFAULT_JUPYTER_SERVER_PORT,
+        config=True,
+        help="Port of the server to be killed. Default %s" % DEFAULT_JUPYTER_SERVER_PORT,
+    )
+
+    sock = Unicode("", config=True, help="UNIX socket of the server to be killed.")
+
+    def parse_command_line(self, argv=None):
+        """Parse command line options."""
+        super().parse_command_line(argv)
+        if self.extra_args:
+            try:
+                self.port = int(self.extra_args[0])
+            except ValueError:
+                # self.extra_args[0] was not an int, so it must be a string (unix socket).
+                self.sock = self.extra_args[0]
+
+    def shutdown_server(self, server):
+        """Shut down a server."""
+        return shutdown_server(server, log=self.log)
+
+    def _shutdown_or_exit(self, target_endpoint, server):
+        """Handle a shutdown."""
+        self.log.info("Shutting down server on %s..." % target_endpoint)
+        if not self.shutdown_server(server):
+            sys.exit("Could not stop server on %s" % target_endpoint)
+
+    @staticmethod
+    def _maybe_remove_unix_socket(socket_path):
+        """Try to remove a socket path."""
+        try:
+            os.unlink(socket_path)
+        except OSError:
+            pass
+
+    def start(self):
+        """Start the server stop app."""
+        info = self.log.info
+        servers = list(list_running_servers(self.runtime_dir, log=self.log))
+        if not servers:
+            self.exit("There are no running servers (per %s)" % self.runtime_dir)
+        for server in servers:
+            if self.sock:
+                sock = server.get("sock", None)
+                if sock and sock == self.sock:
+                    self._shutdown_or_exit(sock, server)
+                    # Attempt to remove the UNIX socket after stopping.
+                    self._maybe_remove_unix_socket(sock)
+                    return
+            elif self.port:
+                port = server.get("port", None)
+                if port == self.port:
+                    self._shutdown_or_exit(port, server)
+                    return
+        current_endpoint = self.sock or self.port
+        info(f"There is currently no server running on {current_endpoint}")
+        info("Ports/sockets currently in use:")
+        for server in servers:
+            info("    - {}".format(server.get("sock") or server["port"]))
+        self.exit(1)
 
 
 class JupyterServerListApp(JupyterApp):
-    """An application to list running Jupyter servers."""
-
-    version: str = __version__
-    description: str = _i18n("List currently running Jupyter servers.")
-
-    flags = {
-        "jsonlist": (
-            {"JupyterServerListApp": {"jsonlist": True}},
-            _i18n("Produce machine-readable JSON list output."),
-        ),
-        "json": (
-            {"JupyterServerListApp": {"json": True}},
-            _i18n("Produce machine-readable JSON object on each line of output."),
-        ),
-    }
-
-    jsonlist = Bool(
-        False,
-        config=True,
-        help=_i18n(
-            "If True, the output will be a JSON list of objects, one per "
-            "active Jupyter server, each with the details from the "
-            "relevant server info file."
-        ),
-    )
-    json = Bool(
-        False,
-        config=True,
-        help=_i18n(
-            "If True, each line of output will be a JSON object with the "
-            "details from the server info file. For a JSON list output, "
-            "see the JupyterServerListApp.jsonlist configuration value"
-        ),
-    )
-
-    def start(self):
-        """Start the server list application."""
-        serverinfo_list = list(list_running_servers(self.runtime_dir, log=self.log))
-        if self.jsonlist:
-            print(json.dumps(serverinfo_list, indent=2))
-        elif self.json:
-            for serverinfo in serverinfo_list:
-                print(json.dumps(serverinfo))
-        else:
-            print("Currently running servers:")
-            for serverinfo in serverinfo_list:
-                url = serverinfo["url"]
-                if serverinfo.get("token"):
-                    url = url + "?token=%s" % serverinfo["token"]
-                print(url, "::", serverinfo["root_dir"])
+    """An application to list running Jupyter servers."""
+
+    version: str = __version__
+    description: str = _i18n("List currently running Jupyter servers.")
+
+    flags = {
+        "jsonlist": (
+            {"JupyterServerListApp": {"jsonlist": True}},
+            _i18n("Produce machine-readable JSON list output."),
+        ),
+        "json": (
+            {"JupyterServerListApp": {"json": True}},
+            _i18n("Produce machine-readable JSON object on each line of output."),
+        ),
+    }
+
+    jsonlist = Bool(
+        False,
+        config=True,
+        help=_i18n(
+            "If True, the output will be a JSON list of objects, one per "
+            "active Jupyter server, each with the details from the "
+            "relevant server info file."
+        ),
+    )
+    json = Bool(
+        False,
+        config=True,
+        help=_i18n(
+            "If True, each line of output will be a JSON object with the "
+            "details from the server info file. For a JSON list output, "
+            "see the JupyterServerListApp.jsonlist configuration value"
+        ),
+    )
+
+    def start(self):
+        """Start the server list application."""
+        serverinfo_list = list(list_running_servers(self.runtime_dir, log=self.log))
+        if self.jsonlist:
+            print(json.dumps(serverinfo_list, indent=2))
+        elif self.json:
+            for serverinfo in serverinfo_list:
+                print(json.dumps(serverinfo))
+        else:
+            print("Currently running servers:")
+            for serverinfo in serverinfo_list:
+                url = serverinfo["url"]
+                if serverinfo.get("token"):
+                    url = url + "?token=%s" % serverinfo["token"]
+                print(url, "::", serverinfo["root_dir"])
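Note: with the `--jsonlist` flag defined above, `jupyter server list` emits a single JSON
array, which scripts can consume directly. A sketch (assumes a `jupyter` executable on
PATH; the output keys follow the server info files shown above):

# Parse the machine-readable listing produced by JupyterServerListApp.
import json
import subprocess

out = subprocess.run(
    ["jupyter", "server", "list", "--jsonlist"],
    capture_output=True,
    text=True,
    check=True,
)
for info in json.loads(out.stdout):
    print(info["url"], "->", info["root_dir"])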
+ """, ) # Add notebook manager flags flags.update( - boolean_flag( - "script", - "FileContentsManager.save_script", - "DEPRECATED, IGNORED", - "DEPRECATED, IGNORED", - ) + boolean_flag( + "script", + "FileContentsManager.save_script", + "DEPRECATED, IGNORED", + "DEPRECATED, IGNORED", + ) ) aliases = dict(base_aliases) aliases.update( - { - "ip": "ServerApp.ip", - "port": "ServerApp.port", - "port-retries": "ServerApp.port_retries", - "sock": "ServerApp.sock", - "sock-mode": "ServerApp.sock_mode", - "transport": "KernelManager.transport", - "keyfile": "ServerApp.keyfile", - "certfile": "ServerApp.certfile", - "client-ca": "ServerApp.client_ca", - "notebook-dir": "ServerApp.root_dir", - "preferred-dir": "ServerApp.preferred_dir", - "browser": "ServerApp.browser", - "pylab": "ServerApp.pylab", - "gateway-url": "GatewayClient.url", - } + { + "ip": "ServerApp.ip", + "port": "ServerApp.port", + "port-retries": "ServerApp.port_retries", + "sock": "ServerApp.sock", + "sock-mode": "ServerApp.sock_mode", + "transport": "KernelManager.transport", + "keyfile": "ServerApp.keyfile", + "certfile": "ServerApp.certfile", + "client-ca": "ServerApp.client_ca", + "notebook-dir": "ServerApp.root_dir", + "preferred-dir": "ServerApp.preferred_dir", + "browser": "ServerApp.browser", + "pylab": "ServerApp.pylab", + "gateway-url": "GatewayClient.url", + } ) # ----------------------------------------------------------------------------- @@ -762,118 +762,118 @@ def start(self): class ServerApp(JupyterApp): - """The Jupyter Server application class.""" + """The Jupyter Server application class.""" - name = "jupyter-server" - version: str = __version__ - description: str = _i18n( - """The Jupyter Server. + name = "jupyter-server" + version: str = __version__ + description: str = _i18n( + """The Jupyter Server. 
-This launches a Tornado-based Jupyter Server.""" - ) - examples = _examples + This launches a Tornado-based Jupyter Server.""" + ) + examples = _examples + + flags = Dict(flags) # type:ignore[assignment] + aliases = Dict(aliases) # type:ignore[assignment] + + classes = [ + KernelManager, + Session, + MappingKernelManager, + KernelSpecManager, + AsyncMappingKernelManager, + ContentsManager, + FileContentsManager, + AsyncContentsManager, + AsyncFileContentsManager, + NotebookNotary, + GatewayMappingKernelManager, + GatewayKernelSpecManager, + GatewaySessionManager, + GatewayWebSocketConnection, + GatewayClient, + Authorizer, + EventLogger, + ZMQChannelsWebsocketConnection, + ] - flags = Dict(flags) # type:ignore[assignment] - aliases = Dict(aliases) # type:ignore[assignment] + subcommands: dict[str, t.Any] = { + "list": ( + JupyterServerListApp, + JupyterServerListApp.description.splitlines()[0], + ), + "stop": ( + JupyterServerStopApp, + JupyterServerStopApp.description.splitlines()[0], + ), + "password": ( + JupyterPasswordApp, + JupyterPasswordApp.description.splitlines()[0], + ), + "extension": ( + ServerExtensionApp, + ServerExtensionApp.description.splitlines()[0], + ), + } - classes = [ - KernelManager, - Session, - MappingKernelManager, - KernelSpecManager, - AsyncMappingKernelManager, - ContentsManager, - FileContentsManager, - AsyncContentsManager, - AsyncFileContentsManager, - NotebookNotary, - GatewayMappingKernelManager, - GatewayKernelSpecManager, - GatewaySessionManager, - GatewayWebSocketConnection, - GatewayClient, - Authorizer, - EventLogger, - ZMQChannelsWebsocketConnection, - ] - - subcommands: dict[str, t.Any] = { - "list": ( - JupyterServerListApp, - JupyterServerListApp.description.splitlines()[0], - ), - "stop": ( - JupyterServerStopApp, - JupyterServerStopApp.description.splitlines()[0], - ), - "password": ( - JupyterPasswordApp, - JupyterPasswordApp.description.splitlines()[0], - ), - "extension": ( - ServerExtensionApp, - ServerExtensionApp.description.splitlines()[0], - ), - } - - # A list of services whose handlers will be exposed. - # Subclasses can override this list to - # expose a subset of these handlers. - default_services = ( - "api", - "auth", - "config", - "contents", - "files", - "kernels", - "kernelspecs", - "nbconvert", - "security", - "sessions", - "shutdown", - "view", - "events", - ) - - _log_formatter_cls = LogFormatter # type:ignore[assignment] - _stopping = Bool(False, help="Signal that we've begun stopping.") - - @default("log_level") - def _default_log_level(self) -> int: - return logging.INFO - - @default("log_format") - def _default_log_format(self) -> str: - """override default log format to include date & time""" - return ( - "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" - ) - - # file to be opened in the Jupyter server - file_to_run = Unicode("", help="Open the named file when the application is launched.").tag( - config=True - ) - - file_url_prefix = Unicode( - "notebooks", help="The URL prefix where files are opened directly." - ).tag(config=True) - - # Network related information - allow_origin = Unicode( - "", - config=True, - help="""Set the Access-Control-Allow-Origin header + # A list of services whose handlers will be exposed. + # Subclasses can override this list to + # expose a subset of these handlers. 
+ default_services = ( + "api", + "auth", + "config", + "contents", + "files", + "kernels", + "kernelspecs", + "nbconvert", + "security", + "sessions", + "shutdown", + "view", + "events", + ) + + _log_formatter_cls = LogFormatter # type:ignore[assignment] + _stopping = Bool(False, help="Signal that we've begun stopping.") + + @default("log_level") + def _default_log_level(self) -> int: + return logging.INFO + + @default("log_format") + def _default_log_format(self) -> str: + """override default log format to include date & time""" + return ( + "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s" + ) + + # file to be opened in the Jupyter server + file_to_run = Unicode("", help="Open the named file when the application is launched.").tag( + config=True + ) + + file_url_prefix = Unicode( + "notebooks", help="The URL prefix where files are opened directly." + ).tag(config=True) + + # Network related information + allow_origin = Unicode( + "", + config=True, + help="""Set the Access-Control-Allow-Origin header Use '*' to allow any origin to access your server. Takes precedence over allow_origin_pat. """, - ) + ) - allow_origin_pat = Unicode( - "", - config=True, - help="""Use a regular expression for the Access-Control-Allow-Origin header + allow_origin_pat = Unicode( + "", + config=True, + help="""Use a regular expression for the Access-Control-Allow-Origin header Requests from an origin matching the expression will get replies with: @@ -883,254 +883,254 @@ def _default_log_format(self) -> str: Ignored if allow_origin is set. """, - ) - - allow_credentials = Bool( - False, - config=True, - help=_i18n("Set the Access-Control-Allow-Credentials: true header"), - ) - - allow_root = Bool( - False, - config=True, - help=_i18n("Whether to allow the user to run the server as root."), - ) - - autoreload = Bool( - False, - config=True, - help=_i18n("Reload the webapp when changes are made to any Python src files."), - ) - - default_url = Unicode("/", config=True, help=_i18n("The default URL to redirect to from `/`")) - - ip = Unicode( - "localhost", - config=True, - help=_i18n("The IP address the Jupyter server will listen on."), - ) - - @default("ip") - def _default_ip(self) -> str: - """Return localhost if available, 127.0.0.1 otherwise. - - On some (horribly broken) systems, localhost cannot be bound. - """ - s = socket.socket() - try: - s.bind(("localhost", 0)) - except OSError as e: - self.log.warning( - _i18n("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s"), e - ) - return "127.0.0.1" - else: - s.close() - return "localhost" - - @validate("ip") - def _validate_ip(self, proposal: t.Any) -> str: - value = t.cast(str, proposal["value"]) - if value == "*": - value = "" - return value - - custom_display_url = Unicode( - "", - config=True, - help=_i18n( - """Override URL shown to users. - - Replace actual URL, including protocol, address, port and base URL, - with the given value when displaying URL to the users. Do not change - the actual connection URL. If authentication token is enabled, the - token is added to the custom URL automatically. 
- - This option is intended to be used when the URL to display to the user - cannot be determined reliably by the Jupyter server (proxified - or containerized setups for example).""" - ), - ) - - port_env = "JUPYTER_PORT" - port_default_value = DEFAULT_JUPYTER_SERVER_PORT - - port = Integer( - config=True, - help=_i18n("The port the server will listen on (env: JUPYTER_PORT)."), - ) - - @default("port") - def _port_default(self) -> int: - return int(os.getenv(self.port_env, self.port_default_value)) - - port_retries_env = "JUPYTER_PORT_RETRIES" - port_retries_default_value = 50 - port_retries = Integer( - port_retries_default_value, - config=True, - help=_i18n( - "The number of additional ports to try if the specified port is not " - "available (env: JUPYTER_PORT_RETRIES)." - ), - ) - - @default("port_retries") - def _port_retries_default(self) -> int: - return int(os.getenv(self.port_retries_env, self.port_retries_default_value)) - - sock = Unicode("", config=True, help="The UNIX socket the Jupyter server will listen on.") - - sock_mode = Unicode( - "0600", - config=True, - help="The permissions mode for UNIX socket creation (default: 0600).", - ) - - @validate("sock_mode") - def _validate_sock_mode(self, proposal: t.Any) -> t.Any: - value = proposal["value"] - try: - converted_value = int(value.encode(), 8) - assert all( - ( - # Ensure the mode is at least user readable/writable. - bool(converted_value & stat.S_IRUSR), - bool(converted_value & stat.S_IWUSR), - # And isn't out of bounds. - converted_value <= 2**12, - ) - ) - except ValueError as e: - raise TraitError( - 'invalid --sock-mode value: %s, please specify as e.g. "0600"' % value - ) from e - except AssertionError as e: - raise TraitError( - "invalid --sock-mode value: %s, must have u+rw (0600) at a minimum" % value - ) from e - return value - - certfile = Unicode( - "", - config=True, - help=_i18n("""The full path to an SSL/TLS certificate file."""), - ) - - keyfile = Unicode( - "", - config=True, - help=_i18n("""The full path to a private key file for usage with SSL/TLS."""), - ) - - client_ca = Unicode( - "", - config=True, - help=_i18n( - """The full path to a certificate authority certificate for SSL/TLS client authentication.""" - ), - ) - - cookie_secret_file = Unicode( - config=True, help=_i18n("""The file where the cookie secret is stored.""") - ) - - @default("cookie_secret_file") - def _default_cookie_secret_file(self) -> str: - return os.path.join(self.runtime_dir, "jupyter_cookie_secret") - - cookie_secret = Bytes( - b"", - config=True, - help="""The random bytes used to secure cookies. + ) + + allow_credentials = Bool( + False, + config=True, + help=_i18n("Set the Access-Control-Allow-Credentials: true header"), + ) + + allow_root = Bool( + False, + config=True, + help=_i18n("Whether to allow the user to run the server as root."), + ) + + autoreload = Bool( + False, + config=True, + help=_i18n("Reload the webapp when changes are made to any Python src files."), + ) + + default_url = Unicode("/", config=True, help=_i18n("The default URL to redirect to from `/`")) + + ip = Unicode( + "localhost", + config=True, + help=_i18n("The IP address the Jupyter server will listen on."), + ) + + @default("ip") + def _default_ip(self) -> str: + """Return localhost if available, 127.0.0.1 otherwise. + + On some (horribly broken) systems, localhost cannot be bound. 
+ """ + s = socket.socket() + try: + s.bind(("localhost", 0)) + except OSError as e: + self.log.warning( + _i18n("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s"), e + ) + return "127.0.0.1" + else: + s.close() + return "localhost" + + @validate("ip") + def _validate_ip(self, proposal: t.Any) -> str: + value = t.cast(str, proposal["value"]) + if value == "*": + value = "" + return value + + custom_display_url = Unicode( + "", + config=True, + help=_i18n( + """Override URL shown to users. + + Replace actual URL, including protocol, address, port and base URL, + with the given value when displaying URL to the users. Do not change + the actual connection URL. If authentication token is enabled, the + token is added to the custom URL automatically. + + This option is intended to be used when the URL to display to the user + cannot be determined reliably by the Jupyter server (proxified + or containerized setups for example).""" + ), + ) + + port_env = "JUPYTER_PORT" + port_default_value = DEFAULT_JUPYTER_SERVER_PORT + + port = Integer( + config=True, + help=_i18n("The port the server will listen on (env: JUPYTER_PORT)."), + ) + + @default("port") + def _port_default(self) -> int: + return int(os.getenv(self.port_env, self.port_default_value)) + + port_retries_env = "JUPYTER_PORT_RETRIES" + port_retries_default_value = 50 + port_retries = Integer( + port_retries_default_value, + config=True, + help=_i18n( + "The number of additional ports to try if the specified port is not " + "available (env: JUPYTER_PORT_RETRIES)." + ), + ) + + @default("port_retries") + def _port_retries_default(self) -> int: + return int(os.getenv(self.port_retries_env, self.port_retries_default_value)) + + sock = Unicode("", config=True, help="The UNIX socket the Jupyter server will listen on.") + + sock_mode = Unicode( + "0600", + config=True, + help="The permissions mode for UNIX socket creation (default: 0600).", + ) + + @validate("sock_mode") + def _validate_sock_mode(self, proposal: t.Any) -> t.Any: + value = proposal["value"] + try: + converted_value = int(value.encode(), 8) + assert all( + ( + # Ensure the mode is at least user readable/writable. + bool(converted_value & stat.S_IRUSR), + bool(converted_value & stat.S_IWUSR), + # And isn't out of bounds. + converted_value <= 2**12, + ) + ) + except ValueError as e: + raise TraitError( + 'invalid --sock-mode value: %s, please specify as e.g. "0600"' % value + ) from e + except AssertionError as e: + raise TraitError( + "invalid --sock-mode value: %s, must have u+rw (0600) at a minimum" % value + ) from e + return value + + certfile = Unicode( + "", + config=True, + help=_i18n("""The full path to an SSL/TLS certificate file."""), + ) + + keyfile = Unicode( + "", + config=True, + help=_i18n("""The full path to a private key file for usage with SSL/TLS."""), + ) + + client_ca = Unicode( + "", + config=True, + help=_i18n( + """The full path to a certificate authority certificate for SSL/TLS client authentication.""" + ), + ) + + cookie_secret_file = Unicode( + config=True, help=_i18n("""The file where the cookie secret is stored.""") + ) + + @default("cookie_secret_file") + def _default_cookie_secret_file(self) -> str: + return os.path.join(self.runtime_dir, "jupyter_cookie_secret") + + cookie_secret = Bytes( + b"", + config=True, + help="""The random bytes used to secure cookies. By default this is a new random number every time you start the server. Set it to a value in a config file to enable logins to persist across server sessions. 
Note: Cookie secrets should be kept private, do not share config files with cookie_secret stored in plaintext (you can read the value from a file). """, - ) - - @default("cookie_secret") - def _default_cookie_secret(self) -> bytes: - if os.path.exists(self.cookie_secret_file): - with open(self.cookie_secret_file, "rb") as f: - key = f.read() - else: - key = encodebytes(os.urandom(32)) - self._write_cookie_secret_file(key) - h = hmac.new(key, digestmod=hashlib.sha256) - h.update(self.password.encode()) - return h.digest() - - def _write_cookie_secret_file(self, secret: bytes) -> None: - """write my secret to my secret_file""" - self.log.info(_i18n("Writing Jupyter server cookie secret to %s"), self.cookie_secret_file) - try: - with secure_write(self.cookie_secret_file, True) as f: - f.write(secret) - except OSError as e: - self.log.error( - _i18n("Failed to write cookie secret to %s: %s"), - self.cookie_secret_file, - e, - ) - - _token_set = False - - token = Unicode("", help=_i18n("""DEPRECATED. Use IdentityProvider.token""")).tag( - config=True - ) - - @observe("token") - def _deprecated_token(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "IdentityProvider") - - @default("token") - def _deprecated_token_access(self) -> str: - warnings.warn( - "ServerApp.token config is deprecated in jupyter-server 2.0. Use IdentityProvider.token", - DeprecationWarning, - stacklevel=3, - ) - return self.identity_provider.token - - min_open_files_limit = Integer( - config=True, - help=""" + ) + + @default("cookie_secret") + def _default_cookie_secret(self) -> bytes: + if os.path.exists(self.cookie_secret_file): + with open(self.cookie_secret_file, "rb") as f: + key = f.read() + else: + key = encodebytes(os.urandom(32)) + self._write_cookie_secret_file(key) + h = hmac.new(key, digestmod=hashlib.sha256) + h.update(self.password.encode()) + return h.digest() + + def _write_cookie_secret_file(self, secret: bytes) -> None: + """write my secret to my secret_file""" + self.log.info(_i18n("Writing Jupyter server cookie secret to %s"), self.cookie_secret_file) + try: + with secure_write(self.cookie_secret_file, True) as f: + f.write(secret) + except OSError as e: + self.log.error( + _i18n("Failed to write cookie secret to %s: %s"), + self.cookie_secret_file, + e, + ) + + _token_set = False + + token = Unicode("", help=_i18n("""DEPRECATED. Use IdentityProvider.token""")).tag( + config=True + ) + + @observe("token") + def _deprecated_token(self, change: t.Any) -> None: + self._warn_deprecated_config(change, "IdentityProvider") + + @default("token") + def _deprecated_token_access(self) -> str: + warnings.warn( + "ServerApp.token config is deprecated in jupyter-server 2.0. Use IdentityProvider.token", + DeprecationWarning, + stacklevel=3, + ) + return self.identity_provider.token + + min_open_files_limit = Integer( + config=True, + help=""" Gets or sets a lower bound on the open file handles process resource limit. This may need to be increased if you run into an OSError: [Errno 24] Too many open files. This is not applicable when running on Windows. 
""", - allow_none=True, - ) + allow_none=True, + ) - @default("min_open_files_limit") - def _default_min_open_files_limit(self) -> t.Optional[int]: - if resource is None: - # Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows) - return None # type:ignore[unreachable] + @default("min_open_files_limit") + def _default_min_open_files_limit(self) -> t.Optional[int]: + if resource is None: + # Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows) + return None # type:ignore[unreachable] - soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) + soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) - DEFAULT_SOFT = 4096 - if hard >= DEFAULT_SOFT: - return DEFAULT_SOFT + DEFAULT_SOFT = 4096 + if hard >= DEFAULT_SOFT: + return DEFAULT_SOFT - self.log.debug( - "Default value for min_open_files_limit is ignored (hard=%r, soft=%r)", - hard, - soft, - ) + self.log.debug( + "Default value for min_open_files_limit is ignored (hard=%r, soft=%r)", + hard, + soft, + ) - return soft + return soft - max_body_size = Integer( - 512 * 1024 * 1024, - config=True, - help=""" + max_body_size = Integer( + 512 * 1024 * 1024, + config=True, + help=""" Sets the maximum allowed size of the client request body, specified in the Content-Length request header field. If the size in a request exceeds the configured value, a malformed HTTP message is returned to @@ -1140,66 +1140,66 @@ def _default_min_open_files_limit(self) -> t.Optional[int]: """, ) - max_buffer_size = Integer( - 512 * 1024 * 1024, - config=True, - help=""" + max_buffer_size = Integer( + 512 * 1024 * 1024, + config=True, + help=""" Gets or sets the maximum amount of memory, in bytes, that is allocated for use by the buffer manager. """, ) - password = Unicode( - "", - config=True, - help="""DEPRECATED in 2.0. Use PasswordIdentityProvider.hashed_password""", - ) - - password_required = Bool( - False, - config=True, - help="""DEPRECATED in 2.0. Use PasswordIdentityProvider.password_required""", - ) - - allow_password_change = Bool( - True, - config=True, - help="""DEPRECATED in 2.0. Use PasswordIdentityProvider.allow_password_change""", - ) - - def _warn_deprecated_config( - self, change: t.Any, clsname: str, new_name: t.Optional[str] = None - ) -> None: - """Warn on deprecated config.""" - if new_name is None: - new_name = change.name - if clsname not in self.config or new_name not in self.config[clsname]: - # Deprecated config used, new config not used. - # Use deprecated config, warn about new name. - self.log.warning( - f"ServerApp.{change.name} config is deprecated in 2.0. Use {clsname}.{new_name}." - ) - self.config[clsname][new_name] = change.new - # Deprecated config used, new config also used. - # Warn only if the values differ. - # If the values are the same, assume intentional backward-compatible config. - elif self.config[clsname][new_name] != change.new: - self.log.warning( - f"Ignoring deprecated ServerApp.{change.name} config. Using {clsname}.{new_name}." 
- ) - - @observe("password") - def _deprecated_password(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "PasswordIdentityProvider", new_name="hashed_password") - - @observe("password_required", "allow_password_change") - def _deprecated_password_config(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "PasswordIdentityProvider") - - disable_check_xsrf = Bool( - False, - config=True, - help="""Disable cross-site-request-forgery protection + password = Unicode( + "", + config=True, + help="""DEPRECATED in 2.0. Use PasswordIdentityProvider.hashed_password""", + ) + + password_required = Bool( + False, + config=True, + help="""DEPRECATED in 2.0. Use PasswordIdentityProvider.password_required""", + ) + + allow_password_change = Bool( + True, + config=True, + help="""DEPRECATED in 2.0. Use PasswordIdentityProvider.allow_password_change""", + ) + + def _warn_deprecated_config( + self, change: t.Any, clsname: str, new_name: t.Optional[str] = None + ) -> None: + """Warn on deprecated config.""" + if new_name is None: + new_name = change.name + if clsname not in self.config or new_name not in self.config[clsname]: + # Deprecated config used, new config not used. + # Use deprecated config, warn about new name. + self.log.warning( + f"ServerApp.{change.name} config is deprecated in 2.0. Use {clsname}.{new_name}." + ) + self.config[clsname][new_name] = change.new + # Deprecated config used, new config also used. + # Warn only if the values differ. + # If the values are the same, assume intentional backward-compatible config. + elif self.config[clsname][new_name] != change.new: + self.log.warning( + f"Ignoring deprecated ServerApp.{change.name} config. Using {clsname}.{new_name}." + ) + + @observe("password") + def _deprecated_password(self, change: t.Any) -> None: + self._warn_deprecated_config(change, "PasswordIdentityProvider", new_name="hashed_password") + + @observe("password_required", "allow_password_change") + def _deprecated_password_config(self, change: t.Any) -> None: + self._warn_deprecated_config(change, "PasswordIdentityProvider") + + disable_check_xsrf = Bool( + False, + config=True, + help="""Disable cross-site-request-forgery protection Jupyter server includes protection from cross-site request forgeries, requiring API requests to either: @@ -1212,11 +1212,11 @@ def _deprecated_password_config(self, change: t.Any) -> None: These services can disable all authentication and security checks, with the full knowledge of what that implies. """, - ) + ) - allow_remote_access = Bool( - config=True, - help="""Allow requests where the Host header doesn't point to a local server + allow_remote_access = Bool( + config=True, + help="""Allow requests where the Host header doesn't point to a local server By default, requests get a 403 forbidden response if the 'Host' header shows that the browser thinks it's on a non-local domain. @@ -1229,45 +1229,45 @@ def _deprecated_password_config(self, change: t.Any) -> None: Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along with hostnames configured in local_hostnames. 
""", - ) - - @default("allow_remote_access") - def _default_allow_remote(self) -> bool: - """Disallow remote access if we're listening only on loopback addresses""" + ) - # if blank, self.ip was configured to "*" meaning bind to all interfaces, - # see _valdate_ip - if self.ip == "": - return True + @default("allow_remote_access") + def _default_allow_remote(self) -> bool: + """Disallow remote access if we're listening only on loopback addresses""" - try: - addr = ipaddress.ip_address(self.ip) - except ValueError: - # Address is a hostname - for info in socket.getaddrinfo(self.ip, self.port, 0, socket.SOCK_STREAM): - addr = info[4][0] # type:ignore[assignment] + # if blank, self.ip was configured to "*" meaning bind to all interfaces, + # see _valdate_ip + if self.ip == "": + return True try: - parsed = ipaddress.ip_address(addr.split("%")[0]) # type:ignore[union-attr] + addr = ipaddress.ip_address(self.ip) except ValueError: - self.log.warning("Unrecognised IP address: %r", addr) - continue - - # Macs map localhost to 'fe80::1%lo0', a link local address - # scoped to the loopback interface. For now, we'll assume that - # any scoped link-local address is effectively local. - if not ( - parsed.is_loopback or (("%" in addr) and parsed.is_link_local) # type:ignore[operator] - ): - return True - return False - else: - return not addr.is_loopback - - use_redirect_file = Bool( - True, - config=True, - help="""Disable launching browser by redirect file + # Address is a hostname + for info in socket.getaddrinfo(self.ip, self.port, 0, socket.SOCK_STREAM): + addr = info[4][0] # type:ignore[assignment] + + try: + parsed = ipaddress.ip_address(addr.split("%")[0]) # type:ignore[union-attr] + except ValueError: + self.log.warning("Unrecognised IP address: %r", addr) + continue + + # Macs map localhost to 'fe80::1%lo0', a link local address + # scoped to the loopback interface. For now, we'll assume that + # any scoped link-local address is effectively local. + if not ( + parsed.is_loopback or (("%" in addr) and parsed.is_link_local) # type:ignore[operator] + ): + return True + return False + else: + return not addr.is_loopback + + use_redirect_file = Bool( + True, + config=True, + help="""Disable launching browser by redirect file For versions of notebook > 5.7.2, a security feature measure was added that prevented the authentication token used to launch the browser from being visible. This feature makes it difficult for other users on a multi-user system from @@ -1280,1751 +1280,1751 @@ def _default_allow_remote(self) -> bool: Disabling this setting to False will disable this behavior, allowing the browser to launch by using a URL and visible token (as before). """, - ) + ) - local_hostnames = List( - Unicode(), - ["localhost"], - config=True, - help="""Hostnames to allow as local when allow_remote_access is False. + local_hostnames = List( + Unicode(), + ["localhost"], + config=True, + help="""Hostnames to allow as local when allow_remote_access is False. Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as local as well. """, - ) + ) - open_browser = Bool( - False, - config=True, - help="""Whether to open in a browser after starting. + open_browser = Bool( + False, + config=True, + help="""Whether to open in a browser after starting. The specific browser used is platform dependent and determined by the python standard library `webbrowser` module, unless it is overridden using the --browser (ServerApp.browser) configuration option. 
""", - ) + ) - browser = Unicode( - "", - config=True, - help="""Specify what command to use to invoke a web + browser = Unicode( + "", + config=True, + help="""Specify what command to use to invoke a web browser when starting the server. If not specified, the default browser will be determined by the `webbrowser` standard library module, which allows setting of the BROWSER environment variable to override it. """, - ) - - webbrowser_open_new = Integer( - 2, - config=True, - help=_i18n( - """Specify where to open the server on startup. This is the - `new` argument passed to the standard library method `webbrowser.open`. - The behaviour is not guaranteed, but depends on browser support. Valid - values are: - - - 2 opens a new tab, - - 1 opens a new window, - - 0 opens in an existing window. - - See the `webbrowser.open` documentation for details. - """ - ), - ) - - tornado_settings = Dict( - config=True, - help=_i18n( - "Supply overrides for the tornado.web.Application that the Jupyter server uses." - ), - ) - - websocket_compression_options = Any( - None, - config=True, - help=_i18n( - """ - Set the tornado compression options for websocket connections. - - This value will be returned from :meth:`WebSocketHandler.get_compression_options`. - None (default) will disable compression. - A dict (even an empty one) will enable compression. - - See the tornado docs for WebSocketHandler.get_compression_options for details. - """ - ), - ) - terminado_settings = Dict( - Union([List(), Unicode()]), - config=True, - help=_i18n('Supply overrides for terminado. Currently only supports "shell_command".'), - ) - - cookie_options = Dict( - config=True, - help=_i18n("DEPRECATED. Use IdentityProvider.cookie_options"), - ) - get_secure_cookie_kwargs = Dict( - config=True, - help=_i18n("DEPRECATED. Use IdentityProvider.get_secure_cookie_kwargs"), - ) - - @observe("cookie_options", "get_secure_cookie_kwargs") - def _deprecated_cookie_config(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "IdentityProvider") - - ssl_options = Dict( - allow_none=True, - config=True, - help=_i18n( - """Supply SSL options for the tornado HTTPServer. - See the tornado docs for details.""" - ), - ) - - jinja_environment_options = Dict( - config=True, - help=_i18n("Supply extra arguments that will be passed to Jinja environment."), - ) - - jinja_template_vars = Dict( - config=True, - help=_i18n("Extra variables to supply to jinja templates when rendering."), - ) - - base_url = Unicode( - "/", - config=True, - help="""The base URL for the Jupyter server. + ) + + webbrowser_open_new = Integer( + 2, + config=True, + help=_i18n( + """Specify where to open the server on startup. This is the + `new` argument passed to the standard library method `webbrowser.open`. + The behaviour is not guaranteed, but depends on browser support. Valid + values are: + + - 2 opens a new tab, + - 1 opens a new window, + - 0 opens in an existing window. + + See the `webbrowser.open` documentation for details. + """ + ), + ) + + tornado_settings = Dict( + config=True, + help=_i18n( + "Supply overrides for the tornado.web.Application that the Jupyter server uses." + ), + ) + + websocket_compression_options = Any( + None, + config=True, + help=_i18n( + """ + Set the tornado compression options for websocket connections. + + This value will be returned from :meth:`WebSocketHandler.get_compression_options`. + None (default) will disable compression. + A dict (even an empty one) will enable compression. 
+ + See the tornado docs for WebSocketHandler.get_compression_options for details. + """ + ), + ) + terminado_settings = Dict( + Union([List(), Unicode()]), + config=True, + help=_i18n('Supply overrides for terminado. Currently only supports "shell_command".'), + ) + + cookie_options = Dict( + config=True, + help=_i18n("DEPRECATED. Use IdentityProvider.cookie_options"), + ) + get_secure_cookie_kwargs = Dict( + config=True, + help=_i18n("DEPRECATED. Use IdentityProvider.get_secure_cookie_kwargs"), + ) + + @observe("cookie_options", "get_secure_cookie_kwargs") + def _deprecated_cookie_config(self, change: t.Any) -> None: + self._warn_deprecated_config(change, "IdentityProvider") + + ssl_options = Dict( + allow_none=True, + config=True, + help=_i18n( + """Supply SSL options for the tornado HTTPServer. + See the tornado docs for details.""" + ), + ) + + jinja_environment_options = Dict( + config=True, + help=_i18n("Supply extra arguments that will be passed to Jinja environment."), + ) + + jinja_template_vars = Dict( + config=True, + help=_i18n("Extra variables to supply to jinja templates when rendering."), + ) + + base_url = Unicode( + "/", + config=True, + help="""The base URL for the Jupyter server. Leading and trailing slashes can be omitted, and will automatically be added. """, - ) - - @validate("base_url") - def _update_base_url(self, proposal: t.Any) -> str: - value = t.cast(str, proposal["value"]) - if not value.startswith("/"): - value = "/" + value - if not value.endswith("/"): - value = value + "/" - return value - - extra_static_paths = List( - Unicode(), - config=True, - help="""Extra paths to search for serving static files. + ) + + @validate("base_url") + def _update_base_url(self, proposal: t.Any) -> str: + value = t.cast(str, proposal["value"]) + if not value.startswith("/"): + value = "/" + value + if not value.endswith("/"): + value = value + "/" + return value + + extra_static_paths = List( + Unicode(), + config=True, + help="""Extra paths to search for serving static files. This allows adding javascript/css to be available from the Jupyter server machine, or overriding individual files in the IPython""", - ) - - @property - def static_file_path(self) -> list[str]: - """return extra paths + the default location""" - return [*self.extra_static_paths, DEFAULT_STATIC_FILES_PATH] - - static_custom_path = List(Unicode(), help=_i18n("""Path to search for custom.js, css""")) - - @default("static_custom_path") - def _default_static_custom_path(self) -> list[str]: - return [os.path.join(d, "custom") for d in (self.config_dir, DEFAULT_STATIC_FILES_PATH)] - - extra_template_paths = List( - Unicode(), - config=True, - help=_i18n( - """Extra paths to search for serving jinja templates. 
- - Can be used to override templates from jupyter_server.templates.""" - ), - ) - - @property - def template_file_path(self) -> list[str]: - """return extra paths + the default locations""" - return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST - - extra_services = List( - Unicode(), - config=True, - help=_i18n( - """handlers that should be loaded at higher priority than the default services""" - ), - ) - - websocket_url = Unicode( - "", - config=True, - help="""The base URL for websockets, + ) + + @property + def static_file_path(self) -> list[str]: + """return extra paths + the default location""" + return [*self.extra_static_paths, DEFAULT_STATIC_FILES_PATH] + + static_custom_path = List(Unicode(), help=_i18n("""Path to search for custom.js, css""")) + + @default("static_custom_path") + def _default_static_custom_path(self) -> list[str]: + return [os.path.join(d, "custom") for d in (self.config_dir, DEFAULT_STATIC_FILES_PATH)] + + extra_template_paths = List( + Unicode(), + config=True, + help=_i18n( + """Extra paths to search for serving jinja templates. + + Can be used to override templates from jupyter_server.templates.""" + ), + ) + + @property + def template_file_path(self) -> list[str]: + """return extra paths + the default locations""" + return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST + + extra_services = List( + Unicode(), + config=True, + help=_i18n( + """handlers that should be loaded at higher priority than the default services""" + ), + ) + + websocket_url = Unicode( + "", + config=True, + help="""The base URL for websockets, if it differs from the HTTP server (hint: it almost certainly doesn't). Should be in the form of an HTTP origin: ws[s]://hostname[:port] """, - ) - - quit_button = Bool( - True, - config=True, - help="""If True, display controls to shut down the Jupyter server, such as menu items or buttons.""", - ) - - contents_manager_class = Type( - default_value=AsyncLargeFileManager, - klass=ContentsManager, - config=True, - help=_i18n("The content manager class to use."), - ) - - kernel_manager_class = Type( - klass=MappingKernelManager, - config=True, - help=_i18n("The kernel manager class to use."), - ) - - @default("kernel_manager_class") - def _default_kernel_manager_class(self) -> t.Union[str, type[AsyncMappingKernelManager]]: - if self.gateway_config.gateway_enabled: - return "jupyter_server.gateway.managers.GatewayMappingKernelManager" - return AsyncMappingKernelManager - - session_manager_class = Type( - config=True, - help=_i18n("The session manager class to use."), - ) - - @default("session_manager_class") - def _default_session_manager_class(self) -> t.Union[str, type[SessionManager]]: - if self.gateway_config.gateway_enabled: - return "jupyter_server.gateway.managers.GatewaySessionManager" - return SessionManager - - kernel_websocket_connection_class = Type( - klass=BaseKernelWebsocketConnection, - config=True, - help=_i18n("The kernel websocket connection class to use."), - ) - - @default("kernel_websocket_connection_class") - def _default_kernel_websocket_connection_class( - self, - ) -> t.Union[str, type[ZMQChannelsWebsocketConnection]]: - if self.gateway_config.gateway_enabled: - return "jupyter_server.gateway.connections.GatewayWebSocketConnection" - return ZMQChannelsWebsocketConnection - - config_manager_class = Type( - default_value=ConfigManager, - config=True, - help=_i18n("The config manager class to use"), - ) - - kernel_spec_manager = Instance(KernelSpecManager, allow_none=True) - - kernel_spec_manager_class = Type( 
- config=True, - help=""" + ) + + quit_button = Bool( + True, + config=True, + help="""If True, display controls to shut down the Jupyter server, such as menu items or buttons.""", + ) + + contents_manager_class = Type( + default_value=AsyncLargeFileManager, + klass=ContentsManager, + config=True, + help=_i18n("The content manager class to use."), + ) + + kernel_manager_class = Type( + klass=MappingKernelManager, + config=True, + help=_i18n("The kernel manager class to use."), + ) + + @default("kernel_manager_class") + def _default_kernel_manager_class(self) -> t.Union[str, type[AsyncMappingKernelManager]]: + if self.gateway_config.gateway_enabled: + return "jupyter_server.gateway.managers.GatewayMappingKernelManager" + return AsyncMappingKernelManager + + session_manager_class = Type( + config=True, + help=_i18n("The session manager class to use."), + ) + + @default("session_manager_class") + def _default_session_manager_class(self) -> t.Union[str, type[SessionManager]]: + if self.gateway_config.gateway_enabled: + return "jupyter_server.gateway.managers.GatewaySessionManager" + return SessionManager + + kernel_websocket_connection_class = Type( + klass=BaseKernelWebsocketConnection, + config=True, + help=_i18n("The kernel websocket connection class to use."), + ) + + @default("kernel_websocket_connection_class") + def _default_kernel_websocket_connection_class( + self, + ) -> t.Union[str, type[ZMQChannelsWebsocketConnection]]: + if self.gateway_config.gateway_enabled: + return "jupyter_server.gateway.connections.GatewayWebSocketConnection" + return ZMQChannelsWebsocketConnection + + config_manager_class = Type( + default_value=ConfigManager, + config=True, + help=_i18n("The config manager class to use"), + ) + + kernel_spec_manager = Instance(KernelSpecManager, allow_none=True) + + kernel_spec_manager_class = Type( + config=True, + help=""" The kernel spec manager class to use. Should be a subclass of `jupyter_client.kernelspec.KernelSpecManager`. The Api of KernelSpecManager is provisional and might change without warning between this version of Jupyter and the next stable one. """, - ) - - @default("kernel_spec_manager_class") - def _default_kernel_spec_manager_class(self) -> t.Union[str, type[KernelSpecManager]]: - if self.gateway_config.gateway_enabled: - return "jupyter_server.gateway.managers.GatewayKernelSpecManager" - return KernelSpecManager - - login_handler_class = Type( - default_value=LoginHandler, - klass=web.RequestHandler, - allow_none=True, - config=True, - help=_i18n("The login handler class to use."), - ) - - logout_handler_class = Type( - default_value=LogoutHandler, - klass=web.RequestHandler, - allow_none=True, - config=True, - help=_i18n("The logout handler class to use."), - ) - # TODO: detect deprecated login handler config - - authorizer_class = Type( - default_value=AllowAllAuthorizer, - klass=Authorizer, - config=True, - help=_i18n("The authorizer class to use."), - ) - - identity_provider_class = Type( - default_value=PasswordIdentityProvider, - klass=IdentityProvider, - config=True, - help=_i18n("The identity provider class to use."), - ) - - trust_xheaders = Bool( - False, - config=True, - help=( - _i18n( - "Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers" - "sent by the upstream reverse proxy. 
Necessary if the proxy handles SSL" - ) - ), - ) - - event_logger = Instance( - EventLogger, - allow_none=True, - help="An EventLogger for emitting structured event data from Jupyter Server and extensions.", - ) - - info_file = Unicode() - - @default("info_file") - def _default_info_file(self) -> str: - info_file = "jpserver-%s.json" % os.getpid() - return os.path.join(self.runtime_dir, info_file) - - no_browser_open_file = Bool( - False, help="If True, do not write redirect HTML file disk, or show in messages." - ) - - browser_open_file = Unicode() - - @default("browser_open_file") - def _default_browser_open_file(self) -> str: - basename = "jpserver-%s-open.html" % os.getpid() - return os.path.join(self.runtime_dir, basename) - - browser_open_file_to_run = Unicode() - - @default("browser_open_file_to_run") - def _default_browser_open_file_to_run(self) -> str: - basename = "jpserver-file-to-run-%s-open.html" % os.getpid() - return os.path.join(self.runtime_dir, basename) - - pylab = Unicode( - "disabled", - config=True, - help=_i18n( - """ - DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. - """ - ), - ) - - @observe("pylab") - def _update_pylab(self, change: t.Any) -> None: - """when --pylab is specified, display a warning and exit""" - backend = " %s" % change["new"] if change["new"] != "warn" else "" - self.log.error( - _i18n("Support for specifying --pylab on the command line has been removed.") - ) - self.log.error( - _i18n("Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.").format( - backend - ) - ) - self.exit(1) - - notebook_dir = Unicode(config=True, help=_i18n("DEPRECATED, use root_dir.")) - - @observe("notebook_dir") - def _update_notebook_dir(self, change: t.Any) -> None: - if self._root_dir_set: - # only use deprecated config if new config is not set - return - self.log.warning(_i18n("notebook_dir is deprecated, use root_dir")) - self.root_dir = change["new"] - - external_connection_dir = Unicode( - None, - allow_none=True, - config=True, - help=_i18n( - "The directory to look at for external kernel connection files, if allow_external_kernels is True. " - "Defaults to Jupyter runtime_dir/external_kernels. " - "Make sure that this directory is not filled with left-over connection files, " - "that could result in unnecessary kernel manager creations." - ), - ) - - allow_external_kernels = Bool( - False, - config=True, - help=_i18n( - "Whether or not to allow external kernels, whose connection files are placed in external_connection_dir." - ), - ) - - root_dir = Unicode(config=True, help=_i18n("The directory to use for notebooks and kernels.")) - _root_dir_set = False - - @default("root_dir") - def _default_root_dir(self) -> str: - if self.file_to_run: - self._root_dir_set = True - return os.path.dirname(os.path.abspath(self.file_to_run)) - else: - return os.getcwd() - - def _normalize_dir(self, value: str) -> str: - """Normalize a directory.""" - # Strip any trailing slashes - # *except* if it's root - _, path = os.path.splitdrive(value) - if path == os.sep: - return value - value = value.rstrip(os.sep) - if not os.path.isabs(value): - # If we receive a non-absolute path, make it absolute. 
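`root_dir`, `allow_external_kernels`, and `external_connection_dir` above are all declared with `config=True`, so they can be set from a config file. A minimal `jupyter_server_config.py` sketch; the paths here are invented placeholders:

```python
# jupyter_server_config.py -- illustrative values only.
c = get_config()  # noqa: F821  (injected by Jupyter's config loader)

# Serve notebooks from a fixed directory (replaces the deprecated
# ServerApp.notebook_dir alias handled above).
c.ServerApp.root_dir = "/srv/notebooks"

# Opt in to kernels whose connection files are managed outside this server;
# when unset, the directory defaults to <runtime_dir>/external_kernels.
c.ServerApp.allow_external_kernels = True
c.ServerApp.external_connection_dir = "/srv/external_kernels"
```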
- value = os.path.abspath(value) - return value - - @validate("root_dir") - def _root_dir_validate(self, proposal: t.Any) -> str: - value = self._normalize_dir(proposal["value"]) - if not os.path.isdir(value): - raise TraitError(trans.gettext("No such directory: '%r'") % value) - return value - - @observe("root_dir") - def _root_dir_changed(self, change: t.Any) -> None: - # record that root_dir is set, - # which affects loading of deprecated notebook_dir - self._root_dir_set = True - - preferred_dir = Unicode( - config=True, - help=trans.gettext("Preferred starting directory to use for notebooks and kernels."), - ) - - @default("preferred_dir") - def _default_prefered_dir(self) -> str: - return self.root_dir - - @validate("preferred_dir") - def _preferred_dir_validate(self, proposal: t.Any) -> str: - value = self._normalize_dir(proposal["value"]) - if not os.path.isdir(value): - raise TraitError(trans.gettext("No such preferred dir: '%r'") % value) - return value - - @observe("server_extensions") - def _update_server_extensions(self, change: t.Any) -> None: - self.log.warning(_i18n("server_extensions is deprecated, use jpserver_extensions")) - self.server_extensions = change["new"] - - jpserver_extensions = Dict( - default_value={}, - value_trait=Bool(), - config=True, - help=( - _i18n( - "Dict of Python modules to load as Jupyter server extensions." - "Entry values can be used to enable and disable the loading of" - "the extensions. The extensions will be loaded in alphabetical " - "order." - ) - ), - ) - - reraise_server_extension_failures = Bool( - False, - config=True, - help=_i18n("Reraise exceptions encountered loading server extensions?"), - ) - - kernel_ws_protocol = Unicode( - allow_none=True, - config=True, - help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.kernel_ws_protocol"), - ) - - @observe("kernel_ws_protocol") - def _deprecated_kernel_ws_protocol(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection") - - limit_rate = Bool( - allow_none=True, - config=True, - help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.limit_rate"), - ) - - @observe("limit_rate") - def _deprecated_limit_rate(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection") - - iopub_msg_rate_limit = Float( - allow_none=True, - config=True, - help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.iopub_msg_rate_limit"), - ) - - @observe("iopub_msg_rate_limit") - def _deprecated_iopub_msg_rate_limit(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection") - - iopub_data_rate_limit = Float( - allow_none=True, - config=True, - help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.iopub_data_rate_limit"), - ) - - @observe("iopub_data_rate_limit") - def _deprecated_iopub_data_rate_limit(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection") - - rate_limit_window = Float( - allow_none=True, - config=True, - help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.rate_limit_window"), - ) - - @observe("rate_limit_window") - def _deprecated_rate_limit_window(self, change: t.Any) -> None: - self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection") - - shutdown_no_activity_timeout = Integer( - 0, - config=True, - help=( - "Shut down the server after N seconds with no kernels" - "running and no activity. 
" - "This can be used together with culling idle kernels " - "(MappingKernelManager.cull_idle_timeout) to " - "shutdown the Jupyter server when it's not in use. This is not " - "precisely timed: it may shut down up to a minute later. " - "0 (the default) disables this automatic shutdown." - ), - ) - - terminals_enabled = Bool( - config=True, - help=_i18n( - """Set to False to disable terminals. - - This does *not* make the server more secure by itself. - Anything the user can in a terminal, they can also do in a notebook. - - Terminals may also be automatically disabled if the terminado package - is not available. - """ - ), - ) - - @default("terminals_enabled") - def _default_terminals_enabled(self) -> bool: - return True - - authenticate_prometheus = Bool( - True, - help="""" - Require authentication to access prometheus metrics. - """, - config=True, - ) + ) - static_immutable_cache = List( - Unicode(), - help=""" - Paths to set up static files as immutable. + @default("kernel_spec_manager_class") + def _default_kernel_spec_manager_class(self) -> t.Union[str, type[KernelSpecManager]]: + if self.gateway_config.gateway_enabled: + return "jupyter_server.gateway.managers.GatewayKernelSpecManager" + return KernelSpecManager + + login_handler_class = Type( + default_value=LoginHandler, + klass=web.RequestHandler, + allow_none=True, + config=True, + help=_i18n("The login handler class to use."), + ) - This allow setting up the cache control of static files as immutable. - It should be used for static file named with a hash for instance. - """, - config=True, - ) - - _starter_app = Instance( - default_value=None, - allow_none=True, - klass="jupyter_server.extension.application.ExtensionApp", - ) - - @property - def starter_app(self) -> t.Any: - """Get the Extension that started this server.""" - return self._starter_app - - def parse_command_line(self, argv: t.Optional[list[str]] = None) -> None: - """Parse the command line options.""" - super().parse_command_line(argv) - - if self.extra_args: - arg0 = self.extra_args[0] - f = os.path.abspath(arg0) - self.argv.remove(arg0) - if not os.path.exists(f): - self.log.critical(_i18n("No such file or directory: %s"), f) - self.exit(1) + logout_handler_class = Type( + default_value=LogoutHandler, + klass=web.RequestHandler, + allow_none=True, + config=True, + help=_i18n("The logout handler class to use."), + ) + # TODO: detect deprecated login handler config - # Use config here, to ensure that it takes higher priority than - # anything that comes from the config dirs. - c = Config() - if os.path.isdir(f): - c.ServerApp.root_dir = f - elif os.path.isfile(f): - c.ServerApp.file_to_run = f - self.update_config(c) - - def init_configurables(self) -> None: - """Initialize configurables.""" - # If gateway server is configured, replace appropriate managers to perform redirection. To make - # this determination, instantiate the GatewayClient config singleton. 
- self.gateway_config = GatewayClient.instance(parent=self) - - if not issubclass( - self.kernel_manager_class, - AsyncMappingKernelManager, - ): - warnings.warn( - "The synchronous MappingKernelManager class is deprecated and will not be supported in Jupyter Server 3.0", - DeprecationWarning, - stacklevel=2, - ) - - if not issubclass( - self.contents_manager_class, - AsyncContentsManager, - ): - warnings.warn( - "The synchronous ContentsManager classes are deprecated and will not be supported in Jupyter Server 3.0", - DeprecationWarning, - stacklevel=2, - ) + authorizer_class = Type( + default_value=AllowAllAuthorizer, + klass=Authorizer, + config=True, + help=_i18n("The authorizer class to use."), + ) - self.kernel_spec_manager = self.kernel_spec_manager_class( - parent=self, + identity_provider_class = Type( + default_value=PasswordIdentityProvider, + klass=IdentityProvider, + config=True, + help=_i18n("The identity provider class to use."), ) - kwargs = { - "parent": self, - "log": self.log, - "connection_dir": self.runtime_dir, - "kernel_spec_manager": self.kernel_spec_manager, - } - if jupyter_client.version_info > (8, 3, 0): # type:ignore[attr-defined] - if self.allow_external_kernels: - external_connection_dir = self.external_connection_dir - if external_connection_dir is None: - external_connection_dir = str(Path(self.runtime_dir) / "external_kernels") - kwargs["external_connection_dir"] = external_connection_dir - elif self.allow_external_kernels: - self.log.warning( - "Although allow_external_kernels=True, external kernels are not supported " - "because jupyter-client's version does not allow them (should be >8.3.0)." - ) - - self.kernel_manager = self.kernel_manager_class(**kwargs) - self.contents_manager = self.contents_manager_class( - parent=self, - log=self.log, - ) - # Trigger a default/validation here explicitly while we still support the - # deprecated trait on ServerApp (FIXME remove when deprecation finalized) - self.contents_manager.preferred_dir # noqa: B018 - self.session_manager = self.session_manager_class( - parent=self, - log=self.log, - kernel_manager=self.kernel_manager, - contents_manager=self.contents_manager, - ) - self.config_manager = self.config_manager_class( - parent=self, - log=self.log, - ) - identity_provider_kwargs = {"parent": self, "log": self.log} - - if ( - self.login_handler_class is not LoginHandler - and self.identity_provider_class is PasswordIdentityProvider - ): - # default identity provider, non-default LoginHandler - # this indicates legacy custom LoginHandler config. - # enable LegacyIdentityProvider, which defers to the LoginHandler for pre-2.0 behavior. - self.identity_provider_class = LegacyIdentityProvider - self.log.warning( - f"Customizing authentication via ServerApp.login_handler_class={self.login_handler_class}" - " is deprecated in Jupyter Server 2.0." - " Use ServerApp.identity_provider_class." - " Falling back on legacy authentication.", - ) - identity_provider_kwargs["login_handler_class"] = self.login_handler_class - if self.logout_handler_class: - identity_provider_kwargs["logout_handler_class"] = self.logout_handler_class - elif self.login_handler_class is not LoginHandler: - # non-default login handler ignored because also explicitly set identity provider - self.log.warning( - f"Ignoring deprecated config ServerApp.login_handler_class={self.login_handler_class}." - " Superseded by ServerApp.identity_provider_class={self.identity_provider_class}." 
- ) - self.identity_provider = self.identity_provider_class(**identity_provider_kwargs) - - if self.identity_provider_class is LegacyIdentityProvider: - # legacy config stored the password in tornado_settings - self.tornado_settings["password"] = self.identity_provider.hashed_password # type:ignore[attr-defined] - self.tornado_settings["token"] = self.identity_provider.token - - if self._token_set: - self.log.warning( - "ServerApp.token config is deprecated in jupyter-server 2.0. Use IdentityProvider.token" - ) - if self.identity_provider.token_generated: - # default behavior: generated default token - # preserve deprecated ServerApp.token config - self.identity_provider.token_generated = False - self.identity_provider.token = self.token - else: - # identity_provider didn't generate a default token, - # that means it has some config that should take higher priority than deprecated ServerApp.token - self.log.warning("Ignoring deprecated ServerApp.token config") - - self.authorizer = self.authorizer_class( - parent=self, log=self.log, identity_provider=self.identity_provider - ) - - def init_logging(self) -> None: - """Initialize logging.""" - # This prevents double log messages because tornado use a root logger that - # self.log is a child of. The logging module dipatches log messages to a log - # and all of its ancenstors until propagate is set to False. - self.log.propagate = False - - for log in app_log, access_log, gen_log: - # consistent log output name (ServerApp instead of tornado.access, etc.) - log.name = self.log.name - # hook up tornado 3's loggers to our app handlers - logger = logging.getLogger("tornado") - logger.propagate = True - logger.parent = self.log - logger.setLevel(self.log.level) - - def init_event_logger(self) -> None: - """Initialize the Event Bus.""" - self.event_logger = EventLogger(parent=self) - # Load the core Jupyter Server event schemas - # All event schemas must start with Jupyter Server's - # events URI, `JUPYTER_SERVER_EVENTS_URI`. - schema_ids = [ - "https://events.jupyter.org/jupyter_server/contents_service/v1", - "https://events.jupyter.org/jupyter_server/gateway_client/v1", - "https://events.jupyter.org/jupyter_server/kernel_actions/v1", - ] - for schema_id in schema_ids: - # Get the schema path from the schema ID. - rel_schema_path = schema_id.replace(JUPYTER_SERVER_EVENTS_URI + "/", "") + ".yaml" - schema_path = DEFAULT_EVENTS_SCHEMA_PATH / rel_schema_path - # Use this pathlib object to register the schema - self.event_logger.register_event_schema(schema_path) - - def init_webapp(self) -> None: - """initialize tornado webapp""" - self.tornado_settings["allow_origin"] = self.allow_origin - self.tornado_settings["websocket_compression_options"] = self.websocket_compression_options - if self.allow_origin_pat: - self.tornado_settings["allow_origin_pat"] = re.compile(self.allow_origin_pat) - self.tornado_settings["allow_credentials"] = self.allow_credentials - self.tornado_settings["autoreload"] = self.autoreload - - # deprecate accessing these directly, in favor of identity_provider? 
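`init_configurables` above instantiates `authorizer_class` with the current identity provider, so swapping in a custom `Authorizer` subclass is a one-line config change. A sketch assuming only the public `jupyter_server.auth.Authorizer` base class; `ReadOnlyAuthorizer` and the module path are invented:

```python
from jupyter_server.auth import Authorizer


class ReadOnlyAuthorizer(Authorizer):
    """Permit read actions only, regardless of who the user is."""

    def is_authorized(self, handler, user, action, resource) -> bool:
        # `action` is one of "read", "write", or "execute".
        return action == "read"


# jupyter_server_config.py:
#   c.ServerApp.authorizer_class = "mypackage.auth.ReadOnlyAuthorizer"
```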
- self.tornado_settings["cookie_options"] = self.identity_provider.cookie_options - self.tornado_settings[ - "get_secure_cookie_kwargs" - ] = self.identity_provider.get_secure_cookie_kwargs - self.tornado_settings["token"] = self.identity_provider.token - - if self.static_immutable_cache: - self.tornado_settings["static_immutable_cache"] = self.static_immutable_cache - - # ensure default_url starts with base_url - if not self.default_url.startswith(self.base_url): - self.default_url = url_path_join(self.base_url, self.default_url) - - # Socket options validation. - if self.sock: - if self.port != DEFAULT_JUPYTER_SERVER_PORT: - self.log.critical( - ("Options --port and --sock are mutually exclusive. Aborting."), - ) - sys.exit(1) - else: - # Reset the default port if we're using a UNIX socket. - self.port = 0 - - if self.open_browser: - # If we're bound to a UNIX socket, we can't reliably connect from a browser. - self.log.info( - ("Ignoring --ServerApp.open_browser due to --sock being used."), - ) + trust_xheaders = Bool( + False, + config=True, + help=( + _i18n( + "Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers" + "sent by the upstream reverse proxy. Necessary if the proxy handles SSL" + ) + ), + ) - if self.file_to_run: - self.log.critical( - ("Options --ServerApp.file_to_run and --sock are mutually exclusive."), - ) - sys.exit(1) + event_logger = Instance( + EventLogger, + allow_none=True, + help="An EventLogger for emitting structured event data from Jupyter Server and extensions.", + ) - if sys.platform.startswith("win"): - self.log.critical( - ( - "Option --sock is not supported on Windows, but got value of %s. Aborting." - % self.sock - ), - ) - sys.exit(1) - - self.web_app = ServerWebApplication( - self, - self.default_services, - self.kernel_manager, - self.contents_manager, - self.session_manager, - self.kernel_spec_manager, - self.config_manager, - self.event_logger, - self.extra_services, - self.log, - self.base_url, - self.default_url, - self.tornado_settings, - self.jinja_environment_options, - authorizer=self.authorizer, - identity_provider=self.identity_provider, - kernel_websocket_connection_class=self.kernel_websocket_connection_class, - ) - if self.certfile: - self.ssl_options["certfile"] = self.certfile - if self.keyfile: - self.ssl_options["keyfile"] = self.keyfile - if self.client_ca: - self.ssl_options["ca_certs"] = self.client_ca - if not self.ssl_options: - # could be an empty dict or None - # None indicates no SSL config - self.ssl_options = None # type:ignore[assignment] - else: - # SSL may be missing, so only import it if it's to be used - import ssl - - # PROTOCOL_TLS selects the highest ssl/tls protocol version that both the client and - # server support. When PROTOCOL_TLS is not available use PROTOCOL_SSLv23. 
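The branch above folds `certfile`/`keyfile`/`client_ca` into the `ssl_options` dict handed to Tornado's HTTPServer. A minimal TLS configuration that exercises it; the certificate paths are placeholders:

```python
# jupyter_server_config.py -- placeholder certificate paths.
c = get_config()  # noqa: F821

c.ServerApp.certfile = "/etc/jupyter/ssl/server.crt"
c.ServerApp.keyfile = "/etc/jupyter/ssl/server.key"

# Optional client-certificate auth: setting client_ca populates ca_certs,
# which in turn defaults cert_reqs to ssl.CERT_REQUIRED just below.
# c.ServerApp.client_ca = "/etc/jupyter/ssl/ca.crt"
```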
- self.ssl_options.setdefault( - "ssl_version", getattr(ssl, "PROTOCOL_TLS", ssl.PROTOCOL_SSLv23) - ) - if self.ssl_options.get("ca_certs", False): - self.ssl_options.setdefault("cert_reqs", ssl.CERT_REQUIRED) - - self.identity_provider.validate_security(self, ssl_options=self.ssl_options) - - if isinstance(self.identity_provider, LegacyIdentityProvider): - # LegacyIdentityProvider needs access to the tornado settings dict - self.identity_provider.settings = self.web_app.settings - - def init_resources(self) -> None: - """initialize system resources""" - if resource is None: - self.log.debug( # type:ignore[unreachable] - "Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)" - ) - return - - old_soft, old_hard = resource.getrlimit(resource.RLIMIT_NOFILE) - soft = self.min_open_files_limit - hard = old_hard - if soft is not None and old_soft < soft: - if hard < soft: - hard = soft - self.log.debug( - f"Raising open file limit: soft {old_soft}->{soft}; hard {old_hard}->{hard}" - ) - resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard)) - - def _get_urlparts( - self, path: t.Optional[str] = None, include_token: bool = False - ) -> urllib.parse.ParseResult: - """Constructs a urllib named tuple, ParseResult, - with default values set by server config. - The returned tuple can be manipulated using the `_replace` method. - """ - if self.sock: - scheme = "http+unix" - netloc = urlencode_unix_socket_path(self.sock) - else: - if not self.ip: - ip = "localhost" - # Handle nonexplicit hostname. - elif self.ip in ("0.0.0.0", "::"): - ip = "%s" % socket.gethostname() - else: - ip = f"[{self.ip}]" if ":" in self.ip else self.ip - netloc = f"{ip}:{self.port}" - scheme = "https" if self.certfile else "http" - if not path: - path = self.default_url - query = None - # Don't log full token if it came from config - if include_token and self.identity_provider.token: - token = ( - self.identity_provider.token if self.identity_provider.token_generated else "..." - ) - query = urllib.parse.urlencode({"token": token}) - # Build the URL Parts to dump. - urlparts = urllib.parse.ParseResult( - scheme=scheme, netloc=netloc, path=path, query=query or "", params="", fragment="" - ) - return urlparts - - @property - def public_url(self) -> str: - parts = self._get_urlparts(include_token=True) - # Update with custom pieces. - if self.custom_display_url: - # Parse custom display_url - custom = urllib.parse.urlparse(self.custom_display_url)._asdict() - # Get pieces that are matter (non None) - custom_updates = {key: item for key, item in custom.items() if item} - # Update public URL parts with custom pieces. - parts = parts._replace(**custom_updates) - return parts.geturl() - - @property - def local_url(self) -> str: - parts = self._get_urlparts(include_token=True) - # Update with custom pieces. 
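`_get_urlparts` above returns a plain `urllib.parse.ParseResult`, which is why `public_url` and `local_url` can rewrite individual pieces with the named-tuple `_replace` method. A standalone illustration with invented values:

```python
import urllib.parse

parts = urllib.parse.ParseResult(
    scheme="https",
    netloc="myhost.example.com:8888",
    path="/lab",
    params="",
    query=urllib.parse.urlencode({"token": "abc123"}),
    fragment="",
)
print(parts.geturl())
# -> https://myhost.example.com:8888/lab?token=abc123

# local_url swaps only the netloc, keeping path and token query intact:
print(parts._replace(netloc="127.0.0.1:8888").geturl())
# -> https://127.0.0.1:8888/lab?token=abc123
```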
- if not self.sock: - parts = parts._replace(netloc=f"127.0.0.1:{self.port}") - return parts.geturl() - - @property - def display_url(self) -> str: - """Human readable string with URLs for interacting - with the running Jupyter Server - """ - url = self.public_url + "\n " + self.local_url - return url - - @property - def connection_url(self) -> str: - urlparts = self._get_urlparts(path=self.base_url) - return urlparts.geturl() - - def init_signal(self) -> None: - """Initialize signal handlers.""" - if ( - not sys.platform.startswith("win") - and sys.stdin # type:ignore[truthy-bool] - and sys.stdin.isatty() - ): - signal.signal(signal.SIGINT, self._handle_sigint) - signal.signal(signal.SIGTERM, self._signal_stop) - if hasattr(signal, "SIGUSR1"): - # Windows doesn't support SIGUSR1 - signal.signal(signal.SIGUSR1, self._signal_info) - if hasattr(signal, "SIGINFO"): - # only on BSD-based systems - signal.signal(signal.SIGINFO, self._signal_info) - - def _handle_sigint(self, sig: t.Any, frame: t.Any) -> None: - """SIGINT handler spawns confirmation dialog""" - # register more forceful signal handler for ^C^C case - signal.signal(signal.SIGINT, self._signal_stop) - # request confirmation dialog in bg thread, to avoid - # blocking the App - thread = threading.Thread(target=self._confirm_exit) - thread.daemon = True - thread.start() - - def _restore_sigint_handler(self) -> None: - """callback for restoring original SIGINT handler""" - signal.signal(signal.SIGINT, self._handle_sigint) - - def _confirm_exit(self) -> None: - """confirm shutdown on ^C - - A second ^C, or answering 'y' within 5s will cause shutdown, - otherwise original SIGINT handler will be restored. - - This doesn't work on Windows. - """ - info = self.log.info - info(_i18n("interrupted")) - # Check if answer_yes is set - if self.answer_yes: - self.log.critical(_i18n("Shutting down...")) - # schedule stop on the main thread, - # since this might be called from a signal handler - self.stop(from_signal=True) - return - info(self.running_server_info()) - yes = _i18n("y") - no = _i18n("n") - sys.stdout.write(_i18n("Shutdown this Jupyter server (%s/[%s])? ") % (yes, no)) - sys.stdout.flush() - r, w, x = select.select([sys.stdin], [], [], 5) - if r: - line = sys.stdin.readline() - if line.lower().startswith(yes) and no not in line.lower(): - self.log.critical(_i18n("Shutdown confirmed")) - # schedule stop on the main thread, - # since this might be called from a signal handler - self.stop(from_signal=True) - return - else: - if self._stopping: - # don't show 'no answer' if we're actually stopping, - # e.g. ctrl-C ctrl-C - return - info(_i18n("No answer for 5s:")) - info(_i18n("resuming operation...")) - # no answer, or answer is no: - # set it back to original SIGINT handler - # use IOLoop.add_callback because signal.signal must be called - # from main thread - self.io_loop.add_callback_from_signal(self._restore_sigint_handler) - - def _signal_stop(self, sig: t.Any, frame: t.Any) -> None: - """Handle a stop signal.""" - self.log.critical(_i18n("received signal %s, stopping"), sig) - self.stop(from_signal=True) - - def _signal_info(self, sig: t.Any, frame: t.Any) -> None: - """Handle an info signal.""" - self.log.info(self.running_server_info()) - - def init_components(self) -> None: - """Check the components submodule, and warn if it's unclean""" - # TODO: this should still check, but now we use bower, not git submodule - - def find_server_extensions(self) -> None: - """ - Searches Jupyter paths for jpserver_extensions. 
- """ + info_file = Unicode() - # Walk through all config files looking for jpserver_extensions. - # - # Each extension will likely have a JSON config file enabling itself in - # the "jupyter_server_config.d" directory. Find each of these and - # merge there results in order of precedence. - # - # Load server extensions with ConfigManager. - # This enables merging on keys, which we want for extension enabling. - # Regular config loading only merges at the class level, - # so each level clobbers the previous. - manager = ExtensionConfigManager(read_config_path=self.config_file_paths) - extensions = manager.get_jpserver_extensions() - - for modulename, enabled in sorted(extensions.items()): - if modulename not in self.jpserver_extensions: - self.config.ServerApp.jpserver_extensions.update({modulename: enabled}) - self.jpserver_extensions.update({modulename: enabled}) - - def init_server_extensions(self) -> None: - """ - If an extension's metadata includes an 'app' key, - the value must be a subclass of ExtensionApp. An instance - of the class will be created at this step. The config for - this instance will inherit the ServerApp's config object - and load its own config. - """ - # Create an instance of the ExtensionManager. - self.extension_manager = ExtensionManager(log=self.log, serverapp=self) - self.extension_manager.from_jpserver_extensions(self.jpserver_extensions) - self.extension_manager.link_all_extensions() + @default("info_file") + def _default_info_file(self) -> str: + info_file = "jpserver-%s.json" % os.getpid() + return os.path.join(self.runtime_dir, info_file) - def load_server_extensions(self) -> None: - """Load any extensions specified by config. + no_browser_open_file = Bool( + False, help="If True, do not write redirect HTML file disk, or show in messages." + ) - Import the module, then call the load_jupyter_server_extension function, - if one exists. + browser_open_file = Unicode() - The extension API is experimental, and may change in future releases. - """ - self.extension_manager.load_all_extensions() - - def init_mime_overrides(self) -> None: - # On some Windows machines, an application has registered incorrect - # mimetypes in the registry. - # Tornado uses this when serving .css and .js files, causing browsers to - # reject these files. 
We know the mimetype always needs to be text/css for css - # and application/javascript for JS, so we override it here - # and explicitly tell the mimetypes to not trust the Windows registry - if os.name == "nt": - # do not trust windows registry, which regularly has bad info - mimetypes.init(files=[]) - # ensure css, js are correct, which are required for pages to function - mimetypes.add_type("text/css", ".css") - mimetypes.add_type("application/javascript", ".js") - # for python <3.8 - mimetypes.add_type("application/wasm", ".wasm") - - def shutdown_no_activity(self) -> None: - """Shutdown server on timeout when there are no kernels or terminals.""" - km = self.kernel_manager - if len(km) != 0: - return # Kernels still running - - if self.extension_manager.any_activity(): - return - - seconds_since_active = (utcnow() - self.web_app.last_activity()).total_seconds() - self.log.debug("No activity for %d seconds.", seconds_since_active) - if seconds_since_active > self.shutdown_no_activity_timeout: - self.log.info( - "No kernels for %d seconds; shutting down.", - seconds_since_active, - ) - self.stop() - - def init_shutdown_no_activity(self) -> None: - """Initialize a shutdown on no activity.""" - if self.shutdown_no_activity_timeout > 0: - self.log.info( - "Will shut down after %d seconds with no kernels.", - self.shutdown_no_activity_timeout, - ) - pc = ioloop.PeriodicCallback(self.shutdown_no_activity, 60000) - pc.start() - - @property - def http_server(self) -> httpserver.HTTPServer: - """An instance of Tornado's HTTPServer class for the Server Web Application.""" - try: - return self._http_server - except AttributeError: - msg = ( - "An HTTPServer instance has not been created for the " - "Server Web Application. To create an HTTPServer for this " - "application, call `.init_httpserver()`." - ) - raise AttributeError(msg) from None - - def init_httpserver(self) -> None: - """Creates an instance of a Tornado HTTPServer for the Server Web Application - and sets the http_server attribute. - """ - # Check that a web_app has been initialized before starting a server. - if not hasattr(self, "web_app"): - msg = ( - "A tornado web application has not be initialized. " - "Try calling `.init_webapp()` first." - ) - raise AttributeError(msg) - - # Create an instance of the server. - self._http_server = httpserver.HTTPServer( - self.web_app, - ssl_options=self.ssl_options, - xheaders=self.trust_xheaders, - max_body_size=self.max_body_size, - max_buffer_size=self.max_buffer_size, - ) - - # binding sockets must be called from inside an event loop - if not self.sock: - self._find_http_port() - self.io_loop.add_callback(self._bind_http_server) - - def _bind_http_server(self) -> None: - """Bind our http server.""" - success = self._bind_http_server_unix() if self.sock else self._bind_http_server_tcp() - if not success: - self.log.critical( - _i18n( - "ERROR: the Jupyter server could not be started because " - "no available port could be found." 
- ) - ) - self.exit(1) + @default("browser_open_file") + def _default_browser_open_file(self) -> str: + basename = "jpserver-%s-open.html" % os.getpid() + return os.path.join(self.runtime_dir, basename) - def _bind_http_server_unix(self) -> bool: - """Bind an http server on unix.""" - if unix_socket_in_use(self.sock): - self.log.warning(_i18n("The socket %s is already in use.") % self.sock) - return False + browser_open_file_to_run = Unicode() - try: - sock = bind_unix_socket(self.sock, mode=int(self.sock_mode.encode(), 8)) - self.http_server.add_socket(sock) - except OSError as e: - if e.errno == errno.EADDRINUSE: - self.log.warning(_i18n("The socket %s is already in use.") % self.sock) - return False - elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)): - self.log.warning(_i18n("Permission to listen on sock %s denied") % self.sock) - return False - else: - raise - else: - return True - - def _bind_http_server_tcp(self) -> bool: - """Bind a tcp server.""" - self.http_server.listen(self.port, self.ip) - return True - - def _find_http_port(self) -> None: - """Find an available http port.""" - success = False - port = self.port - for port in random_ports(self.port, self.port_retries + 1): - try: - sockets = bind_sockets(port, self.ip) - sockets[0].close() - except OSError as e: - if e.errno == errno.EADDRINUSE: - if self.port_retries: - self.log.info( - _i18n("The port %i is already in use, trying another port.") % port - ) - else: - self.log.info(_i18n("The port %i is already in use.") % port) - continue - if e.errno in ( - errno.EACCES, - getattr(errno, "WSAEACCES", errno.EACCES), - ): - self.log.warning(_i18n("Permission to listen on port %i denied.") % port) - continue - raise - else: - success = True - self.port = port - break - if not success: - if self.port_retries: - self.log.critical( - _i18n( - "ERROR: the Jupyter server could not be started because " - "no available port could be found." - ) + @default("browser_open_file_to_run") + def _default_browser_open_file_to_run(self) -> str: + basename = "jpserver-file-to-run-%s-open.html" % os.getpid() + return os.path.join(self.runtime_dir, basename) + + pylab = Unicode( + "disabled", + config=True, + help=_i18n( + """ + DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. + """ + ), + ) + + @observe("pylab") + def _update_pylab(self, change: t.Any) -> None: + """when --pylab is specified, display a warning and exit""" + backend = " %s" % change["new"] if change["new"] != "warn" else "" + self.log.error( + _i18n("Support for specifying --pylab on the command line has been removed.") ) - else: - self.log.critical( - _i18n( - "ERROR: the Jupyter server could not be started because " - "port %i is not available." - ) - % port + self.log.error( + _i18n("Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.").format( + backend + ) ) - self.exit(1) - - @staticmethod - def _init_asyncio_patch() -> None: - """set default asyncio policy to be compatible with tornado - - Tornado 6.0 is not compatible with default asyncio - ProactorEventLoop, which lacks basic *_reader methods. - Tornado 6.1 adds a workaround to add these methods in a thread, - but SelectorEventLoop should still be preferred - to avoid the extra thread for ~all of our events, - at least until asyncio adds *_reader methods - to proactor. 
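> `_find_http_port` (removed here, re-added later in the diff) probes candidate ports by binding and immediately closing a socket, treating `EADDRINUSE` as "taken, try the next one". A rough stdlib-only sketch of the same probe; the helper name and port range are illustrative, not part of the server API:

```python
import errno
import socket

def first_free_port(candidates, host="127.0.0.1"):
    """Return the first port we can bind, or None (illustrative helper)."""
    for port in candidates:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind((host, port))
        except OSError as e:
            if e.errno == errno.EADDRINUSE:
                continue  # already in use: keep probing, like _find_http_port
            raise
        else:
            return port
        finally:
            sock.close()
    return None

print(first_free_port(range(8888, 8988)))
```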
- """ - if sys.platform.startswith("win") and sys.version_info >= (3, 8): - import asyncio - - try: - from asyncio import WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy - except ImportError: - pass - # not affected - else: - if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: - # prefer Selector to Proactor for tornado + pyzmq - asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) - - @catch_config_error - def initialize( - self, - argv: t.Optional[list[str]] = None, - find_extensions: bool = True, - new_httpserver: bool = True, - starter_extension: t.Any = None, - ) -> None: - """Initialize the Server application class, configurables, web application, and http server. - - Parameters - ---------- - argv : list or None - CLI arguments to parse. - find_extensions : bool - If True, find and load extensions listed in Jupyter config paths. If False, - only load extensions that are passed to ServerApp directly through - the `argv`, `config`, or `jpserver_extensions` arguments. - new_httpserver : bool - If True, a tornado HTTPServer instance will be created and configured for the Server Web - Application. This will set the http_server attribute of this class. - starter_extension : str - If given, it references the name of an extension point that started the Server. - We will try to load configuration from extension point - """ - self._init_asyncio_patch() - # Parse command line, load ServerApp config files, - # and update ServerApp config. - super().initialize(argv=argv) - if self._dispatching: - return - # initialize io loop as early as possible, - # so configurables, extensions may reference the event loop - self.init_ioloop() - - # Then, use extensions' config loading mechanism to - # update config. ServerApp config takes precedence. - if find_extensions: - self.find_server_extensions() - self.init_logging() - self.init_event_logger() - self.init_server_extensions() - - # Special case the starter extension and load - # any server configuration is provides. - if starter_extension: - # Configure ServerApp based on named extension. - point = self.extension_manager.extension_points[starter_extension] - # Set starter_app property. - if point.app: - self._starter_app = point.app - # Load any configuration that comes from the Extension point. - self.update_config(Config(point.config)) - - # Initialize other pieces of the server. - self.init_resources() - self.init_configurables() - self.init_components() - self.init_webapp() - self.init_signal() - self.load_server_extensions() - self.init_mime_overrides() - self.init_shutdown_no_activity() - if new_httpserver: - self.init_httpserver() - - async def cleanup_kernels(self) -> None: - """Shutdown all kernels. - - The kernels will shutdown themselves when this process no longer exists, - but explicit shutdown allows the KernelManagers to cleanup the connection files. 
- """ - if not getattr(self, "kernel_manager", None): - return - n_kernels = len(self.kernel_manager.list_kernel_ids()) - kernel_msg = trans.ngettext( - "Shutting down %d kernel", "Shutting down %d kernels", n_kernels - ) - self.log.info(kernel_msg % n_kernels) - await ensure_async(self.kernel_manager.shutdown_all()) - - async def cleanup_extensions(self) -> None: - """Call shutdown hooks in all extensions.""" - if not getattr(self, "extension_manager", None): - return - n_extensions = len(self.extension_manager.extension_apps) - extension_msg = trans.ngettext( - "Shutting down %d extension", "Shutting down %d extensions", n_extensions - ) - self.log.info(extension_msg % n_extensions) - await ensure_async(self.extension_manager.stop_all_extensions()) - - def running_server_info(self, kernel_count: bool = True) -> str: - """Return the current working directory and the server url information""" - info = t.cast(str, self.contents_manager.info_string()) + "\n" - if kernel_count: - n_kernels = len(self.kernel_manager.list_kernel_ids()) - kernel_msg = trans.ngettext("%d active kernel", "%d active kernels", n_kernels) - info += kernel_msg % n_kernels - info += "\n" - # Format the info so that the URL fits on a single line in 80 char display - info += _i18n(f"Jupyter Server {ServerApp.version} is running at:\n{self.display_url}") - if self.gateway_config.gateway_enabled: - info += ( - _i18n("\nKernels will be managed by the Gateway server running at:\n%s") - % self.gateway_config.url - ) - return info - - def server_info(self) -> dict[str, t.Any]: - """Return a JSONable dict of information about this server.""" - return { - "url": self.connection_url, - "hostname": self.ip if self.ip else "localhost", - "port": self.port, - "sock": self.sock, - "secure": bool(self.certfile), - "base_url": self.base_url, - "token": self.identity_provider.token, - "root_dir": os.path.abspath(self.root_dir), - "password": bool(self.password), - "pid": os.getpid(), - "version": ServerApp.version, - } + self.exit(1) - def write_server_info_file(self) -> None: - """Write the result of server_info() to the JSON file info_file.""" - try: - with secure_write(self.info_file) as f: - json.dump(self.server_info(), f, indent=2, sort_keys=True) - except OSError as e: - self.log.error(_i18n("Failed to write server-info to %s: %r"), self.info_file, e) + notebook_dir = Unicode(config=True, help=_i18n("DEPRECATED, use root_dir.")) + + @observe("notebook_dir") + def _update_notebook_dir(self, change: t.Any) -> None: + if self._root_dir_set: + # only use deprecated config if new config is not set + return + self.log.warning(_i18n("notebook_dir is deprecated, use root_dir")) + self.root_dir = change["new"] + + external_connection_dir = Unicode( + None, + allow_none=True, + config=True, + help=_i18n( + "The directory to look at for external kernel connection files, if allow_external_kernels is True. " + "Defaults to Jupyter runtime_dir/external_kernels. " + "Make sure that this directory is not filled with left-over connection files, " + "that could result in unnecessary kernel manager creations." + ), + ) - def remove_server_info_file(self) -> None: - """Remove the jpserver-.json file created for this server. + allow_external_kernels = Bool( + False, + config=True, + help=_i18n( + "Whether or not to allow external kernels, whose connection files are placed in external_connection_dir." + ), + ) - Ignores the error raised when the file has already been removed. 
- """ - try: - os.unlink(self.info_file) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - def _resolve_file_to_run_and_root_dir(self) -> str: - """Returns a relative path from file_to_run - to root_dir. If root_dir and file_to_run - are incompatible, i.e. on different subtrees, - crash the app and log a critical message. Note - that if root_dir is not configured and file_to_run - is configured, root_dir will be set to the parent - directory of file_to_run. - """ - rootdir_abspath = pathlib.Path(self.root_dir).absolute() - file_rawpath = pathlib.Path(self.file_to_run) - combined_path = (rootdir_abspath / file_rawpath).absolute() - is_child = str(combined_path).startswith(str(rootdir_abspath)) + root_dir = Unicode(config=True, help=_i18n("The directory to use for notebooks and kernels.")) + _root_dir_set = False - if is_child: - if combined_path.parent != rootdir_abspath: - self.log.debug( - "The `root_dir` trait is set to a directory that's not " - "the immediate parent directory of `file_to_run`. Note that " - "the server will start at `root_dir` and open the " - "the file from the relative path to the `root_dir`." - ) - return str(combined_path.relative_to(rootdir_abspath)) + @default("root_dir") + def _default_root_dir(self) -> str: + if self.file_to_run: + self._root_dir_set = True + return os.path.dirname(os.path.abspath(self.file_to_run)) + else: + return os.getcwd() + + def _normalize_dir(self, value: str) -> str: + """Normalize a directory.""" + # Strip any trailing slashes + # *except* if it's root + _, path = os.path.splitdrive(value) + if path == os.sep: + return value + value = value.rstrip(os.sep) + if not os.path.isabs(value): + # If we receive a non-absolute path, make it absolute. + value = os.path.abspath(value) + return value + + @validate("root_dir") + def _root_dir_validate(self, proposal: t.Any) -> str: + value = self._normalize_dir(proposal["value"]) + if not os.path.isdir(value): + raise TraitError(trans.gettext("No such directory: '%r'") % value) + return value + + @observe("root_dir") + def _root_dir_changed(self, change: t.Any) -> None: + # record that root_dir is set, + # which affects loading of deprecated notebook_dir + self._root_dir_set = True + + preferred_dir = Unicode( + config=True, + help=trans.gettext("Preferred starting directory to use for notebooks and kernels."), + ) + + @default("preferred_dir") + def _default_prefered_dir(self) -> str: + return self.root_dir + + @validate("preferred_dir") + def _preferred_dir_validate(self, proposal: t.Any) -> str: + value = self._normalize_dir(proposal["value"]) + if not os.path.isdir(value): + raise TraitError(trans.gettext("No such preferred dir: '%r'") % value) + return value + + @observe("server_extensions") + def _update_server_extensions(self, change: t.Any) -> None: + self.log.warning(_i18n("server_extensions is deprecated, use jpserver_extensions")) + self.server_extensions = change["new"] + + jpserver_extensions = Dict( + default_value={}, + value_trait=Bool(), + config=True, + help=( + _i18n( + "Dict of Python modules to load as Jupyter server extensions." + "Entry values can be used to enable and disable the loading of" + "the extensions. The extensions will be loaded in alphabetical " + "order." + ) + ), + ) - self.log.critical( - "`root_dir` and `file_to_run` are incompatible. They " - "don't share the same subtrees. Make sure `file_to_run` " - "is on the same path as `root_dir`." 
+ reraise_server_extension_failures = Bool(
+ False,
+ config=True,
+ help=_i18n("Reraise exceptions encountered loading server extensions?"),
)
- self.exit(1)
- return ""

- def _write_browser_open_file(self, url: str, fh: t.Any) -> None:
- """Write the browser open file."""
- if self.identity_provider.token:
- url = url_concat(url, {"token": self.identity_provider.token})
- url = url_path_join(self.connection_url, url)
+ kernel_ws_protocol = Unicode(
+ allow_none=True,
+ config=True,
+ help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.kernel_ws_protocol"),
+ )
- jinja2_env = self.web_app.settings["jinja2_env"]
- template = jinja2_env.get_template("browser-open.html")
- fh.write(template.render(open_url=url, base_url=self.base_url))
+ @observe("kernel_ws_protocol")
+ def _deprecated_kernel_ws_protocol(self, change: t.Any) -> None:
+ self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection")
- def write_browser_open_files(self) -> None:
- """Write an `browser_open_file` and `browser_open_file_to_run` files
+ limit_rate = Bool(
+ allow_none=True,
+ config=True,
+ help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.limit_rate"),
+ )
- This can be used to open a file directly in a browser.
- """
- # default_url contains base_url, but so does connection_url
- self.write_browser_open_file()
+ @observe("limit_rate")
+ def _deprecated_limit_rate(self, change: t.Any) -> None:
+ self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection")
- # Create a second browser open file if
- # file_to_run is set.
- if self.file_to_run:
- # Make sure file_to_run and root_dir are compatible.
- file_to_run_relpath = self._resolve_file_to_run_and_root_dir()
+ iopub_msg_rate_limit = Float(
+ allow_none=True,
+ config=True,
+ help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.iopub_msg_rate_limit"),
+ )
- file_open_url = url_escape(
- url_path_join(self.file_url_prefix, *file_to_run_relpath.split(os.sep))
- )
+ @observe("iopub_msg_rate_limit")
+ def _deprecated_iopub_msg_rate_limit(self, change: t.Any) -> None:
+ self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection")
- with open(self.browser_open_file_to_run, "w", encoding="utf-8") as f:
- self._write_browser_open_file(file_open_url, f)
+ iopub_data_rate_limit = Float(
+ allow_none=True,
+ config=True,
+ help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.iopub_data_rate_limit"),
+ )
- def write_browser_open_file(self) -> None:
- """Write an jpserver--open.html file
+ @observe("iopub_data_rate_limit")
+ def _deprecated_iopub_data_rate_limit(self, change: t.Any) -> None:
+ self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection")
- This can be used to open the notebook in a browser
- """
- # default_url contains base_url, but so does connection_url
- open_url = self.default_url[len(self.base_url) :]
+ rate_limit_window = Float(
+ allow_none=True,
+ config=True,
+ help=_i18n("DEPRECATED. Use ZMQChannelsWebsocketConnection.rate_limit_window"),
+ )
- with open(self.browser_open_file, "w", encoding="utf-8") as f:
- self._write_browser_open_file(open_url, f)
+ @observe("rate_limit_window")
+ def _deprecated_rate_limit_window(self, change: t.Any) -> None:
+ self._warn_deprecated_config(change, "ZMQChannelsWebsocketConnection")
+
+ shutdown_no_activity_timeout = Integer(
+ 0,
+ config=True,
+ help=(
+ "Shut down the server after N seconds with no kernels "
+ "running and no activity. "
+ "This can be used together with culling idle kernels "
+ "(MappingKernelManager.cull_idle_timeout) to "
+ "shutdown the Jupyter server when it's not in use. This is not "
+ "precisely timed: it may shut down up to a minute later. "
+ "0 (the default) disables this automatic shutdown."
+ ),
+ )
- def remove_browser_open_files(self) -> None:
- """Remove the `browser_open_file` and `browser_open_file_to_run` files
- created for this server.
+ terminals_enabled = Bool(
+ config=True,
+ help=_i18n(
+ """Set to False to disable terminals.
- Ignores the error raised when the file has already been removed.
- """
- self.remove_browser_open_file()
- try:
- os.unlink(self.browser_open_file_to_run)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
+ This does *not* make the server more secure by itself.
+ Anything the user can do in a terminal, they can also do in a notebook.
- def remove_browser_open_file(self) -> None:
- """Remove the jpserver--open.html file created for this server.
+ Terminals may also be automatically disabled if the terminado package
+ is not available.
+ """
+ ),
+ )
- Ignores the error raised when the file has already been removed.
- """
- try:
- os.unlink(self.browser_open_file)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
- def _prepare_browser_open(self) -> tuple[str, t.Optional[str]]:
- """Prepare to open the browser."""
- if not self.use_redirect_file:
- uri = self.default_url[len(self.base_url) :]
-
- if self.identity_provider.token:
- uri = url_concat(uri, {"token": self.identity_provider.token})
-
- if self.file_to_run: # noqa: SIM108
- # Create a separate, temporary open-browser-file
- # pointing at a specific file.
- open_file = self.browser_open_file_to_run
- else:
- # otherwise, just return the usual open browser file.
- open_file = self.browser_open_file
-
- if self.use_redirect_file:
- assembled_url = urljoin("file:", pathname2url(open_file))
- else:
- assembled_url = url_path_join(self.connection_url, uri)
-
- return assembled_url, open_file
-
- def launch_browser(self) -> None:
- """Launch the browser."""
- # Deferred import for environments that do not have
- # the webbrowser module.
- import webbrowser
+ @default("terminals_enabled")
+ def _default_terminals_enabled(self) -> bool:
+ return True
- try:
- browser = webbrowser.get(self.browser or None)
- except webbrowser.Error as e:
- self.log.warning(_i18n("No web browser found: %r.") % e)
- browser = None
+ authenticate_prometheus = Bool(
+ True,
+ help="""
+ Require authentication to access prometheus metrics.
+ """,
+ config=True,
+ )
+
+ static_immutable_cache = List(
+ Unicode(),
+ help="""
+ Paths to set up static files as immutable.
+
+ This allows setting the cache control of static files to immutable.
+ It should be used for static files named with a hash, for instance.
+ """, + config=True, + ) - if not browser: - return + _starter_app = Instance( + default_value=None, + allow_none=True, + klass="jupyter_server.extension.application.ExtensionApp", + ) - assembled_url, _ = self._prepare_browser_open() + @property + def starter_app(self) -> t.Any: + """Get the Extension that started this server.""" + return self._starter_app + + def parse_command_line(self, argv: t.Optional[list[str]] = None) -> None: + """Parse the command line options.""" + super().parse_command_line(argv) + + if self.extra_args: + arg0 = self.extra_args[0] + f = os.path.abspath(arg0) + self.argv.remove(arg0) + if not os.path.exists(f): + self.log.critical(_i18n("No such file or directory: %s"), f) + self.exit(1) + + # Use config here, to ensure that it takes higher priority than + # anything that comes from the config dirs. + c = Config() + if os.path.isdir(f): + c.ServerApp.root_dir = f + elif os.path.isfile(f): + c.ServerApp.file_to_run = f + self.update_config(c) + + def init_configurables(self) -> None: + """Initialize configurables.""" + # If gateway server is configured, replace appropriate managers to perform redirection. To make + # this determination, instantiate the GatewayClient config singleton. + self.gateway_config = GatewayClient.instance(parent=self) + + if not issubclass( + self.kernel_manager_class, + AsyncMappingKernelManager, + ): + warnings.warn( + "The synchronous MappingKernelManager class is deprecated and will not be supported in Jupyter Server 3.0", + DeprecationWarning, + stacklevel=2, + ) - def target(): - assert browser is not None - browser.open(assembled_url, new=self.webbrowser_open_new) + if not issubclass( + self.contents_manager_class, + AsyncContentsManager, + ): + warnings.warn( + "The synchronous ContentsManager classes are deprecated and will not be supported in Jupyter Server 3.0", + DeprecationWarning, + stacklevel=2, + ) - threading.Thread(target=target).start() + self.kernel_spec_manager = self.kernel_spec_manager_class( + parent=self, + ) - def start_app(self) -> None: - """Start the Jupyter Server application.""" - super().start() + kwargs = { + "parent": self, + "log": self.log, + "connection_dir": self.runtime_dir, + "kernel_spec_manager": self.kernel_spec_manager, + } + if jupyter_client.version_info > (8, 3, 0): # type:ignore[attr-defined] + if self.allow_external_kernels: + external_connection_dir = self.external_connection_dir + if external_connection_dir is None: + external_connection_dir = str(Path(self.runtime_dir) / "external_kernels") + kwargs["external_connection_dir"] = external_connection_dir + elif self.allow_external_kernels: + self.log.warning( + "Although allow_external_kernels=True, external kernels are not supported " + "because jupyter-client's version does not allow them (should be >8.3.0)." + ) - if not self.allow_root: - # check if we are running as root, and abort if it's not allowed - try: - uid = os.geteuid() - except AttributeError: - uid = -1 # anything nonzero here, since we can't check UID assume non-root - if uid == 0: - self.log.critical( - _i18n("Running as root is not recommended. 
Use --allow-root to bypass.") + self.kernel_manager = self.kernel_manager_class(**kwargs) + self.contents_manager = self.contents_manager_class( + parent=self, + log=self.log, ) - self.exit(1) + # Trigger a default/validation here explicitly while we still support the + # deprecated trait on ServerApp (FIXME remove when deprecation finalized) + self.contents_manager.preferred_dir # noqa: B018 + self.session_manager = self.session_manager_class( + parent=self, + log=self.log, + kernel_manager=self.kernel_manager, + contents_manager=self.contents_manager, + ) + self.config_manager = self.config_manager_class( + parent=self, + log=self.log, + ) + identity_provider_kwargs = {"parent": self, "log": self.log} - info = self.log.info - for line in self.running_server_info(kernel_count=False).split("\n"): - info(line) - info( - _i18n( - "Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)." - ) - ) - if "dev" in __version__: - info( - _i18n( - "Welcome to Project Jupyter! Explore the various tools available" - " and their corresponding documentation. If you are interested" - " in contributing to the platform, please visit the community" - " resources section at https://jupyter.org/community.html." + if ( + self.login_handler_class is not LoginHandler + and self.identity_provider_class is PasswordIdentityProvider + ): + # default identity provider, non-default LoginHandler + # this indicates legacy custom LoginHandler config. + # enable LegacyIdentityProvider, which defers to the LoginHandler for pre-2.0 behavior. + self.identity_provider_class = LegacyIdentityProvider + self.log.warning( + f"Customizing authentication via ServerApp.login_handler_class={self.login_handler_class}" + " is deprecated in Jupyter Server 2.0." + " Use ServerApp.identity_provider_class." + " Falling back on legacy authentication.", + ) + identity_provider_kwargs["login_handler_class"] = self.login_handler_class + if self.logout_handler_class: + identity_provider_kwargs["logout_handler_class"] = self.logout_handler_class + elif self.login_handler_class is not LoginHandler: + # non-default login handler ignored because also explicitly set identity provider + self.log.warning( + f"Ignoring deprecated config ServerApp.login_handler_class={self.login_handler_class}." + " Superseded by ServerApp.identity_provider_class={self.identity_provider_class}." + ) + self.identity_provider = self.identity_provider_class(**identity_provider_kwargs) + + if self.identity_provider_class is LegacyIdentityProvider: + # legacy config stored the password in tornado_settings + self.tornado_settings["password"] = self.identity_provider.hashed_password # type:ignore[attr-defined] + self.tornado_settings["token"] = self.identity_provider.token + + if self._token_set: + self.log.warning( + "ServerApp.token config is deprecated in jupyter-server 2.0. 
Use IdentityProvider.token" + ) + if self.identity_provider.token_generated: + # default behavior: generated default token + # preserve deprecated ServerApp.token config + self.identity_provider.token_generated = False + self.identity_provider.token = self.token + else: + # identity_provider didn't generate a default token, + # that means it has some config that should take higher priority than deprecated ServerApp.token + self.log.warning("Ignoring deprecated ServerApp.token config") + + self.authorizer = self.authorizer_class( + parent=self, log=self.log, identity_provider=self.identity_provider + ) + + def init_logging(self) -> None: + """Initialize logging.""" + # This prevents double log messages because tornado use a root logger that + # self.log is a child of. The logging module dipatches log messages to a log + # and all of its ancenstors until propagate is set to False. + self.log.propagate = False + + for log in app_log, access_log, gen_log: + # consistent log output name (ServerApp instead of tornado.access, etc.) + log.name = self.log.name + # hook up tornado 3's loggers to our app handlers + logger = logging.getLogger("tornado") + logger.propagate = True + logger.parent = self.log + logger.setLevel(self.log.level) + + def init_event_logger(self) -> None: + """Initialize the Event Bus.""" + self.event_logger = EventLogger(parent=self) + # Load the core Jupyter Server event schemas + # All event schemas must start with Jupyter Server's + # events URI, `JUPYTER_SERVER_EVENTS_URI`. + schema_ids = [ + "https://events.jupyter.org/jupyter_server/contents_service/v1", + "https://events.jupyter.org/jupyter_server/gateway_client/v1", + "https://events.jupyter.org/jupyter_server/kernel_actions/v1", + ] + for schema_id in schema_ids: + # Get the schema path from the schema ID. + rel_schema_path = schema_id.replace(JUPYTER_SERVER_EVENTS_URI + "/", "") + ".yaml" + schema_path = DEFAULT_EVENTS_SCHEMA_PATH / rel_schema_path + # Use this pathlib object to register the schema + self.event_logger.register_event_schema(schema_path) + + def init_webapp(self) -> None: + """initialize tornado webapp""" + self.tornado_settings["allow_origin"] = self.allow_origin + self.tornado_settings["websocket_compression_options"] = self.websocket_compression_options + if self.allow_origin_pat: + self.tornado_settings["allow_origin_pat"] = re.compile(self.allow_origin_pat) + self.tornado_settings["allow_credentials"] = self.allow_credentials + self.tornado_settings["autoreload"] = self.autoreload + + # deprecate accessing these directly, in favor of identity_provider? + self.tornado_settings["cookie_options"] = self.identity_provider.cookie_options + self.tornado_settings[ + "get_secure_cookie_kwargs" + ] = self.identity_provider.get_secure_cookie_kwargs + self.tornado_settings["token"] = self.identity_provider.token + + if self.static_immutable_cache: + self.tornado_settings["static_immutable_cache"] = self.static_immutable_cache + + # ensure default_url starts with base_url + if not self.default_url.startswith(self.base_url): + self.default_url = url_path_join(self.base_url, self.default_url) + + # Socket options validation. + if self.sock: + if self.port != DEFAULT_JUPYTER_SERVER_PORT: + self.log.critical( + ("Options --port and --sock are mutually exclusive. Aborting."), + ) + sys.exit(1) + else: + # Reset the default port if we're using a UNIX socket. + self.port = 0 + + if self.open_browser: + # If we're bound to a UNIX socket, we can't reliably connect from a browser. 
+ self.log.info( + ("Ignoring --ServerApp.open_browser due to --sock being used."), + ) + + if self.file_to_run: + self.log.critical( + ("Options --ServerApp.file_to_run and --sock are mutually exclusive."), + ) + sys.exit(1) + + if sys.platform.startswith("win"): + self.log.critical( + ( + "Option --sock is not supported on Windows, but got value of %s. Aborting." + % self.sock + ), + ) + sys.exit(1) + + self.web_app = ServerWebApplication( + self, + self.default_services, + self.kernel_manager, + self.contents_manager, + self.session_manager, + self.kernel_spec_manager, + self.config_manager, + self.event_logger, + self.extra_services, + self.log, + self.base_url, + self.default_url, + self.tornado_settings, + self.jinja_environment_options, + authorizer=self.authorizer, + identity_provider=self.identity_provider, + kernel_websocket_connection_class=self.kernel_websocket_connection_class, ) - ) + if self.certfile: + self.ssl_options["certfile"] = self.certfile + if self.keyfile: + self.ssl_options["keyfile"] = self.keyfile + if self.client_ca: + self.ssl_options["ca_certs"] = self.client_ca + if not self.ssl_options: + # could be an empty dict or None + # None indicates no SSL config + self.ssl_options = None # type:ignore[assignment] + else: + # SSL may be missing, so only import it if it's to be used + import ssl - self.write_server_info_file() + # PROTOCOL_TLS selects the highest ssl/tls protocol version that both the client and + # server support. When PROTOCOL_TLS is not available use PROTOCOL_SSLv23. + self.ssl_options.setdefault( + "ssl_version", getattr(ssl, "PROTOCOL_TLS", ssl.PROTOCOL_SSLv23) + ) + if self.ssl_options.get("ca_certs", False): + self.ssl_options.setdefault("cert_reqs", ssl.CERT_REQUIRED) - if not self.no_browser_open_file: - self.write_browser_open_files() + self.identity_provider.validate_security(self, ssl_options=self.ssl_options) - # Handle the browser opening. - if self.open_browser and not self.sock: - self.launch_browser() + if isinstance(self.identity_provider, LegacyIdentityProvider): + # LegacyIdentityProvider needs access to the tornado settings dict + self.identity_provider.settings = self.web_app.settings - if self.identity_provider.token and self.identity_provider.token_generated: - # log full URL with generated token, so there's a copy/pasteable link - # with auth info. - if self.sock: - self.log.critical( - "\n".join( - [ - "\n", - "Jupyter Server is listening on %s" % self.display_url, - "", - ( - "UNIX sockets are not browser-connectable, but you can tunnel to " - "the instance via e.g.`ssh -L 8888:{} -N user@this_host` and then " - "open e.g. {} in a browser." - ).format(self.sock, self.connection_url), - ] - ) + def init_resources(self) -> None: + """initialize system resources""" + if resource is None: + self.log.debug( # type:ignore[unreachable] + "Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)" + ) + return + + old_soft, old_hard = resource.getrlimit(resource.RLIMIT_NOFILE) + soft = self.min_open_files_limit + hard = old_hard + if soft is not None and old_soft < soft: + if hard < soft: + hard = soft + self.log.debug( + f"Raising open file limit: soft {old_soft}->{soft}; hard {old_hard}->{hard}" + ) + resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard)) + + def _get_urlparts( + self, path: t.Optional[str] = None, include_token: bool = False + ) -> urllib.parse.ParseResult: + """Constructs a urllib named tuple, ParseResult, + with default values set by server config. 
+ The returned tuple can be manipulated using the `_replace` method.
+ """
+ if self.sock:
+ scheme = "http+unix"
+ netloc = urlencode_unix_socket_path(self.sock)
+ else:
+ if not self.ip:
+ ip = "localhost"
+ # Handle nonexplicit hostname.
+ elif self.ip in ("0.0.0.0", "::"):
+ ip = "%s" % socket.gethostname()
+ else:
+ ip = f"[{self.ip}]" if ":" in self.ip else self.ip
+ netloc = f"{ip}:{self.port}"
+ scheme = "https" if self.certfile else "http"
+ if not path:
+ path = self.default_url
+ query = None
+ # Don't log full token if it came from config
+ if include_token and self.identity_provider.token:
+ token = (
+ self.identity_provider.token if self.identity_provider.token_generated else "..."
+ )
+ query = urllib.parse.urlencode({"token": token})
+ # Build the URL Parts to dump.
+ urlparts = urllib.parse.ParseResult(
+ scheme=scheme, netloc=netloc, path=path, query=query or "", params="", fragment=""
)
+ return urlparts
+
+ @property
+ def public_url(self) -> str:
+ parts = self._get_urlparts(include_token=True)
+ # Update with custom pieces.
+ if self.custom_display_url:
+ # Parse custom display_url
+ custom = urllib.parse.urlparse(self.custom_display_url)._asdict()
+ # Keep only the pieces that matter (non-None)
+ custom_updates = {key: item for key, item in custom.items() if item}
+ # Update public URL parts with custom pieces.
+ parts = parts._replace(**custom_updates)
+ return parts.geturl()
+
+ @property
+ def local_url(self) -> str:
+ parts = self._get_urlparts(include_token=True)
+ # Update with custom pieces.
+ if not self.sock:
+ parts = parts._replace(netloc=f"127.0.0.1:{self.port}")
+ return parts.geturl()
+
+ @property
+ def display_url(self) -> str:
+ """Human readable string with URLs for interacting
+ with the running Jupyter Server
+ """
+ url = self.public_url + "\n " + self.local_url
+ return url
+
+ @property
+ def connection_url(self) -> str:
+ urlparts = self._get_urlparts(path=self.base_url)
+ return urlparts.geturl()
+
+ def init_signal(self) -> None:
+ """Initialize signal handlers."""
+ if (
+ not sys.platform.startswith("win")
+ and sys.stdin # type:ignore[truthy-bool]
+ and sys.stdin.isatty()
+ ):
+ signal.signal(signal.SIGINT, self._handle_sigint)
+ signal.signal(signal.SIGTERM, self._signal_stop)
+ if hasattr(signal, "SIGUSR1"):
+ # Windows doesn't support SIGUSR1
+ signal.signal(signal.SIGUSR1, self._signal_info)
+ if hasattr(signal, "SIGINFO"):
+ # only on BSD-based systems
+ signal.signal(signal.SIGINFO, self._signal_info)
+
+ def _handle_sigint(self, sig: t.Any, frame: t.Any) -> None:
+ """SIGINT handler spawns confirmation dialog"""
+ # register more forceful signal handler for ^C^C case
+ signal.signal(signal.SIGINT, self._signal_stop)
+ # request confirmation dialog in bg thread, to avoid
+ # blocking the App
+ thread = threading.Thread(target=self._confirm_exit)
+ thread.daemon = True
+ thread.start()
+
+ def _restore_sigint_handler(self) -> None:
+ """callback for restoring original SIGINT handler"""
+ signal.signal(signal.SIGINT, self._handle_sigint)
+
+ def _confirm_exit(self) -> None:
+ """confirm shutdown on ^C
+
+ A second ^C, or answering 'y' within 5s will cause shutdown,
+ otherwise the original SIGINT handler will be restored.
+
+ This doesn't work on Windows. 
+ """ + info = self.log.info + info(_i18n("interrupted")) + # Check if answer_yes is set + if self.answer_yes: + self.log.critical(_i18n("Shutting down...")) + # schedule stop on the main thread, + # since this might be called from a signal handler + self.stop(from_signal=True) + return + info(self.running_server_info()) + yes = _i18n("y") + no = _i18n("n") + sys.stdout.write(_i18n("Shutdown this Jupyter server (%s/[%s])? ") % (yes, no)) + sys.stdout.flush() + r, w, x = select.select([sys.stdin], [], [], 5) + if r: + line = sys.stdin.readline() + if line.lower().startswith(yes) and no not in line.lower(): + self.log.critical(_i18n("Shutdown confirmed")) + # schedule stop on the main thread, + # since this might be called from a signal handler + self.stop(from_signal=True) + return else: - message = [ - "\n", - _i18n( - "To access the server, open this file in a browser:", - ), - " %s" % urljoin("file:", pathname2url(self.browser_open_file)), - _i18n( - "Or copy and paste one of these URLs:", - ), - " %s" % self.display_url, - ] + if self._stopping: + # don't show 'no answer' if we're actually stopping, + # e.g. ctrl-C ctrl-C + return + info(_i18n("No answer for 5s:")) + info(_i18n("resuming operation...")) + # no answer, or answer is no: + # set it back to original SIGINT handler + # use IOLoop.add_callback because signal.signal must be called + # from main thread + self.io_loop.add_callback_from_signal(self._restore_sigint_handler) + + def _signal_stop(self, sig: t.Any, frame: t.Any) -> None: + """Handle a stop signal.""" + self.log.critical(_i18n("received signal %s, stopping"), sig) + self.stop(from_signal=True) - self.log.critical("\n".join(message)) + def _signal_info(self, sig: t.Any, frame: t.Any) -> None: + """Handle an info signal.""" + self.log.info(self.running_server_info()) + + def init_components(self) -> None: + """Check the components submodule, and warn if it's unclean""" + # TODO: this should still check, but now we use bower, not git submodule + + def find_server_extensions(self) -> None: + """ + Searches Jupyter paths for jpserver_extensions. + """ + + # Walk through all config files looking for jpserver_extensions. + # + # Each extension will likely have a JSON config file enabling itself in + # the "jupyter_server_config.d" directory. Find each of these and + # merge there results in order of precedence. + # + # Load server extensions with ConfigManager. + # This enables merging on keys, which we want for extension enabling. + # Regular config loading only merges at the class level, + # so each level clobbers the previous. + manager = ExtensionConfigManager(read_config_path=self.config_file_paths) + extensions = manager.get_jpserver_extensions() + + for modulename, enabled in sorted(extensions.items()): + if modulename not in self.jpserver_extensions: + self.config.ServerApp.jpserver_extensions.update({modulename: enabled}) + self.jpserver_extensions.update({modulename: enabled}) + + def init_server_extensions(self) -> None: + """ + If an extension's metadata includes an 'app' key, + the value must be a subclass of ExtensionApp. An instance + of the class will be created at this step. The config for + this instance will inherit the ServerApp's config object + and load its own config. + """ + # Create an instance of the ExtensionManager. 
+ self.extension_manager = ExtensionManager(log=self.log, serverapp=self) + self.extension_manager.from_jpserver_extensions(self.jpserver_extensions) + self.extension_manager.link_all_extensions() + + def load_server_extensions(self) -> None: + """Load any extensions specified by config. + + Import the module, then call the load_jupyter_server_extension function, + if one exists. + + The extension API is experimental, and may change in future releases. + """ + self.extension_manager.load_all_extensions() + + def init_mime_overrides(self) -> None: + # On some Windows machines, an application has registered incorrect + # mimetypes in the registry. + # Tornado uses this when serving .css and .js files, causing browsers to + # reject these files. We know the mimetype always needs to be text/css for css + # and application/javascript for JS, so we override it here + # and explicitly tell the mimetypes to not trust the Windows registry + if os.name == "nt": + # do not trust windows registry, which regularly has bad info + mimetypes.init(files=[]) + # ensure css, js are correct, which are required for pages to function + mimetypes.add_type("text/css", ".css") + mimetypes.add_type("application/javascript", ".js") + # for python <3.8 + mimetypes.add_type("application/wasm", ".wasm") + + def shutdown_no_activity(self) -> None: + """Shutdown server on timeout when there are no kernels or terminals.""" + km = self.kernel_manager + if len(km) != 0: + return # Kernels still running + + if self.extension_manager.any_activity(): + return + + seconds_since_active = (utcnow() - self.web_app.last_activity()).total_seconds() + self.log.debug("No activity for %d seconds.", seconds_since_active) + if seconds_since_active > self.shutdown_no_activity_timeout: + self.log.info( + "No kernels for %d seconds; shutting down.", + seconds_since_active, + ) + self.stop() - async def _cleanup(self) -> None: - """General cleanup of files, extensions and kernels created - by this instance ServerApp. - """ - self.remove_server_info_file() - self.remove_browser_open_files() - await self.cleanup_extensions() - await self.cleanup_kernels() - try: - await self.kernel_websocket_connection_class.close_all() # type:ignore[attr-defined] - except AttributeError: - # This can happen in two different scenarios: - # - # 1. During tests, where the _cleanup method is invoked without - # the corresponding initialize method having been invoked. - # 2. If the provided `kernel_websocket_connection_class` does not - # implement the `close_all` class method. - # - # In either case, we don't need to do anything and just want to treat - # the raised error as a no-op. - pass - if getattr(self, "kernel_manager", None): - self.kernel_manager.__del__() - if getattr(self, "session_manager", None): - self.session_manager.close() - if hasattr(self, "http_server"): - # Stop a server if its set. 
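> `init_shutdown_no_activity` (re-added just below) polls once a minute rather than arming an exact timer, which is why the `shutdown_no_activity_timeout` help warns it "may shut down up to a minute later". The polling skeleton, reduced to a runnable sketch with a placeholder check:

```python
from tornado import ioloop

def check_idle():
    # Placeholder for shutdown_no_activity(): compare last activity against
    # the configured timeout and stop the server once it is exceeded.
    print("checking for idle kernels...")

# Run the check every 60,000 ms, exactly as init_shutdown_no_activity does.
pc = ioloop.PeriodicCallback(check_idle, 60000)
pc.start()
ioloop.IOLoop.current().start()
```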
- self.http_server.stop()
-
- def start_ioloop(self) -> None:
- """Start the IO Loop."""
- if sys.platform.startswith("win"):
- # add no-op to wake every 5s
- # to handle signals that may be ignored by the inner loop
- pc = ioloop.PeriodicCallback(lambda: None, 5000)
- pc.start()
- try:
- self.io_loop.start()
- except KeyboardInterrupt:
- self.log.info(_i18n("Interrupted..."))
-
- def init_ioloop(self) -> None:
- """init self.io_loop so that an extension can use it by io_loop.call_later() to create background tasks"""
- self.io_loop = ioloop.IOLoop.current()
-
- def start(self) -> None:
- """Start the Jupyter server app, after initialization
-
- This method takes no arguments so all configuration and initialization
- must be done prior to calling this method."""
- self.start_app()
- self.start_ioloop()
-
- async def _stop(self) -> None:
- """Cleanup resources and stop the IO Loop."""
- await self._cleanup()
- if getattr(self, "io_loop", None):
- self.io_loop.stop()
-
- def stop(self, from_signal: bool = False) -> None:
- """Cleanup resources and stop the server."""
- # signal that stopping has begun
- self._stopping = True
- if hasattr(self, "http_server"):
- # Stop a server if its set.
- self.http_server.stop()
- if getattr(self, "io_loop", None):
- # use IOLoop.add_callback because signal.signal must be called
- # from main thread
- if from_signal:
- self.io_loop.add_callback_from_signal(self._stop)
- else:
- self.io_loop.add_callback(self._stop)
+ def init_shutdown_no_activity(self) -> None:
+ """Initialize a shutdown on no activity."""
+ if self.shutdown_no_activity_timeout > 0:
+ self.log.info(
+ "Will shut down after %d seconds with no kernels.",
+ self.shutdown_no_activity_timeout,
+ )
+ pc = ioloop.PeriodicCallback(self.shutdown_no_activity, 60000)
+ pc.start()
+ @property
+ def http_server(self) -> httpserver.HTTPServer:
+ """An instance of Tornado's HTTPServer class for the Server Web Application."""
+ try:
+ return self._http_server
+ except AttributeError:
+ msg = (
+ "An HTTPServer instance has not been created for the "
+ "Server Web Application. To create an HTTPServer for this "
+ "application, call `.init_httpserver()`."
+ )
+ raise AttributeError(msg) from None
+
+ def init_httpserver(self) -> None:
+ """Creates an instance of a Tornado HTTPServer for the Server Web Application
+ and sets the http_server attribute.
+ """
+ # Check that a web_app has been initialized before starting a server.
+ if not hasattr(self, "web_app"):
+ msg = (
+ "A tornado web application has not been initialized. "
+ "Try calling `.init_webapp()` first."
+ )
+ raise AttributeError(msg)
+
+ # Create an instance of the server.
+ self._http_server = httpserver.HTTPServer(
+ self.web_app,
+ ssl_options=self.ssl_options,
+ xheaders=self.trust_xheaders,
+ max_body_size=self.max_body_size,
+ max_buffer_size=self.max_buffer_size,
+ )
-
-def list_running_servers(
- runtime_dir: t.Optional[str] = None, log: t.Optional[logging.Logger] = None
-) -> t.Generator[t.Any, None, None]:
- """Iterate over the server info files of running Jupyter servers.
-
- Given a runtime directory, find jpserver-* files in the security directory,
- and yield dicts of their information, each one pertaining to
- a currently running Jupyter server instance. 
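> `init_httpserver` above just wraps the already-built web application in a tornado `HTTPServer`; binding happens later on the event loop. A minimal end-to-end sketch with a throwaway handler (route and port are illustrative):

```python
from tornado import httpserver, ioloop, web

class PingHandler(web.RequestHandler):
    def get(self):
        self.write("pong")

app = web.Application([(r"/ping", PingHandler)])
# ssl_options, xheaders, and body/buffer size limits are passed here
# in init_httpserver; xheaders honors X-Forwarded-* behind a proxy.
server = httpserver.HTTPServer(app, xheaders=True)
server.listen(8888)  # the equivalent of _bind_http_server_tcp
ioloop.IOLoop.current().start()
```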
- """ - if runtime_dir is None: - runtime_dir = jupyter_runtime_dir() - - # The runtime dir might not exist - if not os.path.isdir(runtime_dir): - return - - for file_name in os.listdir(runtime_dir): - if re.match("jpserver-(.+).json", file_name): - with open(os.path.join(runtime_dir, file_name), encoding="utf-8") as f: - # Handle race condition where file is being written. + # binding sockets must be called from inside an event loop + if not self.sock: + self._find_http_port() + self.io_loop.add_callback(self._bind_http_server) + + def _bind_http_server(self) -> None: + """Bind our http server.""" + success = self._bind_http_server_unix() if self.sock else self._bind_http_server_tcp() + if not success: + self.log.critical( + _i18n( + "ERROR: the Jupyter server could not be started because " + "no available port could be found." + ) + ) + self.exit(1) + + def _bind_http_server_unix(self) -> bool: + """Bind an http server on unix.""" + if unix_socket_in_use(self.sock): + self.log.warning(_i18n("The socket %s is already in use.") % self.sock) + return False + + try: + sock = bind_unix_socket(self.sock, mode=int(self.sock_mode.encode(), 8)) + self.http_server.add_socket(sock) + except OSError as e: + if e.errno == errno.EADDRINUSE: + self.log.warning(_i18n("The socket %s is already in use.") % self.sock) + return False + elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)): + self.log.warning(_i18n("Permission to listen on sock %s denied") % self.sock) + return False + else: + raise + else: + return True + + def _bind_http_server_tcp(self) -> bool: + """Bind a tcp server.""" + self.http_server.listen(self.port, self.ip) + return True + + def _find_http_port(self) -> None: + """Find an available http port.""" + success = False + port = self.port + for port in random_ports(self.port, self.port_retries + 1): + try: + sockets = bind_sockets(port, self.ip) + sockets[0].close() + except OSError as e: + if e.errno == errno.EADDRINUSE: + if self.port_retries: + self.log.info( + _i18n("The port %i is already in use, trying another port.") % port + ) + else: + self.log.info(_i18n("The port %i is already in use.") % port) + continue + if e.errno in ( + errno.EACCES, + getattr(errno, "WSAEACCES", errno.EACCES), + ): + self.log.warning(_i18n("Permission to listen on port %i denied.") % port) + continue + raise + else: + success = True + self.port = port + break + if not success: + if self.port_retries: + self.log.critical( + _i18n( + "ERROR: the Jupyter server could not be started because " + "no available port could be found." + ) + ) + else: + self.log.critical( + _i18n( + "ERROR: the Jupyter server could not be started because " + "port %i is not available." + ) + % port + ) + self.exit(1) + + @staticmethod + def _init_asyncio_patch() -> None: + """set default asyncio policy to be compatible with tornado + + Tornado 6.0 is not compatible with default asyncio + ProactorEventLoop, which lacks basic *_reader methods. + Tornado 6.1 adds a workaround to add these methods in a thread, + but SelectorEventLoop should still be preferred + to avoid the extra thread for ~all of our events, + at least until asyncio adds *_reader methods + to proactor. 
+ """ + if sys.platform.startswith("win") and sys.version_info >= (3, 8): + import asyncio + + try: + from asyncio import WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy + except ImportError: + pass + # not affected + else: + if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: + # prefer Selector to Proactor for tornado + pyzmq + asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) + + @catch_config_error + def initialize( + self, + argv: t.Optional[list[str]] = None, + find_extensions: bool = True, + new_httpserver: bool = True, + starter_extension: t.Any = None, + ) -> None: + """Initialize the Server application class, configurables, web application, and http server. + + Parameters + ---------- + argv : list or None + CLI arguments to parse. + find_extensions : bool + If True, find and load extensions listed in Jupyter config paths. If False, + only load extensions that are passed to ServerApp directly through + the `argv`, `config`, or `jpserver_extensions` arguments. + new_httpserver : bool + If True, a tornado HTTPServer instance will be created and configured for the Server Web + Application. This will set the http_server attribute of this class. + starter_extension : str + If given, it references the name of an extension point that started the Server. + We will try to load configuration from extension point + """ + self._init_asyncio_patch() + # Parse command line, load ServerApp config files, + # and update ServerApp config. + super().initialize(argv=argv) + if self._dispatching: + return + # initialize io loop as early as possible, + # so configurables, extensions may reference the event loop + self.init_ioloop() + + # Then, use extensions' config loading mechanism to + # update config. ServerApp config takes precedence. + if find_extensions: + self.find_server_extensions() + self.init_logging() + self.init_event_logger() + self.init_server_extensions() + + # Special case the starter extension and load + # any server configuration is provides. + if starter_extension: + # Configure ServerApp based on named extension. + point = self.extension_manager.extension_points[starter_extension] + # Set starter_app property. + if point.app: + self._starter_app = point.app + # Load any configuration that comes from the Extension point. + self.update_config(Config(point.config)) + + # Initialize other pieces of the server. + self.init_resources() + self.init_configurables() + self.init_components() + self.init_webapp() + self.init_signal() + self.load_server_extensions() + self.init_mime_overrides() + self.init_shutdown_no_activity() + if new_httpserver: + self.init_httpserver() + + async def cleanup_kernels(self) -> None: + """Shutdown all kernels. + + The kernels will shutdown themselves when this process no longer exists, + but explicit shutdown allows the KernelManagers to cleanup the connection files. 
+ """ + if not getattr(self, "kernel_manager", None): + return + n_kernels = len(self.kernel_manager.list_kernel_ids()) + kernel_msg = trans.ngettext( + "Shutting down %d kernel", "Shutting down %d kernels", n_kernels + ) + self.log.info(kernel_msg % n_kernels) + await ensure_async(self.kernel_manager.shutdown_all()) + + async def cleanup_extensions(self) -> None: + """Call shutdown hooks in all extensions.""" + if not getattr(self, "extension_manager", None): + return + n_extensions = len(self.extension_manager.extension_apps) + extension_msg = trans.ngettext( + "Shutting down %d extension", "Shutting down %d extensions", n_extensions + ) + self.log.info(extension_msg % n_extensions) + await ensure_async(self.extension_manager.stop_all_extensions()) + + def running_server_info(self, kernel_count: bool = True) -> str: + """Return the current working directory and the server url information""" + info = t.cast(str, self.contents_manager.info_string()) + "\n" + if kernel_count: + n_kernels = len(self.kernel_manager.list_kernel_ids()) + kernel_msg = trans.ngettext("%d active kernel", "%d active kernels", n_kernels) + info += kernel_msg % n_kernels + info += "\n" + # Format the info so that the URL fits on a single line in 80 char display + info += _i18n(f"Jupyter Server {ServerApp.version} is running at:\n{self.display_url}") + if self.gateway_config.gateway_enabled: + info += ( + _i18n("\nKernels will be managed by the Gateway server running at:\n%s") + % self.gateway_config.url + ) + return info + + def server_info(self) -> dict[str, t.Any]: + """Return a JSONable dict of information about this server.""" + return { + "url": self.connection_url, + "hostname": self.ip if self.ip else "localhost", + "port": self.port, + "sock": self.sock, + "secure": bool(self.certfile), + "base_url": self.base_url, + "token": self.identity_provider.token, + "root_dir": os.path.abspath(self.root_dir), + "password": bool(self.password), + "pid": os.getpid(), + "version": ServerApp.version, + } + + def write_server_info_file(self) -> None: + """Write the result of server_info() to the JSON file info_file.""" try: - info = json.load(f) - except json.JSONDecodeError: - continue - - # Simple check whether that process is really still running - # Also remove leftover files from IPython 2.x without a pid field - if ("pid" in info) and check_pid(info["pid"]): - yield info - else: - # If the process has died, try to delete its info file + with secure_write(self.info_file) as f: + json.dump(self.server_info(), f, indent=2, sort_keys=True) + except OSError as e: + self.log.error(_i18n("Failed to write server-info to %s: %r"), self.info_file, e) + + def remove_server_info_file(self) -> None: + """Remove the jpserver-.json file created for this server. + + Ignores the error raised when the file has already been removed. + """ try: - os.unlink(os.path.join(runtime_dir, file_name)) + os.unlink(self.info_file) except OSError as e: - if log: - log.warning(_i18n("Deleting server info file failed: %s.") % e) + if e.errno != errno.ENOENT: + raise + + def _resolve_file_to_run_and_root_dir(self) -> str: + """Returns a relative path from file_to_run + to root_dir. If root_dir and file_to_run + are incompatible, i.e. on different subtrees, + crash the app and log a critical message. Note + that if root_dir is not configured and file_to_run + is configured, root_dir will be set to the parent + directory of file_to_run. 
+ """ + rootdir_abspath = pathlib.Path(self.root_dir).absolute() + file_rawpath = pathlib.Path(self.file_to_run) + combined_path = (rootdir_abspath / file_rawpath).absolute() + is_child = str(combined_path).startswith(str(rootdir_abspath)) + + if is_child: + if combined_path.parent != rootdir_abspath: + self.log.debug( + "The `root_dir` trait is set to a directory that's not " + "the immediate parent directory of `file_to_run`. Note that " + "the server will start at `root_dir` and open the " + "the file from the relative path to the `root_dir`." + ) + return str(combined_path.relative_to(rootdir_abspath)) + + self.log.critical( + "`root_dir` and `file_to_run` are incompatible. They " + "don't share the same subtrees. Make sure `file_to_run` " + "is on the same path as `root_dir`." + ) + self.exit(1) + return "" + + def _write_browser_open_file(self, url: str, fh: t.Any) -> None: + """Write the browser open file.""" + if self.identity_provider.token: + url = url_concat(url, {"token": self.identity_provider.token}) + url = url_path_join(self.connection_url, url) + + jinja2_env = self.web_app.settings["jinja2_env"] + template = jinja2_env.get_template("browser-open.html") + fh.write(template.render(open_url=url, base_url=self.base_url)) + + def write_browser_open_files(self) -> None: + """Write an `browser_open_file` and `browser_open_file_to_run` files + + This can be used to open a file directly in a browser. + """ + # default_url contains base_url, but so does connection_url + self.write_browser_open_file() + + # Create a second browser open file if + # file_to_run is set. + if self.file_to_run: + # Make sure file_to_run and root_dir are compatible. + file_to_run_relpath = self._resolve_file_to_run_and_root_dir() + + file_open_url = url_escape( + url_path_join(self.file_url_prefix, *file_to_run_relpath.split(os.sep)) + ) + + with open(self.browser_open_file_to_run, "w", encoding="utf-8") as f: + self._write_browser_open_file(file_open_url, f) + + def write_browser_open_file(self) -> None: + """Write an jpserver--open.html file + + This can be used to open the notebook in a browser + """ + # default_url contains base_url, but so does connection_url + open_url = self.default_url[len(self.base_url) :] + + with open(self.browser_open_file, "w", encoding="utf-8") as f: + self._write_browser_open_file(open_url, f) + + def remove_browser_open_files(self) -> None: + """Remove the `browser_open_file` and `browser_open_file_to_run` files + created for this server. + + Ignores the error raised when the file has already been removed. + """ + self.remove_browser_open_file() + try: + os.unlink(self.browser_open_file_to_run) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + def remove_browser_open_file(self) -> None: + """Remove the jpserver--open.html file created for this server. + + Ignores the error raised when the file has already been removed. + """ + try: + os.unlink(self.browser_open_file) + except OSError as e: + if e.errno != errno.ENOENT: + raise + + def _prepare_browser_open(self) -> tuple[str, t.Optional[str]]: + """Prepare to open the browser.""" + if not self.use_redirect_file: + uri = self.default_url[len(self.base_url) :] + + if self.identity_provider.token: + uri = url_concat(uri, {"token": self.identity_provider.token}) + + if self.file_to_run: # noqa: SIM108 + # Create a separate, temporary open-browser-file + # pointing at a specific file. + open_file = self.browser_open_file_to_run + else: + # otherwise, just return the usual open browser file. 
+ open_file = self.browser_open_file
+
+ if self.use_redirect_file:
+ assembled_url = urljoin("file:", pathname2url(open_file))
+ else:
+ assembled_url = url_path_join(self.connection_url, uri)
+
+ return assembled_url, open_file
+
+ def launch_browser(self) -> None:
+ """Launch the browser."""
+ # Deferred import for environments that do not have
+ # the webbrowser module.
+ import webbrowser
+
+ try:
+ browser = webbrowser.get(self.browser or None)
+ except webbrowser.Error as e:
+ self.log.warning(_i18n("No web browser found: %r.") % e)
+ browser = None
+
+ if not browser:
+ return
+
+ assembled_url, _ = self._prepare_browser_open()
+
+ def target():
+ assert browser is not None
+ browser.open(assembled_url, new=self.webbrowser_open_new)
+
+ threading.Thread(target=target).start()
+
+ def start_app(self) -> None:
+ """Start the Jupyter Server application."""
+ super().start()
+
+ if not self.allow_root:
+ # check if we are running as root, and abort if it's not allowed
+ try:
+ uid = os.geteuid()
+ except AttributeError:
+ uid = -1 # anything nonzero here; since we can't check the UID, assume non-root
+ if uid == 0:
+ self.log.critical(
+ _i18n("Running as root is not recommended. Use --allow-root to bypass.")
+ )
+ self.exit(1)
+
+ info = self.log.info
+ for line in self.running_server_info(kernel_count=False).split("\n"):
+ info(line)
+ info(
+ _i18n(
+ "Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)."
+ )
+ )
+ if "dev" in __version__:
+ info(
+ _i18n(
+ "Welcome to Project Jupyter! Explore the various tools available"
+ " and their corresponding documentation. If you are interested"
+ " in contributing to the platform, please visit the community"
+ " resources section at https://jupyter.org/community.html."
+ )
+ )
+
+ self.write_server_info_file()
+
+ if not self.no_browser_open_file:
+ self.write_browser_open_files()
+
+ # Handle the browser opening.
+ if self.open_browser and not self.sock:
+ self.launch_browser()
+
+ if self.identity_provider.token and self.identity_provider.token_generated:
+ # log the full URL with the generated token, so there's a copy/pasteable link
+ # with auth info.
+ if self.sock:
+ self.log.critical(
+ "\n".join(
+ [
+ "\n",
+ "Jupyter Server is listening on %s" % self.display_url,
+ "",
+ (
+ "UNIX sockets are not browser-connectable, but you can tunnel to "
+ "the instance via e.g. `ssh -L 8888:{} -N user@this_host` and then "
+ "open e.g. {} in a browser."
+ ).format(self.sock, self.connection_url),
+ ]
+ )
+ )
+ else:
+ if self.no_browser_open_file:
+ message = [
+ "\n",
+ _i18n("To access the server, copy and paste one of these URLs:"),
+ " %s" % self.display_url,
+ ]
+ else:
+ message = [
+ "\n",
+ _i18n(
+ "To access the server, open this file in a browser:",
+ ),
+ " %s" % urljoin("file:", pathname2url(self.browser_open_file)),
+ _i18n(
+ "Or copy and paste one of these URLs:",
+ ),
+ " %s" % self.display_url,
+ ]
+
+ self.log.critical("\n".join(message))
+
+ async def _cleanup(self) -> None:
+ """General cleanup of files, extensions, and kernels created
+ by this ServerApp instance.
+ """
+ self.remove_server_info_file()
+ self.remove_browser_open_files()
+ await self.cleanup_extensions()
+ await self.cleanup_kernels()
+ try:
+ await self.kernel_websocket_connection_class.close_all() # type:ignore[attr-defined]
+ except AttributeError:
+ # This can happen in two different scenarios:
+ #
+ # 1. During tests, where the _cleanup method is invoked without
+ # the corresponding initialize method having been invoked.
+ # 2. If the provided `kernel_websocket_connection_class` does not
+ # implement the `close_all` class method.
+ #
+ # In either case, we don't need to do anything and just want to treat
+ # the raised error as a no-op.
+ pass
+ if getattr(self, "kernel_manager", None):
+ self.kernel_manager.__del__()
+ if getattr(self, "session_manager", None):
+ self.session_manager.close()
+ if hasattr(self, "http_server"):
+ # Stop the server if it's set.
+ self.http_server.stop()
+
+ def start_ioloop(self) -> None:
+ """Start the IO Loop."""
+ if sys.platform.startswith("win"):
+ # add a no-op to wake every 5s
+ # to handle signals that may be ignored by the inner loop
+ pc = ioloop.PeriodicCallback(lambda: None, 5000)
+ pc.start()
+ try:
+ self.io_loop.start()
+ except KeyboardInterrupt:
+ self.log.info(_i18n("Interrupted..."))
+
+ def init_ioloop(self) -> None:
+ """Initialize self.io_loop so that an extension can use it via io_loop.call_later() to create background tasks."""
+ self.io_loop = ioloop.IOLoop.current()
+
+ def start(self) -> None:
+ """Start the Jupyter server app, after initialization.
+
+ This method takes no arguments, so all configuration and initialization
+ must be done prior to calling this method."""
+ self.start_app()
+ self.start_ioloop()
+
+ async def _stop(self) -> None:
+ """Clean up resources and stop the IO Loop."""
+ await self._cleanup()
+ if getattr(self, "io_loop", None):
+ self.io_loop.stop()
+
+ def stop(self, from_signal: bool = False) -> None:
+ """Clean up resources and stop the server."""
+ # signal that stopping has begun
+ self._stopping = True
+ if hasattr(self, "http_server"):
+ # Stop the server if it's set.
+ self.http_server.stop()
+ if getattr(self, "io_loop", None):
+ # use IOLoop.add_callback because signal.signal must be called
+ # from the main thread
+ if from_signal:
+ self.io_loop.add_callback_from_signal(self._stop)
+ else:
+ self.io_loop.add_callback(self._stop)
+
+
+def list_running_servers(
+ runtime_dir: t.Optional[str] = None, log: t.Optional[logging.Logger] = None
+) -> t.Generator[t.Any, None, None]:
+ """Iterate over the server info files of running Jupyter servers.
+
+ Given a runtime directory, find jpserver-* files in the security directory,
+ and yield dicts of their information, each one pertaining to
+ a currently running Jupyter server instance.
+ """
+ if runtime_dir is None:
+ runtime_dir = jupyter_runtime_dir()
+
+ # The runtime dir might not exist
+ if not os.path.isdir(runtime_dir):
+ return
+
+ for file_name in os.listdir(runtime_dir):
+ if re.match("jpserver-(.+).json", file_name):
+ with open(os.path.join(runtime_dir, file_name), encoding="utf-8") as f:
+ # Handle the race condition where the file is still being written.
+ try: + info = json.load(f) + except json.JSONDecodeError: + continue + + # Simple check whether that process is really still running + # Also remove leftover files from IPython 2.x without a pid field + if ("pid" in info) and check_pid(info["pid"]): + yield info + else: + # If the process has died, try to delete its info file + try: + os.unlink(os.path.join(runtime_dir, file_name)) + except OSError as e: + if log: + log.warning(_i18n("Deleting server info file failed: %s.") % e) # ----------------------------------------------------------------------------- diff --git a/jupyter_server/services/contents/filemanager.py b/jupyter_server/services/contents/filemanager.py index c56a1acc70..09cbeec13a 100644 --- a/jupyter_server/services/contents/filemanager.py +++ b/jupyter_server/services/contents/filemanager.py @@ -1025,7 +1025,7 @@ async def is_non_empty_dir(os_path): try: send2trash(os_path) except OSError as e: - raise web.HTTPError(400, "send2trash f`1ailed: %s" % e) from e + raise web.HTTPError(400, "send2trash failed: %s" % e) from e return if os.path.isdir(os_path): diff --git a/jupyter_server/services/kernels/websocket.py b/jupyter_server/services/kernels/websocket.py index 7417441b5f..4c2c1c8914 100644 --- a/jupyter_server/services/kernels/websocket.py +++ b/jupyter_server/services/kernels/websocket.py @@ -13,85 +13,85 @@ class KernelWebsocketHandler(WebSocketMixin, WebSocketHandler, JupyterHandler): # type:ignore[misc] - """The kernels websocket should connect""" - - auth_resource = AUTH_RESOURCE - - @property - def kernel_websocket_connection_class(self): - """The kernel websocket connection class.""" - return self.settings.get("kernel_websocket_connection_class") - - def set_default_headers(self): - """Undo the set_default_headers in JupyterHandler - - which doesn't make sense for websockets - """ - - def get_compression_options(self): - """Get the socket connection options.""" - return self.settings.get("websocket_compression_options", None) - - async def pre_get(self): - """Handle a pre_get.""" - # authenticate first - user = self.current_user - if user is None: - self.log.warning("Couldn't authenticate WebSocket connection") - raise web.HTTPError(403) - - # authorize the user. - authorized = await ensure_async( - self.authorizer.is_authorized(self, user, "execute", "kernels") - ) - if not authorized: - raise web.HTTPError(403) - - kernel = self.kernel_manager.get_kernel(self.kernel_id) - self.connection = self.kernel_websocket_connection_class( - parent=kernel, websocket_handler=self, config=self.config - ) - - if self.get_argument("session_id", None): - self.connection.session.session = self.get_argument("session_id") - else: - self.log.warning("No session ID specified") - # For backwards compatibility with older versions - # of the websocket connection, call a prepare method if found. - if hasattr(self.connection, "prepare"): - await self.connection.prepare() - - async def get(self, kernel_id): - """Handle a get request for a kernel.""" - self.kernel_id = kernel_id - await self.pre_get() - await super().get(kernel_id=kernel_id) - - async def open(self, kernel_id): - """Open a kernel websocket.""" - # Need to call super here to make sure we - # begin a ping-pong loop with the client. - super().open() - # Wait for the kernel to emit an idle status. 
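For reference, `list_running_servers()` is part of the public module surface; a short usage sketch (run against whatever servers happen to be up):

```python
from jupyter_server.serverapp import list_running_servers

# Each yielded dict has the keys produced by server_info() above,
# e.g. "url", "pid", "root_dir".
for info in list_running_servers():
    print(f"pid={info['pid']} url={info['url']}")
```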
- self.log.info(f"Connecting to kernel {self.kernel_id}.") - await self.connection.connect() - - def on_message(self, ws_message): - """Get a kernel message from the websocket and turn it into a ZMQ message.""" - self.connection.handle_incoming_message(ws_message) - - def on_close(self): - """Handle a socket closure.""" - self.connection.disconnect() - self.connection = None - - def select_subprotocol(self, subprotocols): - """Select the sub protocol for the socket.""" - preferred_protocol = self.connection.kernel_ws_protocol - if preferred_protocol is None: - preferred_protocol = "v1.kernel.websocket.jupyter.org" - elif preferred_protocol == "": - preferred_protocol = None - selected_subprotocol = preferred_protocol if preferred_protocol in subprotocols else None - # None is the default, "legacy" protocol - return selected_subprotocol + """The kernels websocket should connect""" + + auth_resource = AUTH_RESOURCE + + @property + def kernel_websocket_connection_class(self): + """The kernel websocket connection class.""" + return self.settings.get("kernel_websocket_connection_class") + + def set_default_headers(self): + """Undo the set_default_headers in JupyterHandler + + which doesn't make sense for websockets + """ + + def get_compression_options(self): + """Get the socket connection options.""" + return self.settings.get("websocket_compression_options", None) + + async def pre_get(self): + """Handle a pre_get.""" + # authenticate first + user = self.current_user + if user is None: + self.log.warning("Couldn't authenticate WebSocket connection") + raise web.HTTPError(403) + + # authorize the user. + authorized = await ensure_async( + self.authorizer.is_authorized(self, user, "execute", "kernels") + ) + if not authorized: + raise web.HTTPError(403) + + kernel = self.kernel_manager.get_kernel(self.kernel_id) + self.connection = self.kernel_websocket_connection_class( + parent=kernel, websocket_handler=self, config=self.config + ) + + if self.get_argument("session_id", None): + self.connection.session.session = self.get_argument("session_id") + else: + self.log.warning("No session ID specified") + # For backwards compatibility with older versions + # of the websocket connection, call a prepare method if found. + if hasattr(self.connection, "prepare"): + await self.connection.prepare() + + async def get(self, kernel_id): + """Handle a get request for a kernel.""" + self.kernel_id = kernel_id + await self.pre_get() + await super().get(kernel_id=kernel_id) + + async def open(self, kernel_id): + """Open a kernel websocket.""" + # Need to call super here to make sure we + # begin a ping-pong loop with the client. + super().open() + # Wait for the kernel to emit an idle status. 
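The `select_subprotocol()` rules in this re-indented hunk are easy to miss; a standalone restatement of the negotiation (a pure function, with asserts as a usage check):

```python
from typing import List, Optional


def negotiate_subprotocol(preferred: Optional[str], offered: List[str]) -> Optional[str]:
    # None means "no preference": fall back to the v1 protocol if offered.
    if preferred is None:
        preferred = "v1.kernel.websocket.jupyter.org"
    # The empty string explicitly requests the legacy (None) protocol.
    elif preferred == "":
        preferred = None
    return preferred if preferred in offered else None


assert negotiate_subprotocol(None, ["v1.kernel.websocket.jupyter.org"]) == (
    "v1.kernel.websocket.jupyter.org"
)
assert negotiate_subprotocol("", ["v1.kernel.websocket.jupyter.org"]) is None  # legacy
```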
+ self.log.info(f"Connecting to kernel {self.kernel_id}.") + await self.connection.connect() + + def on_message(self, ws_message): + """Get a kernel message from the websocket and turn it into a ZMQ message.""" + self.connection.handle_incoming_message(ws_message) + + def on_close(self): + """Handle a socket closure.""" + self.connection.disconnect() + self.connection = None + + def select_subprotocol(self, subprotocols): + """Select the sub protocol for the socket.""" + preferred_protocol = self.connection.kernel_ws_protocol + if preferred_protocol is None: + preferred_protocol = "v1.kernel.websocket.jupyter.org" + elif preferred_protocol == "": + preferred_protocol = None + selected_subprotocol = preferred_protocol if preferred_protocol in subprotocols else None + # None is the default, "legacy" protocol + return selected_subprotocol diff --git a/jupyter_server/services/sessions/sessionmanager.py b/jupyter_server/services/sessions/sessionmanager.py index 08d6a583e6..a40f5bb113 100644 --- a/jupyter_server/services/sessions/sessionmanager.py +++ b/jupyter_server/services/sessions/sessionmanager.py @@ -12,10 +12,10 @@ ModelName = NewType("ModelName", str) try: - import sqlite3 + import sqlite3 except ImportError: - # fallback on pysqlite2 if Python was build without sqlite - from pysqlite2 import dbapi2 as sqlite3 # type:ignore[no-redef] + # fallback on pysqlite2 if Python was build without sqlite + from pysqlite2 import dbapi2 as sqlite3 # type:ignore[no-redef] import asyncio from dataclasses import dataclass, fields @@ -29,542 +29,542 @@ class KernelSessionRecordConflict(Exception): - """Exception class to use when two KernelSessionRecords cannot - merge because of conflicting data. - """ + """Exception class to use when two KernelSessionRecords cannot + merge because of conflicting data. + """ @dataclass class KernelSessionRecord: - """A record object for tracking a Jupyter Server Kernel Session. - - Two records that share a session_id must also share a kernel_id, while - kernels can have multiple session (and thereby) session_ids - associated with them. - """ - - session_id: Optional[str] = None - kernel_id: Optional[str] = None - - def __eq__(self, other: object) -> bool: - """Whether a record equals another.""" - if isinstance(other, KernelSessionRecord): - condition1 = self.kernel_id and self.kernel_id == other.kernel_id - condition2 = all( - [ - self.session_id == other.session_id, - self.kernel_id is None or other.kernel_id is None, - ] - ) - if any([condition1, condition2]): - return True - # If two records share session_id but have different kernels, this is - # and ill-posed expression. This should never be true. Raise an exception - # to inform the user. - if all( - [ - self.session_id, - self.session_id == other.session_id, - self.kernel_id != other.kernel_id, - ] - ): - msg = ( - "A single session_id can only have one kernel_id " - "associated with. These two KernelSessionRecords share the same " - "session_id but have different kernel_ids. This should " - "not be possible and is likely an issue with the session " - "records." - ) - raise KernelSessionRecordConflict(msg) - return False - - def update(self, other: "KernelSessionRecord") -> None: - """Updates in-place a kernel from other (only accepts positive updates""" - if not isinstance(other, KernelSessionRecord): - msg = "'other' must be an instance of KernelSessionRecord." 
# type:ignore[unreachable]
- raise TypeError(msg)
-
- if other.kernel_id and self.kernel_id and other.kernel_id != self.kernel_id:
- msg = "Could not update the record from 'other' because the two records conflict."
- raise KernelSessionRecordConflict(msg)
+ """A record object for tracking a Jupyter Server Kernel Session.
+
+ Two records that share a session_id must also share a kernel_id, while
+ kernels can have multiple sessions (and thereby session_ids)
+ associated with them.
+ """
- for field in fields(self):
- if hasattr(other, field.name) and getattr(other, field.name):
- setattr(self, field.name, getattr(other, field.name))
+ session_id: Optional[str] = None
+ kernel_id: Optional[str] = None
+
+ def __eq__(self, other: object) -> bool:
+ """Whether a record equals another."""
+ if isinstance(other, KernelSessionRecord):
+ condition1 = self.kernel_id and self.kernel_id == other.kernel_id
+ condition2 = all(
+ [
+ self.session_id == other.session_id,
+ self.kernel_id is None or other.kernel_id is None,
+ ]
+ )
+ if any([condition1, condition2]):
+ return True
+ # If two records share a session_id but have different kernels, this is
+ # an ill-posed expression. This should never be true. Raise an exception
+ # to inform the user.
+ if all(
+ [
+ self.session_id,
+ self.session_id == other.session_id,
+ self.kernel_id != other.kernel_id,
+ ]
+ ):
+ msg = (
+ "A single session_id can only have one kernel_id "
+ "associated with it. These two KernelSessionRecords share the same "
+ "session_id but have different kernel_ids. This should "
+ "not be possible and is likely an issue with the session "
+ "records."
+ )
+ raise KernelSessionRecordConflict(msg)
+ return False
+
+ def update(self, other: "KernelSessionRecord") -> None:
+ """Updates this record in place from `other` (only accepts positive updates)."""
+ if not isinstance(other, KernelSessionRecord):
+ msg = "'other' must be an instance of KernelSessionRecord." # type:ignore[unreachable]
+ raise TypeError(msg)
+
+ if other.kernel_id and self.kernel_id and other.kernel_id != self.kernel_id:
+ msg = "Could not update the record from 'other' because the two records conflict."
+ raise KernelSessionRecordConflict(msg)
+
+ for field in fields(self):
+ if hasattr(other, field.name) and getattr(other, field.name):
+ setattr(self, field.name, getattr(other, field.name))


class KernelSessionRecordList:
- """An object for storing and managing a list of KernelSessionRecords.
-
- When adding a record to the list, the KernelSessionRecordList
- first checks if the record already exists in the list. If it does,
- the record will be updated with the new information; otherwise,
- it will be appended.
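To make the merge semantics concrete, a small usage sketch against the classes defined in this hunk:

```python
from jupyter_server.services.sessions.sessionmanager import (
    KernelSessionRecord,
    KernelSessionRecordList,
)

records = KernelSessionRecordList()
records.update(KernelSessionRecord(session_id="s1"))  # appended
records.update(KernelSessionRecord(session_id="s1", kernel_id="k1"))  # merged in place
assert len(records) == 1
assert "k1" in records and "s1" in records  # lookup works by either id
```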
- """ - - _records: List[KernelSessionRecord] - - def __init__(self, *records: KernelSessionRecord): - """Initialize a record list.""" - self._records = [] - for record in records: - self.update(record) - - def __str__(self): - """The string representation of a record list.""" - return str(self._records) - - def __contains__(self, record: Union[KernelSessionRecord, str]) -> bool: - """Search for records by kernel_id and session_id""" - if isinstance(record, KernelSessionRecord) and record in self._records: - return True - - if isinstance(record, str): - for r in self._records: - if record in [r.session_id, r.kernel_id]: - return True - return False - - def __len__(self): - """The length of the record list.""" - return len(self._records) - - def get(self, record: Union[KernelSessionRecord, str]) -> KernelSessionRecord: - """Return a full KernelSessionRecord from a session_id, kernel_id, or - incomplete KernelSessionRecord. - """ - if isinstance(record, str): - for r in self._records: - if record in (r.kernel_id, r.session_id): - return r - elif isinstance(record, KernelSessionRecord): - for r in self._records: - if record == r: - return record - msg = f"{record} not found in KernelSessionRecordList." - raise ValueError(msg) - - def update(self, record: KernelSessionRecord) -> None: - """Update a record in-place or append it if not in the list.""" - try: - idx = self._records.index(record) - self._records[idx].update(record) - except ValueError: - self._records.append(record) - - def remove(self, record: KernelSessionRecord) -> None: - """Remove a record if its found in the list. If it's not found, - do nothing. + """An object for storing and managing a list of KernelSessionRecords. + + When adding a record to the list, the KernelSessionRecordList + first checks if the record already exists in the list. If it does, + the record will be updated with the new information; otherwise, + it will be appended. """ - if record in self._records: - self._records.remove(record) + _records: List[KernelSessionRecord] + + def __init__(self, *records: KernelSessionRecord): + """Initialize a record list.""" + self._records = [] + for record in records: + self.update(record) + + def __str__(self): + """The string representation of a record list.""" + return str(self._records) + + def __contains__(self, record: Union[KernelSessionRecord, str]) -> bool: + """Search for records by kernel_id and session_id""" + if isinstance(record, KernelSessionRecord) and record in self._records: + return True + + if isinstance(record, str): + for r in self._records: + if record in [r.session_id, r.kernel_id]: + return True + return False + + def __len__(self): + """The length of the record list.""" + return len(self._records) + + def get(self, record: Union[KernelSessionRecord, str]) -> KernelSessionRecord: + """Return a full KernelSessionRecord from a session_id, kernel_id, or + incomplete KernelSessionRecord. + """ + if isinstance(record, str): + for r in self._records: + if record in (r.kernel_id, r.session_id): + return r + elif isinstance(record, KernelSessionRecord): + for r in self._records: + if record == r: + return record + msg = f"{record} not found in KernelSessionRecordList." 
+ raise ValueError(msg) + + def update(self, record: KernelSessionRecord) -> None: + """Update a record in-place or append it if not in the list.""" + try: + idx = self._records.index(record) + self._records[idx].update(record) + except ValueError: + self._records.append(record) + + def remove(self, record: KernelSessionRecord) -> None: + """Remove a record if its found in the list. If it's not found, + do nothing. + """ + if record in self._records: + self._records.remove(record) -class SessionManager(LoggingConfigurable): - """A session manager.""" - - database_filepath = Unicode( - default_value=":memory:", - help=( - "The filesystem path to SQLite Database file " - "(e.g. /path/to/session_database.db). By default, the session " - "database is stored in-memory (i.e. `:memory:` setting from sqlite3) " - "and does not persist when the current Jupyter Server shuts down." - ), - ).tag(config=True) - - @validate("database_filepath") - def _validate_database_filepath(self, proposal): - """Validate a database file path.""" - value = proposal["value"] - if value == ":memory:": - return value - path = pathlib.Path(value) - if path.exists(): - # Verify that the database path is not a directory. - if path.is_dir(): - msg = "`database_filepath` expected a file path, but the given path is a directory." - raise TraitError(msg) - # Verify that database path is an SQLite 3 Database by checking its header. - with open(value, "rb") as f: - header = f.read(100) - - if not header.startswith(b"SQLite format 3") and header != b"": - msg = "The given file is not an SQLite database file." - raise TraitError(msg) - return value - - kernel_manager = Instance("jupyter_server.services.kernels.kernelmanager.MappingKernelManager") - contents_manager = InstanceFromClasses( - [ - "jupyter_server.services.contents.manager.ContentsManager", - "notebook.services.contents.manager.ContentsManager", - ] - ) - - def __init__(self, *args, **kwargs): - """Initialize a record list.""" - super().__init__(*args, **kwargs) - self._pending_sessions = KernelSessionRecordList() - - # Session database initialized below - _cursor = None - _connection = None - _columns = {"session_id", "path", "name", "type", "kernel_id"} - - fut_kernel_id_dict: Optional[Dict[str, Task[str]]] = None - - @property - def cursor(self): - """Start a cursor and create a database called 'session'""" - if self._cursor is None: - self._cursor = self.connection.cursor() - self._cursor.execute( - """CREATE TABLE IF NOT EXISTS session - (session_id, path, name, type, kernel_id)""" - ) - return self._cursor - - @property - def connection(self): - """Start a database connection""" - if self._connection is None: - # Set isolation level to None to autocommit all changes to the database. - self._connection = sqlite3.connect(self.database_filepath, isolation_level=None) - self._connection.row_factory = sqlite3.Row - return self._connection - - def close(self): - """Close the sqlite connection""" - if self._cursor is not None: - self._cursor.close() - self._cursor = None - - def __del__(self): - """Close connection once SessionManager closes""" - self.close() - - async def session_exists(self, path): - """Check to see if the session of a given name exists""" - exists = False - self.cursor.execute("SELECT * FROM session WHERE path=?", (path,)) - row = self.cursor.fetchone() - if row is not None: - # Note, although we found a row for the session, the associated kernel may have - # been culled or died unexpectedly. 
If that's the case, we should delete the - # row, thereby terminating the session. This can be done via a call to - # row_to_model that tolerates that condition. If row_to_model returns None, - # we'll return false, since, at that point, the session doesn't exist anyway. - model = await self.row_to_model(row, tolerate_culled=True) - if model is not None: - exists = True - return exists - - def new_session_id(self) -> str: - """Create a uuid for a new session""" - return str(uuid.uuid4()) - - async def create_session( - self, - path: Optional[str] = None, - name: Optional[ModelName] = None, - type: Optional[str] = None, - kernel_name: Optional[KernelName] = None, - kernel_id: Optional[str] = None, - session_id: Optional[str] = None, - ) -> Dict[str, Any]: - """Creates a session and returns its model - - Parameters - ---------- - name: ModelName(str) - Usually the model name, like the filename associated with current - kernel. - """ - if session_id is not None and self.fut_kernel_id_dict is None: - self.fut_kernel_id_dict = {} - - if session_id is None or session_id == "": - session_id = self.new_session_id() - - record = KernelSessionRecord(session_id=session_id) - self._pending_sessions.update(record) - if kernel_id is not None and kernel_id in self.kernel_manager: - pass - else: - kernel_id = await self.start_kernel_for_session( - session_id, path, name, type, kernel_name - ) - record.kernel_id = kernel_id - self._pending_sessions.update(record) - result = await self.save_session( - session_id, path=path, name=name, type=type, kernel_id=kernel_id - ) - self._pending_sessions.remove(record) - return cast(Dict[str, Any], result) - - def get_kernel_env( - self, path: Optional[str], name: Optional[ModelName] = None - ) -> Dict[str, str]: - """Return the environment variables that need to be set in the kernel - - Parameters - ---------- - path : str - the url path for the given session. - name: ModelName(str), optional - Here the name is likely to be the name of the associated file - with the current kernel at startup time. - """ - if name is not None: - cwd = self.kernel_manager.cwd_for_path(path) - path = os.path.join(cwd, name) - assert isinstance(path, str) - return {**os.environ, "JPY_SESSION_NAME": path} - - async def start_kernel_for_session( - self, - session_id: str, - path: Optional[str], - name: Optional[ModelName], - type: Optional[str], - kernel_name: Optional[KernelName], - ) -> str: - """Start a new kernel for a given session. - - Parameters - ---------- - session_id : str - uuid for the session; this method must be given a session_id - path : str - the path for the given session - seem to be a session id sometime. - name : str - Usually the model name, like the filename associated with current - kernel. - type : str - the type of the session - kernel_name : str - the name of the kernel specification to use. The default kernel name will be used if not provided. 
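As a side note, the environment handed to a new kernel is simply the server's environment plus `JPY_SESSION_NAME`; a standalone restatement of `get_kernel_env()` (the `cwd` parameter stands in for `kernel_manager.cwd_for_path()`, an assumption made for this sketch):

```python
import os
from typing import Dict, Optional


def kernel_env(path: Optional[str], name: Optional[str] = None, cwd: str = ".") -> Dict[str, str]:
    # With a name, JPY_SESSION_NAME points at the file inside the kernel's
    # working directory, mirroring get_kernel_env() in this module.
    if name is not None:
        path = os.path.join(cwd, name)
    assert isinstance(path, str)
    return {**os.environ, "JPY_SESSION_NAME": path}


print(kernel_env("notebooks/demo.ipynb")["JPY_SESSION_NAME"])
```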
- """ - # allow contents manager to specify kernels cwd - if self.fut_kernel_id_dict is not None: - if session_id in self.fut_kernel_id_dict: - fut_kernel_id = self.fut_kernel_id_dict[session_id] - if fut_kernel_id.done(): - kernel_id = await fut_kernel_id - self.fut_kernel_id_dict.pop(session_id) - return kernel_id - else: - kernel_path = await ensure_async(self.contents_manager.get_kernel_path(path=path)) - kernel_env = self.get_kernel_env(path) - self.fut_kernel_id_dict[session_id] = asyncio.create_task(self.kernel_manager.start_kernel( - path=kernel_path, - kernel_name=kernel_name, - env=kernel_env, - )) - kernel_id = "waiting" - else: - kernel_path = await ensure_async(self.contents_manager.get_kernel_path(path=path)) - - kernel_env = self.get_kernel_env(path, name) - kernel_id = await self.kernel_manager.start_kernel( - path=kernel_path, - kernel_name=kernel_name, - env=kernel_env, - ) - return kernel_id - - async def save_session(self, session_id, path=None, name=None, type=None, kernel_id=None): - """Saves the items for the session with the given session_id - - Given a session_id (and any other of the arguments), this method - creates a row in the sqlite session database that holds the information - for a session. - - Parameters - ---------- - session_id : str - uuid for the session; this method must be given a session_id - path : str - the path for the given session - name : str - the name of the session - type : str - the type of the session - kernel_id : str - a uuid for the kernel associated with this session - - Returns - ------- - model : dict - a dictionary of the session model - """ - self.cursor.execute( - "INSERT INTO session VALUES (?,?,?,?,?)", - (session_id, path, name, type, kernel_id), +class SessionManager(LoggingConfigurable): + """A session manager.""" + + database_filepath = Unicode( + default_value=":memory:", + help=( + "The filesystem path to SQLite Database file " + "(e.g. /path/to/session_database.db). By default, the session " + "database is stored in-memory (i.e. `:memory:` setting from sqlite3) " + "and does not persist when the current Jupyter Server shuts down." + ), + ).tag(config=True) + + @validate("database_filepath") + def _validate_database_filepath(self, proposal): + """Validate a database file path.""" + value = proposal["value"] + if value == ":memory:": + return value + path = pathlib.Path(value) + if path.exists(): + # Verify that the database path is not a directory. + if path.is_dir(): + msg = "`database_filepath` expected a file path, but the given path is a directory." + raise TraitError(msg) + # Verify that database path is an SQLite 3 Database by checking its header. + with open(value, "rb") as f: + header = f.read(100) + + if not header.startswith(b"SQLite format 3") and header != b"": + msg = "The given file is not an SQLite database file." + raise TraitError(msg) + return value + + kernel_manager = Instance("jupyter_server.services.kernels.kernelmanager.MappingKernelManager") + contents_manager = InstanceFromClasses( + [ + "jupyter_server.services.contents.manager.ContentsManager", + "notebook.services.contents.manager.ContentsManager", + ] ) - result = await self.get_session(session_id=session_id) - return result - - async def get_session(self, **kwargs): - """Returns the model for a particular session. - - Takes a keyword argument and searches for the value in the session - database, then returns the rest of the session's info. 
- - Parameters - ---------- - **kwargs : dict - must be given one of the keywords and values from the session database - (i.e. session_id, path, name, type, kernel_id) - - Returns - ------- - model : dict - returns a dictionary that includes all the information from the - session described by the kwarg. - """ - session_id = kwargs["session_id"] - if self.fut_kernel_id_dict is not None and session_id in self.fut_kernel_id_dict: - model = { - "id": session_id, - "name": "Waiting for kernel to start", - "last_activity": None, - "execution_state": "waiting", - "connections": 0, - } - else: - if not kwargs: - msg = "must specify a column to query" - raise TypeError(msg) - - conditions = [] - for column in kwargs: - if column not in self._columns: - msg = f"No such column: {column}" - raise TypeError(msg) - conditions.append("%s=?" % column) - - query = "SELECT * FROM session WHERE %s" % (" AND ".join(conditions)) - - self.cursor.execute(query, list(kwargs.values())) - try: + + def __init__(self, *args, **kwargs): + """Initialize a record list.""" + super().__init__(*args, **kwargs) + self._pending_sessions = KernelSessionRecordList() + + # Session database initialized below + _cursor = None + _connection = None + _columns = {"session_id", "path", "name", "type", "kernel_id"} + + fut_kernel_id_dict: Optional[Dict[str, Task[str]]] = None + + @property + def cursor(self): + """Start a cursor and create a database called 'session'""" + if self._cursor is None: + self._cursor = self.connection.cursor() + self._cursor.execute( + """CREATE TABLE IF NOT EXISTS session + (session_id, path, name, type, kernel_id)""" + ) + return self._cursor + + @property + def connection(self): + """Start a database connection""" + if self._connection is None: + # Set isolation level to None to autocommit all changes to the database. + self._connection = sqlite3.connect(self.database_filepath, isolation_level=None) + self._connection.row_factory = sqlite3.Row + return self._connection + + def close(self): + """Close the sqlite connection""" + if self._cursor is not None: + self._cursor.close() + self._cursor = None + + def __del__(self): + """Close connection once SessionManager closes""" + self.close() + + async def session_exists(self, path): + """Check to see if the session of a given name exists""" + exists = False + self.cursor.execute("SELECT * FROM session WHERE path=?", (path,)) row = self.cursor.fetchone() - except KeyError: - # The kernel is missing, so the session just got deleted. - row = None - - if row is None: - q = [] - for key, value in kwargs.items(): - q.append(f"{key}={value!r}") - - raise web.HTTPError(404, "Session not found: %s" % (", ".join(q))) - - try: - model = await self.row_to_model(row) - except KeyError as e: - raise web.HTTPError(404, "Session not found: %s" % str(e)) from e - - return model - - async def update_session(self, session_id, **kwargs): - """Updates the values in the session database. - - Changes the values of the session with the given session_id - with the values from the keyword arguments. - - Parameters - ---------- - session_id : str - a uuid that identifies a session in the sqlite3 database - **kwargs : str - the key must correspond to a column title in session database, - and the value replaces the current value in the session - with session_id. 
- """ - await self.get_session(session_id=session_id) - - if not kwargs: - # no changes - return - - sets = [] - for column in kwargs: - if column not in self._columns: - raise TypeError("No such column: %r" % column) - sets.append("%s=?" % column) - query = "UPDATE session SET %s WHERE session_id=?" % (", ".join(sets)) - self.cursor.execute(query, [*list(kwargs.values()), session_id]) - - if hasattr(self.kernel_manager, "update_env"): - self.cursor.execute( - "SELECT path, name, kernel_id FROM session WHERE session_id=?", [session_id] - ) - path, name, kernel_id = self.cursor.fetchone() - self.kernel_manager.update_env(kernel_id=kernel_id, env=self.get_kernel_env(path, name)) - - async def kernel_culled(self, kernel_id: str) -> bool: - """Checks if the kernel is still considered alive and returns true if its not found.""" - return kernel_id not in self.kernel_manager - - async def row_to_model(self, row, tolerate_culled=False): - """Takes sqlite database session row and turns it into a dictionary""" - kernel_culled: bool = await ensure_async(self.kernel_culled(row["kernel_id"])) - if kernel_culled: - # The kernel was culled or died without deleting the session. - # We can't use delete_session here because that tries to find - # and shut down the kernel - so we'll delete the row directly. - # - # If caller wishes to tolerate culled kernels, log a warning - # and return None. Otherwise, raise KeyError with a similar - # message. - self.cursor.execute("DELETE FROM session WHERE session_id=?", (row["session_id"],)) - msg = ( - "Kernel '{kernel_id}' appears to have been culled or died unexpectedly, " - "invalidating session '{session_id}'. The session has been removed.".format( - kernel_id=row["kernel_id"], session_id=row["session_id"] + if row is not None: + # Note, although we found a row for the session, the associated kernel may have + # been culled or died unexpectedly. If that's the case, we should delete the + # row, thereby terminating the session. This can be done via a call to + # row_to_model that tolerates that condition. If row_to_model returns None, + # we'll return false, since, at that point, the session doesn't exist anyway. + model = await self.row_to_model(row, tolerate_culled=True) + if model is not None: + exists = True + return exists + + def new_session_id(self) -> str: + """Create a uuid for a new session""" + return str(uuid.uuid4()) + + async def create_session( + self, + path: Optional[str] = None, + name: Optional[ModelName] = None, + type: Optional[str] = None, + kernel_name: Optional[KernelName] = None, + kernel_id: Optional[str] = None, + session_id: Optional[str] = None, + ) -> Dict[str, Any]: + """Creates a session and returns its model + + Parameters + ---------- + name: ModelName(str) + Usually the model name, like the filename associated with current + kernel. 
+ """ + + if session_id is not None and self.fut_kernel_id_dict is None: + self.fut_kernel_id_dict = {} + + if session_id is None or session_id == "": + session_id = self.new_session_id() + + record = KernelSessionRecord(session_id=session_id) + self._pending_sessions.update(record) + if kernel_id is not None and kernel_id in self.kernel_manager: + pass + else: + kernel_id = await self.start_kernel_for_session( + session_id, path, name, type, kernel_name + ) + record.kernel_id = kernel_id + self._pending_sessions.update(record) + result = await self.save_session( + session_id, path=path, name=name, type=type, kernel_id=kernel_id + ) + self._pending_sessions.remove(record) + return cast(Dict[str, Any], result) + + def get_kernel_env( + self, path: Optional[str], name: Optional[ModelName] = None + ) -> Dict[str, str]: + """Return the environment variables that need to be set in the kernel + + Parameters + ---------- + path : str + the url path for the given session. + name: ModelName(str), optional + Here the name is likely to be the name of the associated file + with the current kernel at startup time. + """ + if name is not None: + cwd = self.kernel_manager.cwd_for_path(path) + path = os.path.join(cwd, name) + assert isinstance(path, str) + return {**os.environ, "JPY_SESSION_NAME": path} + + async def start_kernel_for_session( + self, + session_id: str, + path: Optional[str], + name: Optional[ModelName], + type: Optional[str], + kernel_name: Optional[KernelName], + ) -> str: + """Start a new kernel for a given session. + + Parameters + ---------- + session_id : str + uuid for the session; this method must be given a session_id + path : str + the path for the given session - seem to be a session id sometime. + name : str + Usually the model name, like the filename associated with current + kernel. + type : str + the type of the session + kernel_name : str + the name of the kernel specification to use. The default kernel name will be used if not provided. + """ + # allow contents manager to specify kernels cwd + if self.fut_kernel_id_dict is not None: + if session_id in self.fut_kernel_id_dict: + fut_kernel_id = self.fut_kernel_id_dict[session_id] + if fut_kernel_id.done(): + kernel_id = await fut_kernel_id + self.fut_kernel_id_dict.pop(session_id) + return kernel_id + else: + kernel_path = await ensure_async(self.contents_manager.get_kernel_path(path=path)) + kernel_env = self.get_kernel_env(path) + self.fut_kernel_id_dict[session_id] = asyncio.create_task(self.kernel_manager.start_kernel( + path=kernel_path, + kernel_name=kernel_name, + env=kernel_env, + )) + kernel_id = "waiting" + else: + kernel_path = await ensure_async(self.contents_manager.get_kernel_path(path=path)) + + kernel_env = self.get_kernel_env(path, name) + kernel_id = await self.kernel_manager.start_kernel( + path=kernel_path, + kernel_name=kernel_name, + env=kernel_env, + ) + return cast(str, kernel_id) + + async def save_session(self, session_id, path=None, name=None, type=None, kernel_id=None): + """Saves the items for the session with the given session_id + + Given a session_id (and any other of the arguments), this method + creates a row in the sqlite session database that holds the information + for a session. 
+ + Parameters + ---------- + session_id : str + uuid for the session; this method must be given a session_id + path : str + the path for the given session + name : str + the name of the session + type : str + the type of the session + kernel_id : str + a uuid for the kernel associated with this session + + Returns + ------- + model : dict + a dictionary of the session model + """ + self.cursor.execute( + "INSERT INTO session VALUES (?,?,?,?,?)", + (session_id, path, name, type, kernel_id), ) - ) - if tolerate_culled: - self.log.warning(f"{msg} Continuing...") - return None - raise KeyError(msg) - - kernel_model = await ensure_async(self.kernel_manager.kernel_model(row["kernel_id"])) - model = { - "id": row["session_id"], - "path": row["path"], - "name": row["name"], - "type": row["type"], - "kernel": kernel_model, - } - if row["type"] == "notebook": - # Provide the deprecated API. - model["notebook"] = {"path": row["path"], "name": row["name"]} - return model - - async def list_sessions(self): - """Returns a list of dictionaries containing all the information from - the session database""" - c = self.cursor.execute("SELECT * FROM session") - result = [] - # We need to use fetchall() here, because row_to_model can delete rows, - # which messes up the cursor if we're iterating over rows. - for row in c.fetchall(): - try: - model = await self.row_to_model(row) - result.append(model) - except KeyError: - pass - return result - - async def delete_session(self, session_id): - """Deletes the row in the session database with given session_id""" - record = KernelSessionRecord(session_id=session_id) - self._pending_sessions.update(record) - session = await self.get_session(session_id=session_id) - await ensure_async(self.kernel_manager.shutdown_kernel(session["kernel"]["id"])) - self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,)) - self._pending_sessions.remove(record) + result = await self.get_session(session_id=session_id) + return result + + async def get_session(self, **kwargs): + """Returns the model for a particular session. + + Takes a keyword argument and searches for the value in the session + database, then returns the rest of the session's info. + + Parameters + ---------- + **kwargs : dict + must be given one of the keywords and values from the session database + (i.e. session_id, path, name, type, kernel_id) + + Returns + ------- + model : dict + returns a dictionary that includes all the information from the + session described by the kwarg. + """ + session_id = kwargs["session_id"] + if self.fut_kernel_id_dict is not None and session_id in self.fut_kernel_id_dict: + model = { + "id": session_id, + "name": "Waiting for kernel to start", + "last_activity": None, + "execution_state": "waiting", + "connections": 0, + } + else: + if not kwargs: + msg = "must specify a column to query" + raise TypeError(msg) + + conditions = [] + for column in kwargs: + if column not in self._columns: + msg = f"No such column: {column}" + raise TypeError(msg) + conditions.append("%s=?" % column) + + query = "SELECT * FROM session WHERE %s" % (" AND ".join(conditions)) + + self.cursor.execute(query, list(kwargs.values())) + try: + row = self.cursor.fetchone() + except KeyError: + # The kernel is missing, so the session just got deleted. 
+ row = None + + if row is None: + q = [] + for key, value in kwargs.items(): + q.append(f"{key}={value!r}") + + raise web.HTTPError(404, "Session not found: %s" % (", ".join(q))) + + try: + model = await self.row_to_model(row) + except KeyError as e: + raise web.HTTPError(404, "Session not found: %s" % str(e)) from e + + return model + + async def update_session(self, session_id, **kwargs): + """Updates the values in the session database. + + Changes the values of the session with the given session_id + with the values from the keyword arguments. + + Parameters + ---------- + session_id : str + a uuid that identifies a session in the sqlite3 database + **kwargs : str + the key must correspond to a column title in session database, + and the value replaces the current value in the session + with session_id. + """ + await self.get_session(session_id=session_id) + + if not kwargs: + # no changes + return + + sets = [] + for column in kwargs: + if column not in self._columns: + raise TypeError("No such column: %r" % column) + sets.append("%s=?" % column) + query = "UPDATE session SET %s WHERE session_id=?" % (", ".join(sets)) + self.cursor.execute(query, [*list(kwargs.values()), session_id]) + + if hasattr(self.kernel_manager, "update_env"): + self.cursor.execute( + "SELECT path, name, kernel_id FROM session WHERE session_id=?", [session_id] + ) + path, name, kernel_id = self.cursor.fetchone() + self.kernel_manager.update_env(kernel_id=kernel_id, env=self.get_kernel_env(path, name)) + + async def kernel_culled(self, kernel_id: str) -> bool: + """Checks if the kernel is still considered alive and returns true if its not found.""" + return kernel_id not in self.kernel_manager + + async def row_to_model(self, row, tolerate_culled=False): + """Takes sqlite database session row and turns it into a dictionary""" + kernel_culled: bool = await ensure_async(self.kernel_culled(row["kernel_id"])) + if kernel_culled: + # The kernel was culled or died without deleting the session. + # We can't use delete_session here because that tries to find + # and shut down the kernel - so we'll delete the row directly. + # + # If caller wishes to tolerate culled kernels, log a warning + # and return None. Otherwise, raise KeyError with a similar + # message. + self.cursor.execute("DELETE FROM session WHERE session_id=?", (row["session_id"],)) + msg = ( + "Kernel '{kernel_id}' appears to have been culled or died unexpectedly, " + "invalidating session '{session_id}'. The session has been removed.".format( + kernel_id=row["kernel_id"], session_id=row["session_id"] + ) + ) + if tolerate_culled: + self.log.warning(f"{msg} Continuing...") + return None + raise KeyError(msg) + + kernel_model = await ensure_async(self.kernel_manager.kernel_model(row["kernel_id"])) + model = { + "id": row["session_id"], + "path": row["path"], + "name": row["name"], + "type": row["type"], + "kernel": kernel_model, + } + if row["type"] == "notebook": + # Provide the deprecated API. + model["notebook"] = {"path": row["path"], "name": row["name"]} + return model + + async def list_sessions(self): + """Returns a list of dictionaries containing all the information from + the session database""" + c = self.cursor.execute("SELECT * FROM session") + result = [] + # We need to use fetchall() here, because row_to_model can delete rows, + # which messes up the cursor if we're iterating over rows. 
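Before the loop below runs over that snapshot, here is a standalone illustration of why the `fetchall()` note above matters: deleting rows through the same connection while iterating a live cursor can silently skip rows in sqlite3.

```python
import sqlite3

conn = sqlite3.connect(":memory:", isolation_level=None)
cur = conn.cursor()
cur.execute("CREATE TABLE t (id INTEGER)")
cur.executemany("INSERT INTO t VALUES (?)", [(i,) for i in range(4)])

# Snapshot first, then mutate: the safe pattern list_sessions() uses.
rows = cur.execute("SELECT id FROM t").fetchall()
for (i,) in rows:
    if i % 2 == 0:
        cur.execute("DELETE FROM t WHERE id=?", (i,))

print(cur.execute("SELECT COUNT(*) FROM t").fetchone()[0])  # 2
```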
+ for row in c.fetchall(): + try: + model = await self.row_to_model(row) + result.append(model) + except KeyError: + pass + return result + + async def delete_session(self, session_id): + """Deletes the row in the session database with given session_id""" + record = KernelSessionRecord(session_id=session_id) + self._pending_sessions.update(record) + session = await self.get_session(session_id=session_id) + await ensure_async(self.kernel_manager.shutdown_kernel(session["kernel"]["id"])) + self.cursor.execute("DELETE FROM session WHERE session_id=?", (session_id,)) + self._pending_sessions.remove(record) diff --git a/jupyter_server/utils.py b/jupyter_server/utils.py index 2a4c185d97..968d1fd27c 100644 --- a/jupyter_server/utils.py +++ b/jupyter_server/utils.py @@ -344,11 +344,11 @@ def filefind(filename: str, path_dirs: Sequence[str] | str | None = None) -> str path dirs is given, the filename is tested as is, after running through :func:`expandvars` and :func:`expanduser`. Thus a simple call:: - filefind('myfile.txt') + filefind("myfile.txt") will find the file in the current working dir, but:: - filefind('~/myfile.txt') + filefind("~/myfile.txt") Will find the file in the users home directory. This function does not automatically try any paths, such as the cwd or the user's home directory. diff --git a/pyproject.toml b/pyproject.toml index b2b4bf3bb7..5d67e8a4ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -142,6 +142,9 @@ optional-editable-build = true [tool.ruff] line-length = 100 +[tool.ruff.format] +docstring-code-format = true + [tool.ruff.lint] select = [ "B", # flake8-bugbear @@ -295,4 +298,4 @@ exclude = ["docs", "test"] ignore = ["W002"] [tool.repo-review] -ignore = ["GH102"] +ignore = ["GH102", "PC111"] diff --git a/tests/test_gateway.py b/tests/test_gateway.py index 585650e2f0..fd511b6067 100644 --- a/tests/test_gateway.py +++ b/tests/test_gateway.py @@ -373,15 +373,12 @@ def test_gateway_request_timeout_pad_option( GatewayClient.clear_instance() -cookie_expire_time = format_datetime(datetime.now(tz=timezone.utc) + timedelta(seconds=180)) - - @pytest.mark.parametrize( "accept_cookies,expire_arg,expire_param,existing_cookies,cookie_exists", [ (False, None, None, "EXISTING=1", False), (True, None, None, "EXISTING=1", True), - (True, "Expires", cookie_expire_time, None, True), + (True, "Expires", 180, None, True), (True, "Max-Age", "-360", "EXISTING=1", False), ], ) @@ -400,6 +397,10 @@ def test_gateway_request_with_expiring_cookies( cookie: SimpleCookie = SimpleCookie() cookie.load("SERVERID=1234567; Path=/") + if expire_arg == "Expires": + expire_param = format_datetime( + datetime.now(tz=timezone.utc) + timedelta(seconds=expire_param) + ) if expire_arg: cookie["SERVERID"][expire_arg] = expire_param
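Finally, the test change above replaces a module-level cookie timestamp with a relative offset converted at call time, so the `Expires` date cannot go stale between test collection and execution. Restated standalone (using `email.utils.format_datetime` here; the test's actual `format_datetime` import is not shown in this hunk):

```python
from datetime import datetime, timedelta, timezone
from email.utils import format_datetime  # assumption: any RFC 2822 date formatter works


def cookie_expires(seconds: int) -> str:
    # Convert a relative offset into an absolute HTTP date when the test
    # runs, not when the module is imported.
    return format_datetime(datetime.now(tz=timezone.utc) + timedelta(seconds=seconds))


print(cookie_expires(180))
```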