diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 213290784..9815273e7 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: name: Run linters runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - uses: actions/setup-python@v5 with: python-version: ${{ env.PYTHON_VERSION }} @@ -57,7 +57,7 @@ jobs: # Skipping 3.10 and 3.11 as we assume it is covered by 3.9 and 3.12 - "3.12" steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} @@ -84,7 +84,7 @@ jobs: IDP_CLIENT_SECRET: ${{ secrets.IDP_CLIENT_SECRET }} IDP_TOKEN_URL: ${{ secrets.IDP_TOKEN_URL }} steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - uses: actions/setup-python@v5 with: python-version: 3.11 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 737eb6fc6..00bee5e27 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -10,7 +10,7 @@ jobs: name: Run linters runs-on: ubuntu-latest steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - uses: actions/setup-python@v5 with: python-version: ${{ env.PYTHON_VERSION }} @@ -34,7 +34,7 @@ jobs: IDP_CLIENT_SECRET: ${{ secrets.IDP_CLIENT_SECRET }} IDP_TOKEN_URL: ${{ secrets.IDP_TOKEN_URL }} steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - uses: actions/setup-python@v5 with: python-version: ${{ env.PYTHON_VERSION }} diff --git a/.gitignore b/.gitignore index f0905353a..53680d236 100644 --- a/.gitignore +++ b/.gitignore @@ -294,3 +294,4 @@ cognite_toolkit/config.local.yaml .venv.* build.* cognite_toolkit/.env.* +tests_migrations/project_inits \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 758aa3cc1..479a13a1b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,15 +6,12 @@ repos: args: - --fix - --exit-non-zero-on-fix - - --line-length=120 - --ignore=E731,E501,W605,T201,UP007 # See https://beta.ruff.rs/docs/rules for an overview of ruff rules - --select=E,W,F,I,T,RUF,TID,UP - --fixable=E,W,F,I,T,RUF,TID,UP - --target-version=py39 - id: ruff-format - args: - - --line-length=120 rev: v0.4.8 - repo: https://github.com/pre-commit/pre-commit-hooks diff --git a/.vscode/launch.json b/.vscode/launch.json index c9a60ec74..051f79f10 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -115,12 +115,63 @@ //"--dry-run", //"--env=local", //"--include=transformations" - //"--init-dir=foo" - //"--arrow" - //"--help" + //"--init-dir=foo", + //"--help", + //"--package=quickstart" + //"--package=empty", ], "console": "integratedTerminal", "justMyCode": false }, + { + "name": "Python: feature flags", + "type": "debugpy", + "request": "launch", + "program": "./cdf-tk-dev.py", + "args": [ + "features", + "list", + ], + "console": "integratedTerminal", + "justMyCode": false + }, + { + "name": "Python: feature flag set", + "type": "debugpy", + "request": "launch", + "program": "./cdf-tk-dev.py", + "args": [ + 
"features", + "set", + "--flag=interactive_init", + "--enabled=true", + + ], + "console": "integratedTerminal", + "justMyCode": false + }, + { + "name": "Python: feature flags reset", + "type": "debugpy", + "request": "launch", + "program": "./cdf-tk-dev.py", + "args": [ + "features", + "reset", + ], + "console": "integratedTerminal", + "justMyCode": false + }, + { + "name": "Python: modules init", + "type": "debugpy", + "request": "launch", + "program": "./cdf-tk-dev.py", + "args": [ + "modules", + "init" ], + "console": "integratedTerminal", + "justMyCode": false + }, ] } \ No newline at end of file diff --git a/CHANGELOG.cdf-tk.md b/CHANGELOG.cdf-tk.md index 5549ea406..f5de7f959 100644 --- a/CHANGELOG.cdf-tk.md +++ b/CHANGELOG.cdf-tk.md @@ -15,6 +15,26 @@ Changes are grouped as follows: - `Fixed` for any bug fixes. - `Security` in case of vulnerabilities. +## [0.2.1] - 2024-06-17 + +### Improved + +- When running `cdf-tk auth verify`, if the client does not have access to the `CDF_PROJECT` the user will now get + a more informative error message. +- When running `cdf-tk auth verify` and missing the `FunctionAcl(READ)` capability, the user will now get a more + informative error message when checking the function service status + +## Fixed + +- When running `cdf-tk build`, you would get a `DuplicatedItemWarning` on RAW Databases that are used with multiple + tables. This is now fixed. + +### Added + +- Preview feature `MODULES_CMD` to allow interactive init and automatic upgrade of modules. Activate by running + `cdf-tk features set MODULES_CMD --enable`, and deactivate by running `cdf-tk features set MODULES_CMD --disable`. + Run `cdf-tk modules init/upgrade` to interactively initialize or upgrade modules. + ## [0.2.0] - 2024-06-10 ### Fixed @@ -23,6 +43,9 @@ Changes are grouped as follows: `ValueError: No capabilities given`. This is now fixed. - When deploying `containers` resources with an index, the `cdf-tk deploy` would consider the resource as changed even though it was not. This is now fixed. +- When parsing yaml without `libyaml`, `cognite-toolkit` would raise an + `AttributeError: module 'yaml' has no attribute 'CSafeLoader'`. This is now fixed by falling back to the + python `yaml` parser if `libyaml` (c-based) is not available. ## [0.2.0b4] - 2024-06-06 diff --git a/CHANGELOG.templates.md b/CHANGELOG.templates.md index 6e2f5188f..97caa4308 100644 --- a/CHANGELOG.templates.md +++ b/CHANGELOG.templates.md @@ -15,6 +15,10 @@ Changes are grouped as follows: - `Fixed` for any bug fixes. - `Security` in case of vulnerabilities. +## [0.2.1] - 2024-06-17 + +No changes to templates. + ## [0.2.0] - 2024-06-10 No changes to templates. diff --git a/cognite_toolkit/_cdf.py b/cognite_toolkit/_cdf.py index 0c35f7a99..643595d4d 100755 --- a/cognite_toolkit/_cdf.py +++ b/cognite_toolkit/_cdf.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # The Typer parameters get mixed up if we use the __future__ import annotations in the main file. 
- +import contextlib import os import sys from collections.abc import Sequence @@ -21,10 +21,12 @@ DeployCommand, DescribeCommand, DumpCommand, + FeatureFlagCommand, PullCommand, RunFunctionCommand, RunTransformationCommand, ) +from cognite_toolkit._cdf_tk.commands.featureflag import FeatureFlag, Flags from cognite_toolkit._cdf_tk.data_classes import ( ProjectDirectoryInit, ProjectDirectoryUpgrade, @@ -40,7 +42,6 @@ NodeLoader, TransformationLoader, ) -from cognite_toolkit._cdf_tk.prototypes import featureflag from cognite_toolkit._cdf_tk.utils import ( CDFToolConfig, sentry_exception_filter, @@ -81,21 +82,31 @@ run_app = typer.Typer(**default_typer_kws) # type: ignore [arg-type] pull_app = typer.Typer(**default_typer_kws) # type: ignore [arg-type] dump_app = typer.Typer(**default_typer_kws) # type: ignore [arg-type] +feature_flag_app = typer.Typer(**default_typer_kws, hidden=True) # type: ignore [arg-type] + _app.add_typer(auth_app, name="auth") _app.add_typer(describe_app, name="describe") _app.add_typer(run_app, name="run") _app.add_typer(pull_app, name="pull") _app.add_typer(dump_app, name="dump") +_app.add_typer(feature_flag_app, name="features") def app() -> NoReturn: # --- Main entry point --- # Users run 'app()' directly, but that doesn't allow us to control excepton handling: try: - if featureflag.enabled("FF_INTERACTIVE_INIT"): - from cognite_toolkit._cdf_tk.prototypes.interactive_init import InteractiveInit + if FeatureFlag.is_enabled(Flags.MODULES_CMD): + from cognite_toolkit._cdf_tk.prototypes.landing_app import Landing + from cognite_toolkit._cdf_tk.prototypes.modules_app import Modules + + # original init is replaced with the modules subapp + modules_app = Modules(**default_typer_kws) # type: ignore [arg-type] + _app.add_typer(modules_app, name="modules") + _app.command("init")(Landing().main_init) + else: + _app.command("init")(main_init) - _app.command("init")(InteractiveInit().interactive) _app() except ToolkitError as err: print(f" [bold red]ERROR ([/][red]{type(err).__name__}[/][bold red]):[/] {err}") @@ -249,8 +260,7 @@ def build( ] = False, ) -> None: """Build configuration files from the module templates to a local build directory.""" - user_command = f"cdf-tk {' '.join(sys.argv[1:])}" - cmd = BuildCommand(user_command=user_command) + cmd = BuildCommand(user_command=_get_user_command()) cmd.execute(ctx.obj.verbose, Path(source_dir), Path(build_dir), build_env_name, no_clean) @@ -312,9 +322,10 @@ def deploy( ), ] = None, ) -> None: - cmd = DeployCommand(print_warning=True) + cmd = DeployCommand(print_warning=True, user_command=_get_user_command()) include = _process_include(include, interactive) - cmd.execute(ctx, build_dir, build_env_name, dry_run, drop, drop_data, include) + ToolGlobals = CDFToolConfig.from_context(ctx) + cmd.execute(ToolGlobals, build_dir, build_env_name, dry_run, drop, drop_data, include, ctx.obj.verbose) @_app.command("clean") @@ -362,9 +373,10 @@ def clean( ) -> None: """Clean up a CDF environment as set in environments.yaml restricted to the entities in the configuration files in the build directory.""" # Override cluster and project from the options/env variables - cmd = CleanCommand(print_warning=True) + cmd = CleanCommand(print_warning=True, user_command=_get_user_command()) include = _process_include(include, interactive) - cmd.execute(ctx, build_dir, build_env_name, dry_run, include) + ToolGlobals = CDFToolConfig.from_context(ctx) + cmd.execute(ToolGlobals, build_dir, build_env_name, dry_run, include, ctx.obj.verbose) 
@auth_app.callback(invoke_without_command=True) @@ -433,11 +445,14 @@ def auth_verify( The default bootstrap group configuration is admin.readwrite.group.yaml from the cdf_auth_readwrite_all common module. """ - cmd = AuthCommand() - cmd.execute(ctx, dry_run, interactive, group_file, update_group, create_group) + cmd = AuthCommand(user_command=_get_user_command()) + with contextlib.redirect_stdout(None): + # Remove the Error message from failing to load the config + # This is verified in check_auth + ToolGlobals = CDFToolConfig.from_context(ctx) + cmd.execute(ToolGlobals, dry_run, interactive, group_file, update_group, create_group, ctx.obj.verbose) -@_app.command("init" if not featureflag.enabled("FF_INTERACTIVE_INIT") else "_init") def main_init( ctx: typer.Context, dry_run: Annotated[ @@ -549,7 +564,7 @@ def describe_datamodel_cmd( ) -> None: """This command will describe the characteristics of a data model given the space name and datamodel name.""" - cmd = DescribeCommand() + cmd = DescribeCommand(user_command=_get_user_command()) cmd.execute(CDFToolConfig.from_context(ctx), space, data_model) @@ -574,7 +589,7 @@ def run_transformation_cmd( ], ) -> None: """This command will run the specified transformation using a one-time session.""" - cmd = RunTransformationCommand() + cmd = RunTransformationCommand(user_command=_get_user_command()) cmd.run_transformation(CDFToolConfig.from_context(ctx), external_id) @@ -654,7 +669,7 @@ def run_function_cmd( ] = "dev", ) -> None: """This command will run the specified function using a one-time session.""" - cmd = RunFunctionCommand() + cmd = RunFunctionCommand(user_command=_get_user_command()) cmd.execute( CDFToolConfig.from_context(ctx), external_id, @@ -714,7 +729,7 @@ def pull_transformation_cmd( ] = False, ) -> None: """This command will pull the specified transformation and update its YAML file in the module folder""" - PullCommand().execute( + PullCommand(user_command=_get_user_command()).execute( source_dir, external_id, env, dry_run, ctx.obj.verbose, CDFToolConfig.from_context(ctx), TransformationLoader ) @@ -765,7 +780,7 @@ def pull_node_cmd( ] = False, ) -> None: """This command will pull the specified node and update its YAML file in the module folder.""" - PullCommand().execute( + PullCommand(user_command=_get_user_command()).execute( source_dir, NodeId(space, external_id), env, @@ -830,7 +845,7 @@ def dump_datamodel_cmd( ] = "tmp", ) -> None: """This command will dump the selected data model as yaml to the folder specified, defaults to /tmp.""" - cmd = DumpCommand() + cmd = DumpCommand(user_command=_get_user_command()) cmd.execute( CDFToolConfig.from_context(ctx), DataModelId(space, external_id, version), @@ -840,6 +855,70 @@ def dump_datamodel_cmd( ) +@feature_flag_app.callback(invoke_without_command=True) +def feature_flag_main(ctx: typer.Context) -> None: + """Commands to enable and disable feature flags for the toolkit.""" + if ctx.invoked_subcommand is None: + print( + Panel( + "[yellow]Warning: enabling feature flags may have undesired side effects." 
+ "\nDo not enable a flag unless you are familiar with what it does.[/]" + ) + ) + print("Use [bold yellow]cdf-tk feature list[/] or [bold yellow]cdf-tk feature --[flag] --enabled=True|False[/]") + return None + + +@feature_flag_app.command("list") +def feature_flag_list() -> None: + """List all available feature flags.""" + + cmd = FeatureFlagCommand(user_command=_get_user_command()) + cmd.list() + + +@feature_flag_app.command("set") +def feature_flag_set( + flag: Annotated[ + str, + typer.Argument( + help="Which flag to set", + ), + ], + enable: Annotated[ + bool, + typer.Option( + "--enable", + "-e", + help="Enable the flag.", + ), + ] = False, + disable: Annotated[ + bool, + typer.Option( + "--disable", + help="Disable the flag.", + ), + ] = False, +) -> None: + """Enable or disable a feature flag.""" + + cmd = FeatureFlagCommand(user_command=_get_user_command()) + if enable and disable: + raise ToolkitValidationError("Cannot enable and disable a flag at the same time.") + if not enable and not disable: + raise ToolkitValidationError("Must specify either --enable or --disable.") + cmd.set(flag, enable) + + +@feature_flag_app.command("reset") +def feature_flag_reset() -> None: + """Reset all feature flags to their default values.""" + + cmd = FeatureFlagCommand(user_command=_get_user_command()) + cmd.reset() + + def _process_include(include: Optional[list[str]], interactive: bool) -> list[str]: if include and (invalid_types := set(include).difference(_AVAILABLE_DATA_TYPES)): raise ToolkitValidationError( @@ -870,5 +949,9 @@ def _select_data_types(include: Sequence[str]) -> list[str]: raise ToolkitInvalidSettingsError(f"Invalid selection: {answer}") +def _get_user_command() -> str: + return f"cdf-tk {' '.join(sys.argv[1:])}" + + if __name__ == "__main__": app() diff --git a/cognite_toolkit/_cdf_tk/_migration.yaml b/cognite_toolkit/_cdf_tk/_migration.yaml index eeb4c3568..37f398d7f 100644 --- a/cognite_toolkit/_cdf_tk/_migration.yaml +++ b/cognite_toolkit/_cdf_tk/_migration.yaml @@ -1,8 +1,13 @@ -- version: 0.2.0 +- version: 0.2.1 cognite_modules: {} resources: {} tool: {} cognite_modules_hash: "" +- version: 0.2.0 + cognite_modules: {} + resources: {} + tool: {} + cognite_modules_hash: "1666e2feb3acb6ef523432265ee6f3e907d95e6918f7c28dd77ce93ddbf0bfeb" - version: 0.2.0b4 cognite_modules: {} resources: {} diff --git a/cognite_toolkit/_cdf_tk/commands/__init__.py b/cognite_toolkit/_cdf_tk/commands/__init__.py index 87876b206..5ec5f6679 100644 --- a/cognite_toolkit/_cdf_tk/commands/__init__.py +++ b/cognite_toolkit/_cdf_tk/commands/__init__.py @@ -4,6 +4,7 @@ from .deploy import DeployCommand from .describe import DescribeCommand from .dump import DumpCommand +from .featureflag import FeatureFlagCommand from .pull import PullCommand from .run import RunFunctionCommand, RunTransformationCommand @@ -14,6 +15,7 @@ "DeployCommand", "DescribeCommand", "DumpCommand", + "FeatureFlagCommand", "PullCommand", "RunFunctionCommand", "RunTransformationCommand", diff --git a/cognite_toolkit/_cdf_tk/commands/_base.py b/cognite_toolkit/_cdf_tk/commands/_base.py index 3c9ea7ba7..36f2cc9c0 100644 --- a/cognite_toolkit/_cdf_tk/commands/_base.py +++ b/cognite_toolkit/_cdf_tk/commands/_base.py @@ -1,8 +1,10 @@ from __future__ import annotations +import getpass from pathlib import Path from cognite.client.data_classes._base import T_CogniteResourceList, T_WritableCogniteResource, T_WriteClass +from mixpanel import Mixpanel from rich import print from cognite_toolkit._cdf_tk.exceptions import 
ToolkitRequiredValueError, ToolkitYAMLFormatError @@ -19,12 +21,22 @@ CDFToolConfig, ) +_COGNITE_TOOLKIT_MIXPANEL_TOKEN: str | None = None + class ToolkitCommand: - def __init__(self, print_warning: bool = True, user_command: str | None = None): + def __init__(self, print_warning: bool = True, user_command: str | None = None, skip_tracking: bool = False): self.print_warning = print_warning self.user_command = user_command self.warning_list = WarningList[ToolkitWarning]() + if not skip_tracking and _COGNITE_TOOLKIT_MIXPANEL_TOKEN is not None: + self._track_command(user_command) + + def _track_command(self, user_command: str | None) -> None: + mp = Mixpanel(_COGNITE_TOOLKIT_MIXPANEL_TOKEN) + distinct_id = getpass.getuser().replace(" ", "_") + cmd = type(self).__name__.removesuffix("Command") + mp.track(distinct_id, f"command_{cmd}", {"user_input": user_command or ""}) def warn(self, warning: ToolkitWarning) -> None: self.warning_list.append(warning) diff --git a/cognite_toolkit/_cdf_tk/commands/auth.py b/cognite_toolkit/_cdf_tk/commands/auth.py index a4c8234ad..0fa74415d 100644 --- a/cognite_toolkit/_cdf_tk/commands/auth.py +++ b/cognite_toolkit/_cdf_tk/commands/auth.py @@ -13,25 +13,32 @@ # limitations under the License. from __future__ import annotations -import contextlib from importlib import resources from pathlib import Path from time import sleep from typing import cast -import typer from cognite.client import CogniteClient from cognite.client.data_classes.capabilities import ( + FunctionsAcl, UserProfilesAcl, ) -from cognite.client.data_classes.iam import Group +from cognite.client.data_classes.iam import Group, GroupList, TokenInspection +from cognite.client.exceptions import CogniteAPIError from rich import print from rich.markup import escape from rich.prompt import Confirm, Prompt from rich.table import Table from cognite_toolkit._cdf_tk.constants import COGNITE_MODULES -from cognite_toolkit._cdf_tk.exceptions import ToolkitInvalidSettingsError, ToolkitValidationError +from cognite_toolkit._cdf_tk.exceptions import ( + AuthorizationError, + ResourceCreationError, + ResourceDeleteError, + ResourceRetrievalError, + ToolkitFileNotFoundError, + ToolkitInvalidSettingsError, +) from cognite_toolkit._cdf_tk.tk_warnings import ( HighSeverityWarning, LowSeverityWarning, @@ -46,20 +53,17 @@ class AuthCommand(ToolkitCommand): def execute( self, - ctx: typer.Context, + ToolGlobals: CDFToolConfig, dry_run: bool, interactive: bool, group_file: str | None, update_group: int, create_group: str | None, + verbose: bool, ) -> None: # TODO: Check if groupsAcl.UPDATE does nothing? 
if create_group is not None and update_group != 0: raise ToolkitInvalidSettingsError("--create-group and --update-group are mutually exclusive.") - with contextlib.redirect_stdout(None): - # Remove the Error message from failing to load the config - # This is verified in check_auth - ToolGlobals = CDFToolConfig.from_context(ctx) if group_file is None: template_dir = cast(Path, resources.files("cognite_toolkit")) @@ -75,10 +79,8 @@ def execute( create_group=create_group, interactive=interactive, dry_run=dry_run, - verbose=ctx.obj.verbose, + verbose=verbose, ) - if ToolGlobals.failed: - raise ToolkitValidationError("Failure to verify access rights.") def check_auth( self, @@ -89,7 +91,48 @@ def check_auth( interactive: bool = False, dry_run: bool = False, verbose: bool = False, - ) -> CogniteClient | None: + ) -> None: + auth_vars = self.initialize_client(ToolGlobals, interactive, verbose) + if auth_vars.project is None: + raise AuthorizationError("CDF_PROJECT is not set.") + cdf_project = auth_vars.project + token_inspection = self.check_has_any_access(ToolGlobals) + + self.check_has_project_access(token_inspection, cdf_project) + + print(f"[italic]Focusing on current project {cdf_project} only from here on.[/]") + + self.check_has_group_access(ToolGlobals) + + self.check_identity_provider(ToolGlobals, cdf_project) + + try: + groups = ToolGlobals.client.iam.groups.list() + except CogniteAPIError as e: + raise AuthorizationError(f"Unable to retrieve CDF groups.\n{e}") + + read_write, matched_group_id = self.check_group_membership(groups, group_file, update_group) + + self.check_has_toolkit_required_capabilities( + ToolGlobals.client, token_inspection, read_write, cdf_project, group_file.name + ) + print("---------------------") + self.check_capabilities_against_groups(ToolGlobals, token_inspection, auth_vars, groups, update_group) + + self.update_group( + ToolGlobals, + groups, + group_file, + read_write, + matched_group_id, + update_group, + create_group, + interactive, + dry_run, + ) + self.check_function_service_status(ToolGlobals, token_inspection, cdf_project, dry_run) + + def initialize_client(self, ToolGlobals: CDFToolConfig, interactive: bool, verbose: bool) -> AuthVariables: print("[bold]Checking current service principal/application and environment configurations...[/]") auth_vars = AuthVariables.from_env() if interactive: @@ -98,63 +141,63 @@ def check_auth( result = auth_vars.validate(verbose) if result.messages: print("\n".join(result.messages)) - if result.status == "error": - ToolGlobals.failed = True - return None print(" [bold green]OK[/]") - if not ToolGlobals.initialize_from_auth_variables(auth_vars): - ToolGlobals.failed = True - return None + ToolGlobals.initialize_from_auth_variables(auth_vars) + return auth_vars + + def check_has_any_access(self, ToolGlobals: CDFToolConfig) -> TokenInspection: print("Checking basic project configuration...") try: # Using the token/inspect endpoint to check if the client has access to the project. # The response also includes access rights, which can be used to check if the client has the # correct access for what you want to do. - resp = ToolGlobals.client.iam.token.inspect() - if resp is None or len(resp.capabilities) == 0: - print( - " [bold red]ERROR[/]: Valid authentication token, but it does not give any access rights. Check credentials (CDF_CLIENT_ID/CDF_CLIENT_SECRET or CDF_TOKEN)." 
+ token_inspection = ToolGlobals.client.iam.token.inspect() + if token_inspection is None or len(token_inspection.capabilities) == 0: + raise AuthorizationError( + "Valid authentication token, but it does not give any access rights." + " Check credentials (CDF_CLIENT_ID/CDF_CLIENT_SECRET or CDF_TOKEN)." ) - ToolGlobals.failed = True - return None print(" [bold green]OK[/]") except Exception: - print( - " [bold red]ERROR[/]: Not a valid authentication token. Check credentials (CDF_CLIENT_ID/CDF_CLIENT_SECRET or CDF_TOKEN)." + raise AuthorizationError( + "Not a valid authentication token. Check credentials (CDF_CLIENT_ID/CDF_CLIENT_SECRET or CDF_TOKEN)." ) - ToolGlobals.failed = True - return None - try: - print("Checking projects that the service principal/application has access to...") - if len(resp.projects) == 0: - print( - " [bold red]ERROR[/]: The service principal/application configured for this client does not have access to any projects." - ) - ToolGlobals.failed = True - return None - projects = "" - projects = projects.join(f" - {p.url_name}\n" for p in resp.projects) - print(projects[0:-1]) - except Exception as e: - print(f" [bold red]ERROR[/]: Failed to process project information from inspect()\n{e}") - ToolGlobals.failed = True - return None - print(f"[italic]Focusing on current project {auth_vars.project} only from here on.[/]") + return token_inspection + + def check_has_project_access(self, token_inspection: TokenInspection, cdf_project: str) -> None: + print("Checking projects that the service principal/application has access to...") + if len(token_inspection.projects) == 0: + raise AuthorizationError( + "The service principal/application configured for this client does not have access to any projects." + ) + print("\n".join(f" - {p.url_name}" for p in token_inspection.projects)) + if cdf_project not in {p.url_name for p in token_inspection.projects}: + raise AuthorizationError( + f"The service principal/application configured for this client does not have access to the CDF_PROJECT={cdf_project!r}." + ) + + def check_has_group_access(self, ToolGlobals: CDFToolConfig) -> None: + # Todo rewrite to use the token inspection instead. print( - "Checking basic project and group manipulation access rights (projectsAcl: LIST, READ and groupsAcl: LIST, READ, CREATE, UPDATE, DELETE)..." + "Checking basic project and group manipulation access rights " + "(projectsAcl: LIST, READ and groupsAcl: LIST, READ, CREATE, UPDATE, DELETE)..." ) try: ToolGlobals.verify_client( capabilities={ - "projectsAcl": ["LIST", "READ"], - "groupsAcl": ["LIST", "READ"], + "projectsAcl": [ + "LIST", + "READ", + ], + "groupsAcl": ["LIST", "READ", "CREATE", "UPDATE", "DELETE"], } ) print(" [bold green]OK[/]") except Exception: self.warn( HighSeverityWarning( - "The service principal/application configured for this client does not have the basic group write access rights." + "The service principal/application configured for this client " + "does not have the basic group write access rights." ) ) print("Checking basic group read access rights (projectsAcl: LIST, READ and groupsAcl: LIST, READ)...") @@ -167,13 +210,14 @@ def check_auth( ) print(" [bold green]OK[/] - can continue with checks.") except Exception: - print( - " [bold red]ERROR[/]: Unable to continue, the service principal/application configured for this client does not have the basic read group access rights." 
+ raise AuthorizationError( + "Unable to continue, the service principal/application configured for this client does not" + " have the basic read group access rights." ) - ToolGlobals.failed = True - return None - project_info = ToolGlobals.client.get(f"/api/v1/projects/{auth_vars.project}").json() + + def check_identity_provider(self, ToolGlobals: CDFToolConfig, cdf_project: str) -> None: print("Checking identity provider settings...") + project_info = ToolGlobals.client.get(f"/api/v1/projects/{cdf_project}").json() oidc = project_info.get("oidcConfiguration", {}) if "https://login.windows.net" in oidc.get("tokenUrl"): tenant_id = oidc.get("tokenUrl").split("/")[-3] @@ -183,21 +227,17 @@ def check_auth( print(f" [bold green]OK[/] - Auth0 with tenant id ({tenant_id}).") else: self.warn(MediumSeverityWarning(f"Unknown identity provider {oidc.get('tokenUrl')}")) - accessClaims = [c.get("claimName") for c in oidc.get("accessClaims", {})] + access_claims = [c.get("claimName") for c in oidc.get("accessClaims", {})] print( - f" Matching on CDF group sourceIds will be done on any of these claims from the identity provider: {accessClaims}" + f" Matching on CDF group sourceIds will be done on any of these claims from the identity provider: {access_claims}" ) + + def check_group_membership(self, groups: GroupList, group_file: Path, update_group: int) -> tuple[Group, int]: print("Checking CDF group memberships for the current client configured...") - try: - groups = ToolGlobals.client.iam.groups.list().data - except Exception: - print(" [bold red]ERROR[/]: Unable to retrieve CDF groups.") - ToolGlobals.failed = True - return None if group_file.exists(): file_text = group_file.read_text() else: - raise FileNotFoundError(f"Group config file does not exist: {group_file.as_posix()}") + raise ToolkitFileNotFoundError(f"Group config file does not exist: {group_file.as_posix()}") read_write = Group.load(file_text) tbl = Table(title="CDF Group ids, Names, and Source Ids") tbl.add_column("Id", justify="left") @@ -227,12 +267,11 @@ def check_auth( " This is not recommended. The group matching the group config file is marked in bold above if it is present." ) if update_group == 1: - print( - " [bold red]ERROR[/]: You have specified --update-group=1.\n" - + " With multiple groups available, you must use the --update_group= option to specify which group to update." + raise AuthorizationError( + "You have specified --update-group=1.\n" + " With multiple groups available, you must use the --update_group= " + "option to specify which group to update." ) - ToolGlobals.failed = True - return None else: print(" [bold green]OK[/] - Only one group is used for this service principal/application.") print("---------------------") @@ -248,12 +287,22 @@ def check_auth( print( " This group's id should be configured as the [italic]readwrite_source_id[/] for the common/cdf_auth_readwrite_all module." 
) - print(f"\nChecking CDF groups access right against capabilities in {group_file.name} ...") + return read_write, matched_group_id + + def check_has_toolkit_required_capabilities( + self, + client: CogniteClient, + token_inspection: TokenInspection, + read_write: Group, + cdf_project: str, + group_file_name: str, + ) -> None: + print(f"\nChecking CDF groups access right against capabilities in {group_file_name} ...") - diff = ToolGlobals.client.iam.compare_capabilities( - resp.capabilities, + diff = client.iam.compare_capabilities( + token_inspection.capabilities, read_write.capabilities or [], - project=auth_vars.project, + project=cdf_project, ) if len(diff) > 0: diff_list: list[str] = [] @@ -263,14 +312,22 @@ def check_auth( self.warn(MissingCapabilityWarning(str(s))) else: print(" [bold green]OK[/] - All capabilities are present in the CDF project.") + + def check_capabilities_against_groups( + self, + ToolGlobals: CDFToolConfig, + token_inspection: TokenInspection, + auth_vars: AuthVariables, + groups: GroupList, + update_group: int, + ) -> None: # Flatten out into a list of acls in the existing project - existing_cap_list = [c.capability for c in resp.capabilities] - print("---------------------") + existing_cap_list = [c.capability for c in token_inspection.capabilities] if len(groups) > 1 and update_group > 1: print(f"Checking group config file against capabilities only from the group {update_group}...") for g in groups: if g.id == update_group: - existing_cap_list = g.capabilities + existing_cap_list = g.capabilities or [] break else: if len(groups) > 1: @@ -280,7 +337,7 @@ def check_auth( loosing = ToolGlobals.client.iam.compare_capabilities( existing_cap_list, - resp.capabilities, + token_inspection.capabilities, project=auth_vars.project, ) loosing = [l for l in loosing if type(l) is not UserProfilesAcl] # noqa: E741 @@ -305,6 +362,19 @@ def check_auth( " [bold green]OK[/] - All capabilities from the CDF project are also present in the group config file." ) print("---------------------") + + def update_group( + self, + ToolGlobals: CDFToolConfig, + groups: GroupList, + group_file: Path, + read_write: Group, + matched_group_id: int, + update_group: int, + create_group: str | None, + interactive: bool, + dry_run: bool, + ) -> None: if interactive and matched_group_id != 0: push_group = Confirm.ask( f"Do you want to update the group with id {matched_group_id} and name {read_write.name} with the capabilities from {group_file.as_posix()} ?", @@ -333,9 +403,7 @@ def check_auth( group = g break if group is None: - print(f" [bold red]ERROR[/]: Unable to find --group-id={update_group} in CDF.") - ToolGlobals.failed = True - return None + raise ResourceRetrievalError(f"Unable to find --group-id={update_group} in CDF.") read_write.name = group.name read_write.source_id = group.source_id read_write.metadata = group.metadata @@ -357,9 +425,7 @@ def check_auth( f" [bold green]OK[/] - Would have created new group with {len(read_write.capabilities or [])} capabilities." 
) except Exception as e: - print(f" [bold red]ERROR[/]: Unable to create new group {read_write.name}.\n{e}") - ToolGlobals.failed = True - return None + raise ResourceCreationError(f"Unable to create new group {read_write.name}.\n{e}") if update_group: try: if not dry_run: @@ -368,24 +434,34 @@ def check_auth( else: print(f" [bold green]OK[/] - Would have deleted old group {update_group}.") except Exception as e: - print(f" [bold red]ERROR[/]: Unable to delete old group {update_group}.\n{e}") - ToolGlobals.failed = True - return None + raise ResourceDeleteError(f"Unable to delete old group {update_group}.\n{e}") + + def check_function_service_status( + self, ToolGlobals: CDFToolConfig, token_inspection: TokenInspection, cdf_project: str, dry_run: bool + ) -> None: print("Checking function service status...") - function_status = ToolGlobals.client.functions.status() - if function_status.status != "activated": - if function_status.status == "requested": - print(" [bold yellow]INFO:[/] Function service activation is in progress (may take up to 2 hours)...") - else: - if not dry_run: + has_function_read_access = not ToolGlobals.client.iam.compare_capabilities( + token_inspection.capabilities, + FunctionsAcl([FunctionsAcl.Action.Read], FunctionsAcl.Scope.All()), + project=cdf_project, + ) + if not has_function_read_access: + self.warn(HighSeverityWarning("Cannot check function service status, missing function read access.")) + return None + try: + function_status = ToolGlobals.client.functions.status() + if function_status.status != "activated": + if function_status.status == "requested": print( - " [bold yellow]INFO:[/] Function service has not been activated, activating now, this may take up to 2 hours..." + " [bold yellow]INFO:[/] Function service activation is in progress (may take up to 2 hours)..." ) - ToolGlobals.client.functions.activate() else: print( " [bold yellow]INFO:[/] Function service has not been activated, would have activated (will take up to 2 hours)..." 
) - else: - print(" [bold green]OK[/] - Function service has been activated.") + else: + print(" [bold green]OK[/] - Function service has been activated.") + except CogniteAPIError as e: + self.warn(HighSeverityWarning(f"Unable to check function service status.\n{e}")) + return None diff --git a/cognite_toolkit/_cdf_tk/commands/build.py b/cognite_toolkit/_cdf_tk/commands/build.py index 8d6a7c556..01447765b 100644 --- a/cognite_toolkit/_cdf_tk/commands/build.py +++ b/cognite_toolkit/_cdf_tk/commands/build.py @@ -1,6 +1,7 @@ from __future__ import annotations import datetime +import difflib import io import re import shutil @@ -35,7 +36,6 @@ AmbiguousResourceFileError, ToolkitDuplicatedModuleError, ToolkitFileExistsError, - ToolkitMissingModulesError, ToolkitNotADirectoryError, ToolkitValidationError, ToolkitYAMLFormatError, @@ -61,7 +61,11 @@ UnresolvedVariableWarning, WarningList, ) -from cognite_toolkit._cdf_tk.tk_warnings.fileread import DuplicatedItemWarning, MissingRequiredIdentifierWarning +from cognite_toolkit._cdf_tk.tk_warnings.fileread import ( + DuplicatedItemWarning, + MissingRequiredIdentifierWarning, + UnknownResourceTypeWarning, +) from cognite_toolkit._cdf_tk.utils import ( calculate_str_or_file_hash, iterate_modules, @@ -81,14 +85,9 @@ def execute(self, verbose: bool, source_path: Path, build_dir: Path, build_env_n raise ToolkitNotADirectoryError(str(source_path)) system_config = SystemYAML.load_from_directory(source_path, build_env_name, self.warn, self.user_command) + sources = SystemYAML.validate_module_dir(source_path) config = BuildConfigYAML.load_from_directory(source_path, build_env_name, self.warn) - sources = [module_dir for root_module in ROOT_MODULES if (module_dir := source_path / root_module).exists()] - if not sources: - directories = "\n".join(f" ┣ {name}" for name in ROOT_MODULES[:-1]) - raise ToolkitMissingModulesError( - f"Could not find the source modules directory.\nExpected to find one of the following directories\n" - f"{source_path.name}\n{directories}\n ┗ {ROOT_MODULES[-1]}" - ) + directory_name = "current directory" if source_path == Path(".") else f"project '{source_path!s}'" module_locations = "\n".join(f" - Module directory '{source!s}'" for source in sources) print( @@ -503,7 +502,7 @@ def validate( f"YAML validation error for {destination.name} after substituting config variables: {e}" ) - loader = self._get_loader(resource_folder, destination) + loader = self._get_loader(resource_folder, destination, source_path) if loader is None: return warning_list if not issubclass(loader, ResourceLoader): @@ -523,7 +522,9 @@ def validate( warning_list.append(MissingRequiredIdentifierWarning(source_path, element_no, tuple(), error.args)) if first_seen := state.ids_by_resource_type[loader].get(identifier): - warning_list.append(DuplicatedItemWarning(source_path, identifier, first_seen)) + if loader is not RawDatabaseLoader: + # RAW Database will pick up all Raw Tables, so we don't want to warn about duplicates. 
+ warning_list.append(DuplicatedItemWarning(source_path, identifier, first_seen)) else: state.ids_by_resource_type[loader][identifier] = source_path @@ -555,16 +556,27 @@ def _get_api_spec(self, loader: type[ResourceLoader], destination: Path) -> Para ) return api_spec - def _get_loader(self, resource_folder: str, destination: Path) -> type[Loader] | None: - loaders = LOADER_BY_FOLDER_NAME.get(resource_folder, []) - loaders = [loader for loader in loaders if loader.is_supported_file(destination)] - if len(loaders) == 0: + def _get_loader(self, resource_folder: str, destination: Path, source_path: Path) -> type[Loader] | None: + folder_loaders = LOADER_BY_FOLDER_NAME.get(resource_folder, []) + if not folder_loaders: self.warn( ToolkitNotSupportedWarning( - f"the resource {resource_folder!r}", + f"resource of type {resource_folder!r} in {source_path.name}.", details=f"Available resources are: {', '.join(LOADER_BY_FOLDER_NAME.keys())}", ) ) + return None + + loaders = [loader for loader in folder_loaders if loader.is_supported_file(destination)] + if len(loaders) == 0: + suggestion: str | None = None + if "." in source_path.stem: + core, kind = source_path.stem.rsplit(".", 1) + match = difflib.get_close_matches(kind, [loader.kind for loader in folder_loaders]) + if match: + suggestion = f"{core}.{match[0]}{source_path.suffix}" + self.warn(UnknownResourceTypeWarning(source_path, suggestion)) + return None elif len(loaders) > 1 and all(loader.folder_name == "raw" for loader in loaders): # Multiple raw loaders load from the same file. return RawDatabaseLoader diff --git a/cognite_toolkit/_cdf_tk/commands/clean.py b/cognite_toolkit/_cdf_tk/commands/clean.py index 294825b9f..2666948b6 100644 --- a/cognite_toolkit/_cdf_tk/commands/clean.py +++ b/cognite_toolkit/_cdf_tk/commands/clean.py @@ -4,7 +4,6 @@ from graphlib import TopologicalSorter from pathlib import Path -import typer from cognite.client.data_classes._base import T_CogniteResourceList, T_WritableCogniteResource, T_WriteClass from cognite.client.exceptions import CogniteAPIError, CogniteNotFoundError from cognite.client.utils.useful_types import SequenceNotStr @@ -208,9 +207,14 @@ def _verbose_print_drop( ) def execute( - self, ctx: typer.Context, build_dir_raw: str, build_env_name: str, dry_run: bool, include: list[str] + self, + ToolGlobals: CDFToolConfig, + build_dir_raw: str, + build_env_name: str, + dry_run: bool, + include: list[str], + verbose: bool, ) -> None: - ToolGlobals = CDFToolConfig.from_context(ctx) build_dir = Path(build_dir_raw) if not build_dir.exists(): raise ToolkitNotADirectoryError( @@ -269,7 +273,7 @@ def execute( drop=True, dry_run=dry_run, drop_data=True, - verbose=ctx.obj.verbose, + verbose=verbose, ) if result: results[result.name] = result diff --git a/cognite_toolkit/_cdf_tk/commands/deploy.py b/cognite_toolkit/_cdf_tk/commands/deploy.py index 18abaea46..3ebdb6aea 100644 --- a/cognite_toolkit/_cdf_tk/commands/deploy.py +++ b/cognite_toolkit/_cdf_tk/commands/deploy.py @@ -5,7 +5,6 @@ from graphlib import TopologicalSorter from pathlib import Path -import typer from cognite.client.data_classes._base import T_CogniteResourceList from cognite.client.exceptions import CogniteAPIError, CogniteDuplicatedError from rich import print @@ -53,22 +52,22 @@ class DeployCommand(ToolkitCommand): - def __init__(self, print_warning: bool = True): - super().__init__(print_warning) - self._clean_command = CleanCommand(print_warning) + def __init__(self, print_warning: bool = True, user_command: str | None = None): + 
super().__init__(print_warning, user_command) + self._clean_command = CleanCommand(print_warning, skip_tracking=True) def execute( self, - ctx: typer.Context, + ToolGlobals: CDFToolConfig, build_dir_raw: str, build_env_name: str, dry_run: bool, drop: bool, drop_data: bool, include: list[str], + verbose: bool, ) -> None: # Override cluster and project from the options/env variables - ToolGlobals = CDFToolConfig.from_context(ctx) build_dir: Path = Path(build_dir_raw) if not build_dir.exists(): raise ToolkitNotADirectoryError( @@ -137,7 +136,7 @@ def execute( drop=drop, dry_run=dry_run, drop_data=drop_data, - verbose=ctx.obj.verbose, + verbose=verbose, ) if result: results[result.name] = result @@ -153,11 +152,11 @@ def execute( dry_run=dry_run, has_done_drop=drop, has_dropped_data=drop_data, - verbose=ctx.obj.verbose, + verbose=verbose, ) if result: results[result.name] = result - if ctx.obj.verbose: + if verbose: print("") # Extra newline if results.has_counts: @@ -307,12 +306,23 @@ def to_create_changed_unchanged_triple( for item in resources: cdf_resource = cdf_resource_by_id.get(loader.get_id(item)) - if cdf_resource and loader.are_equal(item, cdf_resource): + try: + are_equal = cdf_resource and loader.are_equal(item, cdf_resource) + except CogniteAPIError as e: + self.warn( + MediumSeverityWarning( + f"Failed to compare {loader.display_name} {loader.get_id(item)} for equality. Proceeding assuming not data in CDF. Error {e}." + ) + ) + print(Panel(traceback.format_exc())) + are_equal = False + if are_equal: unchanged.append(item) elif cdf_resource: to_update.append(item) else: to_create.append(item) + return to_create, to_update, unchanged def _verbose_print( diff --git a/cognite_toolkit/_cdf_tk/commands/featureflag.py b/cognite_toolkit/_cdf_tk/commands/featureflag.py new file mode 100644 index 000000000..6c8f14673 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/commands/featureflag.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import tempfile +from enum import Enum +from functools import lru_cache +from pathlib import Path +from typing import Any, ClassVar + +import yaml +from rich import print +from rich.table import Table + +from cognite_toolkit._cdf_tk.commands._base import ToolkitCommand +from cognite_toolkit._cdf_tk.exceptions import ToolkitRequiredValueError + + +class Flags(Enum): + MODULES_CMD: ClassVar[dict[str, Any]] = {"visible": True, "description": "Enables the modules management subapp"} + INTERNAL: ClassVar[dict[str, Any]] = {"visible": False, "description": "Does nothing"} + + +class FeatureFlag: + @staticmethod + def _get_file() -> Path: + f = Path(tempfile.gettempdir()) / "tk-ff.bin" + if not f.exists(): + f.write_text("{}") + return f + + @staticmethod + def load_user_settings() -> dict[str, bool]: + return yaml.safe_load(FeatureFlag._get_file().read_text()) + + @staticmethod + def save_user_settings(flag: Flags, enabled: bool) -> None: + settings = FeatureFlag.load_user_settings() + settings.update({flag.name: enabled}) + FeatureFlag._get_file().write_text(yaml.dump(settings)) + FeatureFlag.is_enabled.cache_clear() + + @staticmethod + def reset_user_settings() -> None: + FeatureFlag._get_file().unlink() + FeatureFlag.is_enabled.cache_clear() + + @staticmethod + @lru_cache(typed=True) + def is_enabled(flag: str | Flags) -> bool: + if isinstance(flag, str): + fflag = FeatureFlag.to_flag(flag) + else: + fflag = flag + + if not fflag: + return False + + user_settings = FeatureFlag.load_user_settings() + return user_settings.get(fflag.name, False) + + 
@staticmethod + @lru_cache + def to_flag(flag: str) -> Flags | None: + try: + return Flags[flag.upper()] + except KeyError: + return None + + +class FeatureFlagCommand(ToolkitCommand): + def list(self) -> None: + user_settings = FeatureFlag.load_user_settings() + table = Table(title="feature flags") + table.add_column("Name", justify="left") + table.add_column("Description", justify="left") + table.add_column("Status", justify="left") + + for flag in Flags: + is_enabled = user_settings.get(flag.name, False) + if is_enabled or flag.value.get("visible", False): + table.add_row( + flag.name, + str(flag.value.get("description", "")), + "enabled" if is_enabled else "disabled", + style="yellow" if is_enabled else "", + ) + print(table) + + def set(self, flag: str, enabled: bool) -> None: + fflag = FeatureFlag.to_flag(flag) + if not fflag: + raise ToolkitRequiredValueError( + f"Unknown flag: [bold]{flag}[/]. Use the [bold]list[/] command to see available flags" + ) + FeatureFlag.save_user_settings(fflag, enabled) + print(f"Feature flag [bold yellow]{flag}[/] has been [bold yellow]{'enabled' if enabled else 'disabled'}[/]") + + def reset(self) -> None: + FeatureFlag.reset_user_settings() + print("Feature flags have been reset") diff --git a/cognite_toolkit/_cdf_tk/commands/pull.py b/cognite_toolkit/_cdf_tk/commands/pull.py index 82c289521..8354b45b3 100644 --- a/cognite_toolkit/_cdf_tk/commands/pull.py +++ b/cognite_toolkit/_cdf_tk/commands/pull.py @@ -392,6 +392,8 @@ def execute( system_config = SystemYAML.load_from_directory(source_path, env) config = BuildConfigYAML.load_from_directory(source_path, env) config.set_environment_variables() + # Todo Remove once the new modules in `_cdf_tk/prototypes/_packages` are finished. + config.variables.pop("_cdf_tk", None) config.environment.selected = config.available_modules print( Panel.fit( diff --git a/cognite_toolkit/_cdf_tk/constants.py b/cognite_toolkit/_cdf_tk/constants.py index ede0be70e..6e5f72332 100644 --- a/cognite_toolkit/_cdf_tk/constants.py +++ b/cognite_toolkit/_cdf_tk/constants.py @@ -36,3 +36,5 @@ ROOT_PATH = Path(__file__).parent.parent COGNITE_MODULES_PATH = ROOT_PATH / COGNITE_MODULES + +SUPPORT_MODULE_UPGRADE_FROM_VERSION = "0.1.0" diff --git a/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py b/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py index 3666351d9..0f36c15d8 100644 --- a/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py +++ b/cognite_toolkit/_cdf_tk/data_classes/_config_yaml.py @@ -5,7 +5,7 @@ import re from abc import ABC from collections import UserDict, defaultdict -from collections.abc import Sequence +from collections.abc import Iterable, Sequence from dataclasses import dataclass, field from pathlib import Path from typing import Any, Literal @@ -18,6 +18,7 @@ BUILD_ENVIRONMENT_FILE, DEFAULT_CONFIG_FILE, MODULE_PATH_SEP, + ROOT_MODULES, SEARCH_VARIABLES_SUFFIX, ) from cognite_toolkit._cdf_tk.exceptions import ToolkitEnvError, ToolkitMissingModuleError @@ -341,7 +342,35 @@ def __init__(self, environment: Environment, entries: dict[tuple[str, ...], Conf def as_build_config(self) -> BuildConfigYAML: return BuildConfigYAML(environment=self.environment, variables=self.dump()[self._variables], filepath=Path("")) + def load_selected_defaults(self, cognite_root_module: Path) -> InitConfigYAML: + if not self.environment.selected or len(self.environment.selected) == 0: + return self.load_defaults(cognite_root_module) + + relevant_defaults: list[Path] = [] + for selected in self.environment.selected: + 
relevant_defaults.extend(cognite_root_module.glob(f"**/{selected}/**/{DEFAULT_CONFIG_FILE}")) + + return self._load_defaults(cognite_root_module, relevant_defaults) + def load_defaults(self, cognite_root_module: Path) -> InitConfigYAML: + """Loads all default.config.yaml files in the cognite root module.""" + + default_files_iterable: Iterable[Path] + if cognite_root_module.name in ROOT_MODULES: + default_files_iterable = cognite_root_module.glob(f"**/{DEFAULT_CONFIG_FILE}") + else: + default_files_iterable = itertools.chain( + *[ + (cognite_root_module / root_module).glob(f"**/{DEFAULT_CONFIG_FILE}") + for root_module in ROOT_MODULES + if (cognite_root_module / root_module).exists() + ] + ) + + default_files = sorted(default_files_iterable, key=lambda f: f.relative_to(cognite_root_module)) + return self._load_defaults(cognite_root_module, default_files) + + def _load_defaults(self, cognite_root_module: Path, defaults_files: list[Path]) -> InitConfigYAML: """Loads all default.config.yaml files in the cognite root module. This extracts the default values from the default.config.yaml files and @@ -353,10 +382,8 @@ def load_defaults(self, cognite_root_module: Path) -> InitConfigYAML: Returns: self """ - defaults = sorted( - cognite_root_module.glob(f"**/{DEFAULT_CONFIG_FILE}"), key=lambda f: f.relative_to(cognite_root_module) - ) - for default_config in defaults: + + for default_config in defaults_files: parts = default_config.parent.relative_to(cognite_root_module).parts raw_file = default_config.read_text() file_comments = self._extract_comments(raw_file, key_prefix=tuple(parts)) diff --git a/cognite_toolkit/_cdf_tk/data_classes/_system_yaml.py b/cognite_toolkit/_cdf_tk/data_classes/_system_yaml.py index 789223056..7e90c3abb 100644 --- a/cognite_toolkit/_cdf_tk/data_classes/_system_yaml.py +++ b/cognite_toolkit/_cdf_tk/data_classes/_system_yaml.py @@ -4,11 +4,13 @@ from pathlib import Path from typing import Any, ClassVar +from packaging.version import Version +from packaging.version import parse as parse_version from rich import print -from cognite_toolkit._cdf_tk.constants import MODULE_PATH_SEP +from cognite_toolkit._cdf_tk.constants import MODULE_PATH_SEP, ROOT_MODULES from cognite_toolkit._cdf_tk.data_classes._base import ConfigCore, _load_version_variable -from cognite_toolkit._cdf_tk.exceptions import ToolkitMissingModuleError +from cognite_toolkit._cdf_tk.exceptions import ToolkitMissingModuleError, ToolkitMissingModulesError @dataclass @@ -17,6 +19,10 @@ class SystemYAML(ConfigCore): cdf_toolkit_version: str packages: dict[str, list[str | tuple[str, ...]]] = field(default_factory=dict) + @property + def module_version(self) -> Version: + return parse_version(self.cdf_toolkit_version) + @classmethod def _file_name(cls, build_env_name: str) -> str: return cls.file_name @@ -60,3 +66,14 @@ def validate_modules( f"Package {package} defined in {self.filepath.name!s} is referring " f"the following missing modules {missing}." 
) + + @staticmethod + def validate_module_dir(source_path: Path) -> list[Path]: + sources = [module_dir for root_module in ROOT_MODULES if (module_dir := source_path / root_module).exists()] + if not sources: + directories = "\n".join(f" ┣ {name}" for name in ROOT_MODULES[:-1]) + raise ToolkitMissingModulesError( + f"Could not find the source modules directory.\nExpected to find one of the following directories\n" + f"{source_path.name}\n{directories}\n ┗ {ROOT_MODULES[-1]}" + ) + return sources diff --git a/cognite_toolkit/_cdf_tk/exceptions.py b/cognite_toolkit/_cdf_tk/exceptions.py index 71b2ac12d..42e340d1c 100644 --- a/cognite_toolkit/_cdf_tk/exceptions.py +++ b/cognite_toolkit/_cdf_tk/exceptions.py @@ -141,13 +141,27 @@ class UploadFileError(ToolkitError): pass +class ResourceRetrievalError(ToolkitError): ... + + class ResourceCreationError(ToolkitError): pass +class ResourceDeleteError(ToolkitError): ... + + class ResourceUpdateError(ToolkitError): pass class AmbiguousResourceFileError(ToolkitError): pass + + +class AuthenticationError(ToolkitError): + pass + + +class AuthorizationError(ToolkitError): + pass diff --git a/cognite_toolkit/_cdf_tk/loaders/_base_loaders.py b/cognite_toolkit/_cdf_tk/loaders/_base_loaders.py index aa8c26c60..0d2763be4 100644 --- a/cognite_toolkit/_cdf_tk/loaders/_base_loaders.py +++ b/cognite_toolkit/_cdf_tk/loaders/_base_loaders.py @@ -175,6 +175,10 @@ def update(self, items: T_CogniteResourceList) -> Sized: def delete(self, ids: SequenceNotStr[T_ID]) -> int: raise NotImplementedError + @abstractmethod + def iterate(self) -> Iterable[T_WritableCogniteResource]: + raise NotImplementedError + # The methods below have default implementations that can be overwritten in subclasses @classmethod @lru_cache(maxsize=1) diff --git a/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py b/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py index 2bbbd1081..5e492bcc4 100644 --- a/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py +++ b/cognite_toolkit/_cdf_tk/loaders/_resource_loaders.py @@ -416,6 +416,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: self.client.iam.groups.delete(found) return len(found) + def iterate(self) -> Iterable[Group]: + return self.client.iam.groups.list(all=True) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -530,6 +533,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: self.client.iam.security_categories.delete([item.id for item in retrieved if item.id]) return len(retrieved) + def iterate(self) -> Iterable[SecurityCategory]: + return self.client.iam.security_categories.list(limit=-1) + @final class DataSetsLoader(ResourceLoader[str, DataSetWrite, DataSet, DataSetWriteList, DataSetList]): @@ -619,6 +625,9 @@ def update(self, items: DataSetWriteList) -> DataSetList: def delete(self, ids: SequenceNotStr[str]) -> int: raise NotImplementedError("CDF does not support deleting data sets.") + def iterate(self) -> Iterable[DataSet]: + return iter(self.client.data_sets) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -694,6 +703,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: # All deleted successfully return len(ids) + def iterate(self) -> Iterable[LabelDefinition]: + return iter(self.client.labels) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -924,6 +936,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: 
self.client.functions.delete(external_id=cast(SequenceNotStr[str], ids)) return len(ids) + def iterate(self) -> Iterable[Function]: + return iter(self.client.functions) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -1055,6 +1070,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: count += 1 return count + def iterate(self) -> Iterable[FunctionSchedule]: + return iter(self.client.functions.schedules) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -1152,6 +1170,9 @@ def delete(self, ids: SequenceNotStr[RawDatabaseTable]) -> int: raise e return len(db_names) + def iterate(self) -> Iterable[RawDatabaseTable]: + return (RawDatabaseTable(db_name=cast(str, db.name)) for db in self.client.raw.databases) + def count(self, ids: SequenceNotStr[RawDatabaseTable]) -> int: nr_of_tables = 0 for db_name, raw_tables in itertools.groupby(sorted(ids), key=lambda x: x.db_name): @@ -1292,6 +1313,13 @@ def delete(self, ids: SequenceNotStr[RawDatabaseTable]) -> int: count += len(tables) return count + def iterate(self) -> Iterable[RawDatabaseTable]: + return ( + RawDatabaseTable(db_name=cast(str, db.name), table_name=cast(str, table.name)) + for db in self.client.raw.databases + for table in self.client.raw.tables(cast(str, db.name)) + ) + def count(self, ids: SequenceNotStr[RawDatabaseTable]) -> int: if not self._printed_warning: print(" [bold green]INFO:[/] Raw rows do not support count (there is no aggregation method).") @@ -1391,6 +1419,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: self.client.time_series.delete(external_id=existing, ignore_unknown_ids=True) return len(existing) + def iterate(self) -> Iterable[TimeSeries]: + return iter(self.client.time_series) + def count(self, ids: str | dict[str, Any] | SequenceNotStr[str | dict[str, Any]] | None) -> int: datapoints = cast( DatapointsList, @@ -1546,6 +1577,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: # All deleted successfully return len(ids) + def iterate(self) -> Iterable[DatapointSubscription]: + return iter(self.client.time_series.subscriptions) + def are_equal(self, local: DataPointSubscriptionWrite, cdf_resource: DatapointSubscription) -> bool: local_dumped = local.dump() cdf_dumped = cdf_resource.as_write().dump() @@ -1747,6 +1781,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: self.client.transformations.delete(external_id=existing, ignore_unknown_ids=True) return len(existing) + def iterate(self) -> Iterable[Transformation]: + return iter(self.client.transformations) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -1863,6 +1900,9 @@ def delete(self, ids: str | SequenceNotStr[str] | None) -> int: except CogniteNotFoundError as e: return len(cast(SequenceNotStr[str], ids)) - len(e.not_found) + def iterate(self) -> Iterable[TransformationSchedule]: + return iter(self.client.transformations.schedules) + @final class TransformationNotificationLoader( @@ -1963,6 +2003,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: self.client.transformations.notifications.delete([item.id for item in existing]) # type: ignore[misc] return len(existing) + def iterate(self) -> Iterable[TransformationNotification]: + return iter(self.client.transformations.notifications) + @classmethod def get_dependent_items(cls, item: dict) -> Iterable[tuple[type[ResourceLoader], Hashable]]: """Returns all items that this item requires. 
@@ -2109,6 +2152,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: return 0 return len(id_list) + def iterate(self) -> Iterable[ExtractionPipeline]: + return iter(self.client.extraction_pipelines) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -2239,6 +2285,12 @@ def delete(self, ids: SequenceNotStr[str]) -> int: count += 1 return count + def iterate(self) -> Iterable[ExtractionPipelineConfig]: + return ( + self.client.extraction_pipelines.config.retrieve(external_id=cast(str, pipeline.external_id)) + for pipeline in self.client.extraction_pipelines + ) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -2392,6 +2444,9 @@ def delete(self, ids: str | SequenceNotStr[str] | None) -> int: self.client.files.delete(external_id=cast(SequenceNotStr[str], ids)) return len(cast(SequenceNotStr[str], ids)) + def iterate(self) -> Iterable[FileMetadata]: + return iter(self.client.files) + def count(self, ids: SequenceNotStr[str]) -> int: return sum( 1 @@ -2502,6 +2557,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: deleted = self.client.data_modeling.spaces.delete(to_delete) return len(deleted) + def iterate(self) -> Iterable[Space]: + return iter(self.client.data_modeling.spaces) + def count(self, ids: SequenceNotStr[str]) -> int: # Bug in spec of aggregate requiring view_id to be passed in, so we cannot use it. # When this bug is fixed, it will be much faster to use aggregate. @@ -2634,6 +2692,9 @@ def delete(self, ids: SequenceNotStr[ContainerId]) -> int: deleted = self.client.data_modeling.containers.delete(cast(Sequence, ids)) return len(deleted) + def iterate(self) -> Iterable[Container]: + return iter(self.client.data_modeling.containers) + def count(self, ids: SequenceNotStr[ContainerId]) -> int: # Bug in spec of aggregate requiring view_id to be passed in, so we cannot use it. # When this bug is fixed, it will be much faster to use aggregate. 
@@ -2857,6 +2918,9 @@ def delete(self, ids: SequenceNotStr[ViewId]) -> int: print(f" [bold yellow]WARNING:[/] Could not delete views {to_delete} after {attempt_count} attempts.") return nr_of_deleted + def iterate(self) -> Iterable[View]: + return iter(self.client.data_modeling.views) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -3021,6 +3085,9 @@ def update(self, items: DataModelApplyList) -> DataModelList: def delete(self, ids: SequenceNotStr[DataModelId]) -> int: return len(self.client.data_modeling.data_models.delete(cast(Sequence, ids))) + def iterate(self) -> Iterable[DataModel]: + return iter(self.client.data_modeling.data_models) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: @@ -3163,6 +3230,9 @@ def delete(self, ids: SequenceNotStr[NodeId]) -> int: raise e return len(deleted.nodes) + def iterate(self) -> Iterable[Node]: + return iter(self.client.data_modeling.instances) + def count(self, ids: SequenceNotStr[NodeId]) -> int: return len(ids) @@ -3258,6 +3328,9 @@ def delete(self, ids: SequenceNotStr[str]) -> int: successes += 1 return successes + def iterate(self) -> Iterable[Workflow]: + return self.client.workflows.list(limit=-1) + @final class WorkflowVersionLoader( @@ -3341,6 +3414,9 @@ def delete(self, ids: SequenceNotStr[WorkflowVersionId]) -> int: successes += 1 return successes + def iterate(self) -> Iterable[WorkflowVersion]: + return self.client.workflows.versions.list(limit=-1) + @classmethod @lru_cache(maxsize=1) def get_write_cls_parameter_spec(cls) -> ParameterSpecSet: diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/__init__.py b/cognite_toolkit/_cdf_tk/prototypes/_packages/__init__.py new file mode 100644 index 000000000..5fc071237 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/__init__.py @@ -0,0 +1 @@ +__all__ = ["quickstart", "empty", "examples", "reference"] diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/empty_module/default.config.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/empty_module/default.config.yaml new file mode 100644 index 000000000..02e61d5d0 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/empty_module/default.config.yaml @@ -0,0 +1 @@ +my_variable: "Hello, World!" 
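Taken together, the hunks above give every resource loader the same `iterate()` contract: enumerate what already exists in the project for that resource type. The sketch below shows how a caller, for example a future dump or describe command, could lean on that uniform contract; `MiniLoader`, the two toy loaders, and `summarize` are simplified stand-ins for the Toolkit's loader classes, which yield SDK objects rather than plain dicts.

```python
from __future__ import annotations

from abc import ABC, abstractmethod
from collections.abc import Iterable


class MiniLoader(ABC):
    """Simplified stand-in for the Toolkit's resource loader base class."""
    kind: str

    @abstractmethod
    def iterate(self) -> Iterable[dict]:
        """Yield every resource of this kind that exists in the project."""
        raise NotImplementedError


class DataSetLoader(MiniLoader):
    kind = "data sets"

    def iterate(self) -> Iterable[dict]:
        yield from ({"externalId": f"ds_{i}"} for i in range(3))


class TimeSeriesLoader(MiniLoader):
    kind = "time series"

    def iterate(self) -> Iterable[dict]:
        yield from ({"externalId": f"ts_{i}"} for i in range(5))


def summarize(loaders: Iterable[MiniLoader]) -> dict[str, int]:
    # Because every loader exposes the same iterate() contract, a generic consumer
    # can treat all resource types uniformly, with no per-resource special cases.
    return {loader.kind: sum(1 for _ in loader.iterate()) for loader in loaders}


if __name__ == "__main__":
    print(summarize([DataSetLoader(), TimeSeriesLoader()]))
```

Pushing `iterate` into the abstract base is what makes such generic consumers possible without touching the individual loaders again.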
\ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/manifest.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/manifest.yaml new file mode 100644 index 000000000..472aa02c5 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/empty/manifest.yaml @@ -0,0 +1,4 @@ +title: "Empty: I want to create my own modules" +modules: { + "empty_module": {"items": {}}, +} \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/examples/manifest.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/examples/manifest.yaml new file mode 100644 index 000000000..ce6ed0d7d --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/examples/manifest.yaml @@ -0,0 +1,5 @@ +"title": "Examples: a set of example modules for inspiration" +"modules": { + "cdf_data_pipeline_asset_valhall": {"items": {}}, + "cdf_data_pipeline_files_valhall": {"items": {}} +} \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/manifest.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/manifest.yaml new file mode 100644 index 000000000..4b8da7d1f --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/manifest.yaml @@ -0,0 +1,21 @@ +"title": "Quick Start: A set of modules for a CDF quick start project." +"modules": { + "sap_data_ingestion": { + "title": "SAP Data Pipeline", + }, + "pi_data_pipeline": { + "title": "PI Data Pipeline", + }, + "mqtt_data_pipeline": { + "title": "MQTT Data Pipeline", + }, + "files_contextualization": { + "title": "Files Contextualization", + }, + "asset_data_transformation": { + "title": "Asset Data Transformation", + }, + "infield": { + "title": "Infield", + }, +} \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/asset_to_parent_pipeline/transformations/asset_to_parent.Transformation.sql b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/asset_to_parent_pipeline/transformations/asset_to_parent.Transformation.sql new file mode 100644 index 000000000..14eccbedc --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/asset_to_parent_pipeline/transformations/asset_to_parent.Transformation.sql @@ -0,0 +1,5 @@ + +select + CAST(`Functional Loc.` AS STRING) AS externalId, + node_reference(''{{ data_space }}'', CAST(`SupFunctLoc.` AS STRING)) AS parent +from `data-dumps`.`data-dumps_dump FLOC RZ14` diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/asset_to_parent_pipeline/transformations/asset_to_parent.Transformation.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/asset_to_parent_pipeline/transformations/asset_to_parent.Transformation.yaml new file mode 100644 index 000000000..b83df9ec2 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/asset_to_parent_pipeline/transformations/asset_to_parent.Transformation.yaml @@ -0,0 +1,14 @@ +externalId: tr_idm_Asset_to_parent_upsert +name: idm_Asset_to_parent_upsert +destination: + type: instances + dataModel: + space: '{{ model_space }}' + externalId: IDM + version: v1 + destinationType: Asset + instanceSpace: '{{ data_space }}' +conflictMode: upsert +isPublic: true +ignoreNullFields: true +dataSetExternalId: '{{ dataset }}' diff 
--git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/maintenanceorder_to_asset_pipeline/transformations/maintenanceorder_to_asset.Transformation.sql b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/maintenanceorder_to_asset_pipeline/transformations/maintenanceorder_to_asset.Transformation.sql new file mode 100644 index 000000000..aee4f3da2 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/maintenanceorder_to_asset_pipeline/transformations/maintenanceorder_to_asset.Transformation.sql @@ -0,0 +1,11 @@ +select CAST(`Order` AS STRING) AS externalId, + node_reference(''{{ data_space }}'', CAST(`Equipment` AS STRING)) AS asset +from ( + (select + * + from `workorder_mdi2_sap`.`workorder_mdi2_sap_Work orders areas 100-200-600-700_Sheet1`) + inner join (select + `externalId` as target_external_id + from cdf_nodes("'{{ model_space }}'", "MaintenanceOrder", "v1") + ) as target_table on `Equipment` = target_table.target_external_id + ) diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/maintenanceorder_to_asset_pipeline/transformations/maintenanceorder_to_asset.Transformation.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/maintenanceorder_to_asset_pipeline/transformations/maintenanceorder_to_asset.Transformation.yaml new file mode 100644 index 000000000..90988e10f --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/maintenanceorder_to_asset_pipeline/transformations/maintenanceorder_to_asset.Transformation.yaml @@ -0,0 +1,14 @@ +externalId: tr_idm_MaintenanceOrder_to_asset_upsert +name: idm_MaintenanceOrder_to_asset_upsert +destination: + type: instances + dataModel: + space: '{{ model_space }}' + externalId: IDM + version: v1 + destinationType: MaintenanceOrder + instanceSpace: '{{ data_space }}' +conflictMode: upsert +isPublic: true +ignoreNullFields: true +dataSetExternalId: '{{ dataset }}' diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_asset_pipeline/transformations/notification_to_asset.Transformation.sql b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_asset_pipeline/transformations/notification_to_asset.Transformation.sql new file mode 100644 index 000000000..34bee5aca --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_asset_pipeline/transformations/notification_to_asset.Transformation.sql @@ -0,0 +1,11 @@ +select CAST(`Notification` AS STRING) AS externalId, + node_reference(''{{ data_space }}'', CAST(`Notification` AS STRING)) AS asset +from ( + (select + * + from `workorder_mdi2_sap`.`20231211 Project Cognite Orders en Nots_Notifications MDI-2 2023`) + inner join (select + `externalId` as target_external_id + from cdf_nodes("'{{ model_space }}'", "Notification", "v1") + ) as target_table on `Notification` = target_table.target_external_id + ) diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_asset_pipeline/transformations/notification_to_asset.Transformation.yaml 
b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_asset_pipeline/transformations/notification_to_asset.Transformation.yaml new file mode 100644 index 000000000..614e9bb6c --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_asset_pipeline/transformations/notification_to_asset.Transformation.yaml @@ -0,0 +1,14 @@ +externalId: tr_idm_Notification_to_asset_upsert +name: idm_Notification_to_asset_upsert +destination: + type: instances + dataModel: + space: '{{ model_space }}' + externalId: IDM + version: v1 + destinationType: Notification + instanceSpace: '{{ data_space }}' +conflictMode: upsert +isPublic: true +ignoreNullFields: true +dataSetExternalId: '{{ dataset }}' diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_maintenanceorder_pipeline/transformations/notification_to_maintenanceorder.Transformation.sql b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_maintenanceorder_pipeline/transformations/notification_to_maintenanceorder.Transformation.sql new file mode 100644 index 000000000..9c565cc9a --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_maintenanceorder_pipeline/transformations/notification_to_maintenanceorder.Transformation.sql @@ -0,0 +1,11 @@ +select CAST(`Notification` AS STRING) AS externalId, + node_reference(''{{ data_space }}'', CAST(`Notification` AS STRING)) AS maintenanceOrder +from ( + (select + * + from `workorder_mdi2_sap`.`20231211 Project Cognite Orders en Nots_Notifications MDI-2 2023`) + inner join (select + `externalId` as target_external_id + from cdf_nodes("'{{ model_space }}'", "Notification", "v1") + ) as target_table on `Notification` = target_table.target_external_id + ) diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_maintenanceorder_pipeline/transformations/notification_to_maintenanceorder.Transformation.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_maintenanceorder_pipeline/transformations/notification_to_maintenanceorder.Transformation.yaml new file mode 100644 index 000000000..4caf34531 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_contextualization/notification_to_maintenanceorder_pipeline/transformations/notification_to_maintenanceorder.Transformation.yaml @@ -0,0 +1,14 @@ +externalId: tr_idm_Notification_to_maintenanceOrder_upsert +name: idm_Notification_to_maintenanceOrder_upsert +destination: + type: instances + dataModel: + space: '{{ model_space }}' + externalId: IDM + version: v1 + destinationType: Notification + instanceSpace: '{{ data_space }}' +conflictMode: upsert +isPublic: true +ignoreNullFields: true +dataSetExternalId: '{{ dataset }}' diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/raw/data-dumps_dump FLOC RZ14.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/raw/data-dumps_dump FLOC RZ14.yaml new file mode 100644 index 000000000..eff618ced --- /dev/null +++ 
b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/raw/data-dumps_dump FLOC RZ14.yaml @@ -0,0 +1,2 @@ +dbName: data-dumps +tableName: data-dumps_dump FLOC RZ14 diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/transformations/asset.Transformation.sql b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/transformations/asset.Transformation.sql new file mode 100644 index 000000000..4bfeb28f9 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/transformations/asset.Transformation.sql @@ -0,0 +1,13 @@ +SELECT + CAST('data-dumps' AS STRING) AS source, + CAST(`Created On` AS TIMESTAMP) AS sourceCreatedTime, + CAST(`Created By` AS STRING) AS sourceCreatedUser, + CAST(`Functional Loc.` AS STRING) AS sourceId, + CAST(`Changed On` AS TIMESTAMP) AS sourceUpdatedTime, + CAST(`Changed by` AS STRING) AS sourceUpdatedUser, + CAST(`Description` AS STRING) AS description, + CAST(`Functional Loc.` AS STRING) AS name, + CAST(NULL AS TIMESTAMP) AS lastPathMaterializationTime, + CAST(`Functional Loc.` AS STRING) AS externalId +FROM + `data-dumps`.`data-dumps_dump FLOC RZ14`; \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/transformations/asset.Transformation.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/transformations/asset.Transformation.yaml new file mode 100644 index 000000000..713769910 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/asset_pipeline/transformations/asset.Transformation.yaml @@ -0,0 +1,14 @@ +externalId: tr_idm_Asset_upsert +name: idm_Asset_upsert +destination: + type: instances + dataModel: + space: '{{ model_space }}' + externalId: IDM + version: v1 + destinationType: Asset + instanceSpace: '{{ data_space }}' +conflictMode: upsert +isPublic: true +ignoreNullFields: true +dataSetExternalId: '{{ dataset }}' diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/equipment_pipeline/raw/data-dumps_Equipment list MDI2.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/equipment_pipeline/raw/data-dumps_Equipment list MDI2.yaml new file mode 100644 index 000000000..02ef2cc7b --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/equipment_pipeline/raw/data-dumps_Equipment list MDI2.yaml @@ -0,0 +1,2 @@ +dbName: data-dumps +tableName: data-dumps_Equipment list MDI2 diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/raw/workorder_mdi2_sap_Work orders areas 100-200-600-700_Sheet1.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/raw/workorder_mdi2_sap_Work orders areas 100-200-600-700_Sheet1.yaml new file mode 100644 index 000000000..7eac8c24c --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/raw/workorder_mdi2_sap_Work orders areas 100-200-600-700_Sheet1.yaml @@ -0,0 +1,2 @@ +dbName: workorder_mdi2_sap +tableName: workorder_mdi2_sap_Work 
orders areas 100-200-600-700_Sheet1 diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/transformations/maintenanceorder.Transformation.sql b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/transformations/maintenanceorder.Transformation.sql new file mode 100644 index 000000000..bedcd1d13 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/transformations/maintenanceorder.Transformation.sql @@ -0,0 +1,20 @@ +SELECT + CAST(`Description` AS STRING) AS description, + CAST(`Description.1` AS STRING) AS name, + 'workorder_mdi2_sap' AS source, + CAST(`Created on` AS TIMESTAMP) AS sourceCreatedTime, + CAST(`User status` AS STRING) AS sourceCreatedUser, + CAST(`Functional Loc.` AS STRING) AS sourceId, + CAST(`Basic fin. date` AS TIMESTAMP) AS sourceUpdatedTime, + CAST(`User status` AS STRING) AS sourceUpdatedUser, + CAST(`Basic fin. date` AS TIMESTAMP) AS endTime, + CAST(`Basic fin. date` AS TIMESTAMP) AS scheduledEndTime, + CAST(`Created on` AS TIMESTAMP) AS scheduledStartTime, + CAST(`Created on` AS TIMESTAMP) AS startTime, + CAST(`Priority` AS STRING) AS priority, + CAST(`Priority` AS STRING) AS priorityDescription, + CAST(`User status` AS STRING) AS status, + CAST(`Plant section` AS STRING) AS type, + CAST(`Order` AS STRING) AS externalId +FROM + `workorder_mdi2_sap`.`workorder_mdi2_sap_Work orders areas 100-200-600-700_Sheet1`; \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/transformations/maintenanceorder.Transformation.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/transformations/maintenanceorder.Transformation.yaml new file mode 100644 index 000000000..7eefc47e7 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/maintenanceorder_pipeline/transformations/maintenanceorder.Transformation.yaml @@ -0,0 +1,14 @@ +externalId: tr_idm_MaintenanceOrder_upsert +name: idm_MaintenanceOrder_upsert +destination: + type: instances + dataModel: + space: '{{ model_space }}' + externalId: IDM + version: v1 + destinationType: MaintenanceOrder + instanceSpace: '{{ data_space }}' +conflictMode: upsert +isPublic: true +ignoreNullFields: true +dataSetExternalId: '{{ dataset }}' diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/raw/20231211 Project Cognite Orders en Nots_Notifications MDI-2 2023.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/raw/20231211 Project Cognite Orders en Nots_Notifications MDI-2 2023.yaml new file mode 100644 index 000000000..57e3a64f3 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/raw/20231211 Project Cognite Orders en Nots_Notifications MDI-2 2023.yaml @@ -0,0 +1,2 @@ +dbName: workorder_mdi2_sap +tableName: 20231211 Project Cognite Orders en Nots_Notifications MDI-2 2023 diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/transformations/notification.Transformation.sql 
b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/transformations/notification.Transformation.sql new file mode 100644 index 000000000..3542396a0 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/transformations/notification.Transformation.sql @@ -0,0 +1,20 @@ +SELECT + CAST(`Notification` AS STRING) AS name, + CAST(`Description` AS STRING) AS description, + 'workorder_mdi2_sap' AS source, + CAST(`Notif.date` AS TIMESTAMP) AS sourceCreatedTime, + CAST(`Planner group` AS STRING) AS sourceCreatedUser, + CAST(`Notification` AS STRING) AS sourceId, + CAST(`Notif.date` AS TIMESTAMP) AS sourceUpdatedTime, + CAST(`User status` AS STRING) AS sourceUpdatedUser, + CAST(`Required End` AS TIMESTAMP) AS endTime, + CAST(`Required End` AS TIMESTAMP) AS scheduledEndTime, + CAST(`Notif.date` AS TIMESTAMP) AS scheduledStartTime, + CAST(`Notif.date` AS TIMESTAMP) AS startTime, + CAST(`Notifictn type` AS STRING) AS priority, + CAST(`Description.1` AS STRING) AS priorityDescription, + CAST(`User status` AS STRING) AS status, + CAST(`Notifictn type` AS STRING) AS type, + CAST(`Notification` AS STRING) AS externalId +FROM + `workorder_mdi2_sap`.`20231211 Project Cognite Orders en Nots_Notifications MDI-2 2023`; \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/transformations/notification.Transformation.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/transformations/notification.Transformation.yaml new file mode 100644 index 000000000..b1f29244c --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/notification_pipeline/transformations/notification.Transformation.yaml @@ -0,0 +1,14 @@ +externalId: tr_idm_Notification_upsert +name: idm_Notification_upsert +destination: + type: instances + dataModel: + space: '{{ model_space }}' + externalId: IDM + version: v1 + destinationType: Notification + instanceSpace: '{{ data_space }}' +conflictMode: upsert +isPublic: true +ignoreNullFields: true +dataSetExternalId: '{{ dataset }}' diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/operation_pipeline/raw/workorder_mdi2_sap_Work_orders_areas_operations_100-200-600-700.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/operation_pipeline/raw/workorder_mdi2_sap_Work_orders_areas_operations_100-200-600-700.yaml new file mode 100644 index 000000000..55f3cb911 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/data_population/operation_pipeline/raw/workorder_mdi2_sap_Work_orders_areas_operations_100-200-600-700.yaml @@ -0,0 +1,2 @@ +dbName: workorder_mdi2_sap +tableName: workorder_mdi2_sap_Work_orders_areas_operations_100-200-600-700 diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/default.config.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/default.config.yaml new file mode 100644 index 000000000..f73e2761d --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/default.config.yaml @@ -0,0 +1,3 @@ +# Values here are only valid for this module. 
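The quickstart resources above are written against `{{ data_space }}`, `{{ model_space }}`, and `{{ dataset }}` placeholders, with each module's `default.config.yaml` seeding the defaults that end up in the generated `config.<env>.yaml`. As a rough illustration of that templating idea only, not the Toolkit's actual build implementation, placeholder substitution can be sketched as below; the variable values and the `substitute` helper are invented for the example.

```python
from __future__ import annotations

import re

# Hypothetical values; in a real project they would come from the variables
# section of config.<env>.yaml, seeded from each module's default.config.yaml.
VARIABLES = {"data_space": "sp_sap_maia", "dataset": "ds_sap_population"}

TEMPLATE = """\
externalId: tr_idm_Asset_upsert
instanceSpace: '{{ data_space }}'
dataSetExternalId: '{{ dataset }}'
"""


def substitute(text: str, variables: dict[str, str]) -> str:
    # Replace each {{ name }} placeholder with its configured value.
    return re.sub(r"\{\{\s*(\w+)\s*\}\}", lambda m: variables[m.group(1)], text)


if __name__ == "__main__":
    print(substitute(TEMPLATE, VARIABLES))
```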
+# Space for all nodes and edges ingested by sap_data_ingestion. +data_space: \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/data_models/sp_sap_maia.Space.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/data_models/sp_sap_maia.Space.yaml new file mode 100644 index 000000000..d7eada5d1 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/data_models/sp_sap_maia.Space.yaml @@ -0,0 +1 @@ +space: '{{ data_space }}' diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/data_sets/ds_sap_population.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/data_sets/ds_sap_population.yaml new file mode 100644 index 000000000..d7077f5e9 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/data_sets/ds_sap_population.yaml @@ -0,0 +1,3 @@ +externalId: '{{ dataset }}' +name: The SAP Transformation and Orchestration pipeline +description: This dataset used for the SAP -> IDM transformation/orchestration pipeline diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/workflows/wf_sap_population.Workflow.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/workflows/wf_sap_population.Workflow.yaml new file mode 100644 index 000000000..dc9aa4873 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/workflows/wf_sap_population.Workflow.yaml @@ -0,0 +1,2 @@ +externalId: wf_sap_population +description: A workflow for Populating and Contextualizing SAP data diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/workflows/wf_sap_population.WorkflowVersion.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/workflows/wf_sap_population.WorkflowVersion.yaml new file mode 100644 index 000000000..1093b5017 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/quickstart/sap_data_ingestion/orchestration/workflows/wf_sap_population.WorkflowVersion.yaml @@ -0,0 +1,79 @@ +workflowExternalId: wf_sap_population +version: '1' +workflowDefinition: + tasks: + - externalId: idm_Asset_upsert + type: transformation + parameters: + transformation: + externalId: tr_idm_Asset_upsert + concurrencyPolicy: fail + retries: 3 + timeout: 3600 + onFailure: abortWorkflow + - externalId: idm_Notification_upsert + type: transformation + parameters: + transformation: + externalId: tr_idm_Notification_upsert + concurrencyPolicy: fail + retries: 3 + timeout: 3600 + onFailure: abortWorkflow + - externalId: idm_MaintenanceOrder_upsert + type: transformation + parameters: + transformation: + externalId: tr_idm_MaintenanceOrder_upsert + concurrencyPolicy: fail + retries: 3 + timeout: 3600 + onFailure: abortWorkflow + - externalId: idm_Asset_to_parent_upsert + type: transformation + parameters: + transformation: + externalId: tr_idm_Asset_to_parent_upsert + concurrencyPolicy: fail + retries: 3 + timeout: 3600 + onFailure: abortWorkflow + dependsOn: + - externalId: idm_Asset_upsert + - externalId: idm_Notification_to_asset_upsert + type: transformation + parameters: + transformation: + externalId: tr_idm_Notification_to_asset_upsert + concurrencyPolicy: fail + retries: 3 + timeout: 
3600 + onFailure: abortWorkflow + dependsOn: + - externalId: idm_Notification_upsert + - externalId: idm_Asset_upsert + - externalId: idm_MaintenanceOrder_to_asset_upsert + type: transformation + parameters: + transformation: + externalId: tr_idm_MaintenanceOrder_to_asset_upsert + concurrencyPolicy: fail + retries: 3 + timeout: 3600 + onFailure: abortWorkflow + dependsOn: + - externalId: idm_MaintenanceOrder_upsert + - externalId: idm_Asset_upsert + - externalId: idm_Notification_to_maintenanceOrder_upsert + type: transformation + parameters: + transformation: + externalId: tr_idm_Notification_to_maintenanceOrder_upsert + concurrencyPolicy: fail + retries: 3 + timeout: 3600 + onFailure: abortWorkflow + dependsOn: + - externalId: idm_Notification_upsert + - externalId: idm_MaintenanceOrder_upsert + description: Sap Ingestion diff --git a/cognite_toolkit/_cdf_tk/prototypes/_packages/reference/manifest.yaml b/cognite_toolkit/_cdf_tk/prototypes/_packages/reference/manifest.yaml new file mode 100644 index 000000000..8e4acf7ed --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/_packages/reference/manifest.yaml @@ -0,0 +1,2 @@ +"title": "All supported resources as reference" +"modules": {"workflow": {}, "transformations": {}, "functions": {}, "groups": {}} \ No newline at end of file diff --git a/cognite_toolkit/_cdf_tk/prototypes/commands/__init__.py b/cognite_toolkit/_cdf_tk/prototypes/commands/__init__.py new file mode 100644 index 000000000..08473f140 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/commands/__init__.py @@ -0,0 +1,3 @@ +from .modules import ModulesCommand + +__all__ = ["ModulesCommand"] diff --git a/cognite_toolkit/_cdf_tk/prototypes/commands/_changes.py b/cognite_toolkit/_cdf_tk/prototypes/commands/_changes.py new file mode 100644 index 000000000..c4b8aa10d --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/commands/_changes.py @@ -0,0 +1,359 @@ +from __future__ import annotations + +import itertools +from collections.abc import Iterator, MutableSequence +from functools import lru_cache +from pathlib import Path + +from packaging.version import Version +from packaging.version import parse as parse_version + +from cognite_toolkit._cdf_tk.constants import COGNITE_MODULES +from cognite_toolkit._cdf_tk.data_classes import SystemYAML +from cognite_toolkit._cdf_tk.utils import read_yaml_file +from cognite_toolkit._version import __version__ + + +class Change: + """A change is a single migration step that can be applied to a project.""" + + deprecated_from: Version + required_from: Version | None = None + has_file_changes: bool = False + + def __init__(self, project_dir: Path) -> None: + self._project_path = project_dir + + +class AutomaticChange(Change): + """An automatic change is a change that can be applied automatically to a project.""" + + def do(self) -> set[Path]: + return set() + + +class ManualChange(Change): + """A manual change is a change that requires manual intervention to be applied to a project.""" + + def needs_to_change(self) -> set[Path]: + return set() + + def instructions(self, files: set[Path]) -> str: + return "" + + +class SystemYAMLMoved(AutomaticChange): + """The _system.yaml file is now expected to in the root of the project. +Before it was expected to be in the cognite_modules folder. +This change moves the file to the root of the project. 
+ +Before: +```bash + my_project/ + cognite_modules/ + _system.yaml +``` +After: +```bash + my_project/ + _system.yaml +``` + """ + + deprecated_from = Version("0.2.0a3") + required_from = Version("0.2.0a3") + has_file_changes = True + + def do(self) -> set[Path]: + system_yaml = self._project_path / COGNITE_MODULES / SystemYAML.file_name + if not system_yaml.exists(): + return set() + new_system_yaml = self._project_path / SystemYAML.file_name + system_yaml.rename(new_system_yaml) + return {system_yaml} + + +class RenamedModulesSection(AutomaticChange): + """The 'modules' section in the config files has been renamed to 'variables'. +This change updates the config files to use the new name. + +For example in config.dev.yaml, before: +```yaml + modules: + cognite_modules: + cdf_cluster: ${CDF_CLUSTER} + cicd_clientId: ${IDP_CLIENT_ID} + cicd_clientSecret: ${IDP_CLIENT_SECRET} +``` +After: +```yaml + variables: + cognite_modules: + cdf_cluster: ${CDF_CLUSTER} + cicd_clientId: ${IDP_CLIENT_ID} + cicd_clientSecret: ${IDP_CLIENT_SECRET} +``` + """ + + deprecated_from = Version("0.2.0a3") + required_from = Version("0.2.0a3") + has_file_changes = True + + def do(self) -> set[Path]: + changed: set[Path] = set() + for config_yaml in self._project_path.glob("config.*.yaml"): + data_raw = config_yaml.read_text() + # We do not parse the YAML file to avoid removing comments + updated_file: list[str] = [] + for line in data_raw.splitlines(): + if line.startswith("modules:"): + changed.add(config_yaml) + updated_file.append(line.replace("modules:", "variables:")) + else: + updated_file.append(line) + config_yaml.write_text("\n".join(updated_file)) + return changed + + +class BuildCleanFlag(AutomaticChange): + """The `cdf-tk build` command no longer accepts the `--clean` flag. + +The build command now always cleans the build directory before building. +To avoid cleaning the build directory, you can use the `--no-clean` flag. + """ + + deprecated_from = Version("0.2.0a3") + required_from = Version("0.2.0a3") + has_file_changes = False + + +class CommonFunctionCodeNotSupported(ManualChange): + """.""" + + deprecated_from = Version("0.2.0a4") + required_from = Version("0.2.0a4") + has_file_changes = True + + def needs_to_change(self) -> set[Path]: + common_function_code = self._project_path / "common_function_code" + if not common_function_code.exists(): + return set() + needs_change = {common_function_code} + for py_file in self._project_path.rglob("*.py"): + content = py_file.read_text().splitlines() + use_common_function_code = any( + (line.startswith("from common") or line.startswith("import common")) for line in content + ) + if use_common_function_code: + needs_change.add(py_file) + return needs_change + + def instructions(self, files: set[Path]) -> str: + to_update = [] + for module, py_files in itertools.groupby(sorted(files, key=self.get_module_name), key=self.get_module_name): + if module == Path("."): + # This is the common_function_code folder + continue + to_update.append(f" - In module {module.relative_to(self._project_path).as_posix()!r}:") + for py_file in py_files: + to_update.append(f" - In file {py_file.relative_to(module).as_posix()!r}") + to_update_str = "\n".join(to_update) + return ( + "Cognite-Toolkit no longer supports the common functions code.\n" + f"Please update the following files to not use 'common' module:\n{to_update_str}" + f"\n\nThen remove the '{self._project_path.name}/common_function_code' folder." 
+ ) + + @staticmethod + @lru_cache(maxsize=128) + def get_module_name(file_path: Path) -> Path: + while file_path.parent != file_path: + if file_path.name == "functions": + return file_path.parent + file_path = file_path.parent + return Path(".") + + +class FunctionExternalDataSetIdRenamed(AutomaticChange): + """The 'externalDataSetId' field in function YAML files has been renamed to 'dataSetExternalId'. +This change updates the function YAML files to use the new name. + +The motivation for this change is to make the naming consistent with the rest of the Toolkit. + +For example, in functions/my_function.yaml, before: +```yaml +externalDataSetId: my_external_id +``` +After: +```yaml +dataSetExternalId: my_external_id +``` + """ + + deprecated_from = Version("0.2.0a5") + required_from = Version("0.2.0a5") + has_file_changes = True + + def do(self) -> set[Path]: + changed: set[Path] = set() + for resource_yaml in self._project_path.glob("*.yaml"): + if resource_yaml.parent == "functions": + content = resource_yaml.read_text() + if "externalDataSetId" in content: + changed.add(resource_yaml) + content = content.replace("externalDataSetId", "dataSetExternalId") + resource_yaml.write_text(content) + return changed + + +class ConfigYAMLSelectedRenaming(AutomaticChange): + """The 'environment.selected_modules_and_packages' field in the config.yaml files has been +renamed to 'selected'. +This change updates the config files to use the new name. + +For example, in config.dev.yaml, before: +```yaml +environment: + selected_modules_and_packages: + - my_module +``` +After: +```yaml +environment: + selected: + - my_module +``` + """ + + deprecated_from = Version("0.2.0b1") + has_file_changes = True + + def do(self) -> set[Path]: + changed = set() + for config_yaml in self._project_path.glob("config.*.yaml"): + data = config_yaml.read_text() + if "selected_modules_and_packages" in data: + changed.add(config_yaml) + data = data.replace("selected_modules_and_packages", "selected") + config_yaml.write_text(data) + return changed + + +class RequiredFunctionLocation(AutomaticChange): + """Function Resource YAML files are now expected to be in the 'functions' folder. +Before they could be in subfolders inside the 'functions' folder. + +This change moves the function YAML files to the 'functions' folder. 
+ +For example, before: +```bash + modules/ + my_module/ + functions/ + some_subdirectory/ + my_function.yaml +``` +After: +```bash + modules/ + my_module/ + functions/ + my_function.yaml +``` + """ + + deprecated_from = Version("0.2.0b3") + required_from = Version("0.2.0b3") + has_file_changes = True + + def do(self) -> set[Path]: + changed = set() + for resource_yaml in self._project_path.glob("functions/**/*.yaml"): + if self._is_function(resource_yaml): + new_path = self._new_path(resource_yaml) + if new_path != resource_yaml: + resource_yaml.rename(new_path) + changed.add(new_path) + return changed + + @staticmethod + def _is_function(resource_yaml: Path) -> bool: + # Functions require a 'name' field and to distinguish from a FunctionSchedule + # we check that the 'cronExpression' field is not present + parsed = read_yaml_file(resource_yaml) + if isinstance(parsed, dict): + return "name" in parsed and "cronExpression" not in parsed + elif isinstance(parsed, list): + return all("name" in item and "cronExpression" not in item for item in parsed) + return False + + @staticmethod + def _new_path(resource_yaml: Path) -> Path: + # Search the path for the 'functions' folder and move the file there + for parent in resource_yaml.parents: + if parent.name == "functions": + return parent / resource_yaml.name + return resource_yaml + + +class UpdateModuleVersion(AutomaticChange): + deprecated_from = parse_version(__version__) + required_from = parse_version(__version__) + has_file_changes = True + + def do(self) -> set[Path]: + system_yaml = self._project_path / SystemYAML.file_name + if not system_yaml.exists(): + return set() + raw = system_yaml.read_text() + new_system_yaml = [] + changes: set[Path] = set() + # We do not parse the YAML file to avoid removing comments + for line in raw.splitlines(): + if line.startswith("cdf_toolkit_version:"): + new_line = f"cdf_toolkit_version: {__version__}" + new_system_yaml.append(new_line) + if new_line != line: + changes.add(system_yaml) + else: + new_system_yaml.append(line) + system_yaml.write_text("\n".join(new_system_yaml)) + return changes + + +UPDATE_MODULE_VERSION_DOCSTRING = """In the _system.yaml file, the 'cdf_toolkit_version' field has been updated to the same version as the CLI. + +This change updated the 'cdf_toolkit_version' field in the _system.yaml file to the same version as the CLI. 
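Each migration step above follows the same shape: declare the version it applies from, and, for automatic changes, implement `do()` so it edits project files and returns the paths it touched, typically rewriting text line by line so YAML comments survive. The self-contained sketch below mirrors that shape and the version filter used when the changes are loaded further down; `MiniAutomaticChange`, `RenameSelectedSection`, and `applicable` are simplified stand-ins, not the Toolkit's classes.

```python
from __future__ import annotations

from pathlib import Path

from packaging.version import Version, parse as parse_version


class MiniAutomaticChange:
    """Simplified stand-in for the AutomaticChange base class above."""
    deprecated_from: Version
    required_from: Version | None = None
    has_file_changes: bool = True

    def __init__(self, project_dir: Path) -> None:
        self._project_path = project_dir

    def do(self) -> set[Path]:
        return set()


class RenameSelectedSection(MiniAutomaticChange):
    """Illustrative change: rewrite config files as text so comments are preserved."""
    deprecated_from = Version("0.2.0b1")

    def do(self) -> set[Path]:
        changed: set[Path] = set()
        for config_yaml in self._project_path.glob("config.*.yaml"):
            text = config_yaml.read_text()
            if "selected_modules_and_packages" in text:
                config_yaml.write_text(text.replace("selected_modules_and_packages", "selected"))
                changed.add(config_yaml)
        return changed


def applicable(changes: list[type[MiniAutomaticChange]], module_version: Version) -> list[type[MiniAutomaticChange]]:
    # Same filter as Changes.load below: keep every change introduced at or after
    # the version the project's modules were generated with.
    return [change for change in changes if change.deprecated_from >= module_version]


if __name__ == "__main__":
    # PEP 440 pre-release ordering matters here: 0.2.0a3 < 0.2.0b1 < 0.2.0.
    print(applicable([RenameSelectedSection], parse_version("0.2.0a3")))
```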
+ +In _system.yaml, before: +```yaml +cdf_toolkit_version: {module_version} +``` +After: +```yaml +cdf_toolkit_version: {cli_version} +``` + """ +UpdateModuleVersion.__doc__ = UPDATE_MODULE_VERSION_DOCSTRING + + +_CHANGES: list[type[Change]] = [ + change for change in itertools.chain(AutomaticChange.__subclasses__(), ManualChange.__subclasses__()) +] + + +class Changes(list, MutableSequence[Change]): + @classmethod + def load(cls, module_version: Version, project_path: Path) -> Changes: + return cls([change(project_path) for change in _CHANGES if change.deprecated_from >= module_version]) + + @property + def required_changes(self) -> Changes: + return Changes([change for change in self if change.required_from is not None]) + + @property + def optional_changes(self) -> Changes: + return Changes([change for change in self if change.required_from is None]) + + def __iter__(self) -> Iterator[Change]: + return super().__iter__() diff --git a/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py b/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py new file mode 100644 index 000000000..c11c9f296 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/commands/modules.py @@ -0,0 +1,412 @@ +from __future__ import annotations + +import shutil +import subprocess +from collections.abc import MutableMapping +from contextlib import suppress +from importlib import resources +from pathlib import Path +from typing import Any, Optional + +import questionary +import typer +import yaml +from packaging.version import Version +from packaging.version import parse as parse_version +from rich import print +from rich.markdown import Markdown +from rich.padding import Padding +from rich.panel import Panel +from rich.rule import Rule +from rich.tree import Tree + +import cognite_toolkit +from cognite_toolkit._cdf_tk.commands._base import ToolkitCommand +from cognite_toolkit._cdf_tk.constants import ALT_CUSTOM_MODULES, COGNITE_MODULES, SUPPORT_MODULE_UPGRADE_FROM_VERSION +from cognite_toolkit._cdf_tk.data_classes import Environment, InitConfigYAML, SystemYAML +from cognite_toolkit._cdf_tk.exceptions import ToolkitRequiredValueError +from cognite_toolkit._cdf_tk.prototypes import _packages +from cognite_toolkit._cdf_tk.prototypes.commands._changes import ( + UPDATE_MODULE_VERSION_DOCSTRING, + AutomaticChange, + Changes, + ManualChange, + UpdateModuleVersion, +) +from cognite_toolkit._cdf_tk.tk_warnings import MediumSeverityWarning +from cognite_toolkit._cdf_tk.utils import read_yaml_file +from cognite_toolkit._version import __version__ + +custom_style_fancy = questionary.Style( + [ + ("qmark", "fg:#673ab7 bold"), # token in front of the question + ("question", "bold"), # question text + ("answer", "fg:#f44336 bold"), # submitted answer text behind the question + ("pointer", "fg:#673ab7 bold"), # pointer used in select and checkbox prompts + ("highlighted", "fg:#673ab7 bold"), # pointed-at choice in select and checkbox prompts + ("selected", "fg:#673ab7"), # style for a selected item of a checkbox + ("separator", "fg:#cc5454"), # separator in lists + ("instruction", ""), # user instructions for select, rawselect, checkbox + ("text", ""), # plain text + ("disabled", "fg:#858585 italic"), # disabled choices for select and checkbox prompts + ] +) + +INDENT = " " +POINTER = INDENT + "▶" + + +class Packages(dict, MutableMapping[str, dict[str, Any]]): + @classmethod + def load(cls) -> Packages: + packages = {} + for module in _packages.__all__: + manifest = Path(_packages.__file__).parent / module / "manifest.yaml" + if 
not manifest.exists(): + continue + content = manifest.read_text() + if yaml.__with_libyaml__: + packages[manifest.parent.name] = yaml.CSafeLoader(content).get_data() + else: + packages[manifest.parent.name] = yaml.SafeLoader(content).get_data() + return cls(packages) + + +class ModulesCommand(ToolkitCommand): + def _build_tree(self, item: dict | list, tree: Tree) -> None: + if not isinstance(item, dict): + return + for key, value in item.items(): + subtree = tree.add(key) + for subvalue in value: + if isinstance(subvalue, dict): + self._build_tree(subvalue, subtree) + else: + subtree.add(subvalue) + + def _create( + self, init_dir: str, selected: dict[str, dict[str, Any]], environments: list[str], mode: str | None + ) -> None: + if mode == "overwrite": + print(f"{INDENT}[yellow]Clearing directory[/]") + if Path.is_dir(Path(init_dir)): + shutil.rmtree(init_dir) + + modules_root_dir = Path(init_dir) / ALT_CUSTOM_MODULES + modules_root_dir.mkdir(parents=True, exist_ok=True) + + for package, modules in selected.items(): + print(f"{INDENT}[{'yellow' if mode == 'overwrite' else 'green'}]Creating {package}[/]") + + for module in modules: + print(f"{INDENT*2}[{'yellow' if mode == 'overwrite' else 'green'}]Creating module {module}[/]") + source_dir = Path(_packages.__file__).parent / package / module + if not Path(source_dir).exists(): + print(f"{INDENT*3}[red]Module {module} not found in package {package}. Skipping...[/]") + continue + module_dir = modules_root_dir / package / module + if Path(module_dir).exists() and mode == "update": + if questionary.confirm( + f"{INDENT}Module {module} already exists in folder {module_dir}. Would you like to overwrite?", + default=False, + ).ask(): + shutil.rmtree(module_dir) + else: + continue + + shutil.copytree(source_dir, module_dir, ignore=shutil.ignore_patterns("default.*")) + + for environment in environments: + # if mode == "update": + config_init = InitConfigYAML( + Environment( + name=environment, + project=f"", + build_type="dev" if environment == "dev" else "prod", + selected=list(selected.keys()) if selected else ["empty"], + ) + ).load_selected_defaults(Path(_packages.__file__).parent) + print(f"{INDENT}[{'yellow' if mode == 'overwrite' else 'green'}]Creating config.{environment}.yaml[/]") + Path(init_dir + f"/config.{environment}.yaml").write_text(config_init.dump_yaml_with_comments()) + + _system_yaml_file = Path(resources.files(cognite_toolkit.__name__)) / SystemYAML.file_name # type: ignore[arg-type] + _system_yaml = read_yaml_file(_system_yaml_file) + _system_yaml.pop("packages", None) + + (Path(init_dir) / SystemYAML.file_name).write_text(f"#DO NOT EDIT THIS FILE!\n{yaml.safe_dump(_system_yaml)}") + + def init(self, init_dir: Optional[str] = None, arg_package: Optional[str] = None) -> None: + print("\n") + print( + Panel( + "\n".join( + [ + "Welcome to the CDF Toolkit!", + "This wizard will help you prepare modules in the folder you enter.", + "The modules are thematically bundled in packages you can choose between. You can add more by repeating the process.", + "You can use the arrow keys ⬆ ⬇ on your keyboard to select modules, and press enter ⮐ to continue with your selection.", + ] + ), + title="Interactive template wizard", + style="green", + padding=(1, 2), + ) + ) + + available = Packages().load() + if not available: + raise ToolkitRequiredValueError("No available packages found at location") + + mode = "new" + + if not init_dir: + init_dir = questionary.text( + "Which directory would you like to create templates in? 
(typically customer name)", + default="new_project", + ).ask() + if not init_dir or init_dir.strip() == "": + raise ToolkitRequiredValueError("You must provide a directory name.") + + if (Path(init_dir) / ALT_CUSTOM_MODULES).is_dir(): + mode = questionary.select( + f"Directory {init_dir}/modules already exists. What would you like to do?", + choices=[ + questionary.Choice("Abort", "abort"), + questionary.Choice("Overwrite (clean existing)", "overwrite"), + ], + pointer=POINTER, + style=custom_style_fancy, + instruction="use arrow up/down and " + "⮐ " + " to save", + ).ask() + if mode == "abort": + print("Aborting...") + raise typer.Exit() + + print(f" [{'yellow' if mode == 'overwrite' else 'green'}]Using directory [bold]{init_dir}[/]") + + selected: dict[str, dict[str, Any]] = {} + if arg_package: + if not available.get(arg_package): + raise ToolkitRequiredValueError( + f"Package {arg_package} is unknown. Available packages are {', '.join(available)}" + ) + else: + selected[arg_package] = available[arg_package].get("modules", {}).keys() + available.pop(arg_package) + + while True: + if len(selected) > 0: + print("\n[bold]You have selected the following modules:[/]\n") + + tree = Tree(ALT_CUSTOM_MODULES) + self._build_tree(selected, tree) + print(Padding.indent(tree, 5)) + print("\n") + + if len(available) > 0: + if not questionary.confirm("Would you like to add more?", default=False).ask(): + break + + package_id = questionary.select( + "Which package would you like to include?", + instruction="Use arrow up/down and ⮐ to save", + choices=[questionary.Choice(value.get("title", key), key) for key, value in available.items()], + pointer=POINTER, + style=custom_style_fancy, + ).ask() + + selection = questionary.checkbox( + f"Which modules in {package_id} would you like to include?", + instruction="Use arrow up/down, press space to select item(s) and enter to save", + choices=[ + questionary.Choice( + value.get("title", key), key, checked=True if key in selected.get(package_id, {}) else False + ) + for key, value in available[package_id].get("modules", {}).items() + ], + qmark=INDENT, + pointer=POINTER, + style=custom_style_fancy, + ).ask() + + if len(selection) > 0: + selected[package_id] = selection + else: + selected[package_id] = available[package_id].get("modules", {}).keys() + + if not questionary.confirm("Would you like to continue with creation?", default=True).ask(): + print("Exiting...") + raise typer.Exit() + else: + environments = questionary.checkbox( + "Which environments would you like to include?", + instruction="Use arrow up/down, press space to select item(s) and enter to save", + choices=[ + questionary.Choice(title="dev", checked=True), + questionary.Choice(title="prod", checked=True), + questionary.Choice(title="staging", checked=False), + ], + qmark=INDENT, + pointer=POINTER, + style=custom_style_fancy, + ).ask() + self._create(init_dir, selected, environments, mode) + print( + Panel( + f"""Modules have been prepared in [bold]{init_dir}[/]. \nNext steps: + 1. Run `cdf-tk auth verify --interactive to set up credentials. + 2. Configure your project in the config files. Use cdf-tk build for assistance. + 3. 
Run `cdf-tk deploy --dry-run` to verify the deployment.""", + style="green", + ) + ) + + if "empty" in selected: + print( + Panel( + "Please check out https://developer.cognite.com/sdks/toolkit/modules/ for guidance on writing custom modules", + ) + ) + + raise typer.Exit() + + def upgrade(self, project_dir: str | Path | None = None, verbose: bool = False) -> Changes: + project_path = Path(project_dir or ".") + module_version = self._get_module_version(project_path) + cli_version = parse_version(__version__) + + if cli_version < module_version: + upgrade = "poetry add cognite-toolkit@" if CLICommands.use_poetry() else "pip install cognite-toolkit==" + print( + f"Modules are at a higher version ({module_version}) than the installed CLI ({__version__})." + f"Please upgrade the CLI to match the modules: `{upgrade}{module_version}`." + ) + return Changes() + + if module_version < Version(SUPPORT_MODULE_UPGRADE_FROM_VERSION): + print( + f"The modules upgrade command is not supported for versions below {SUPPORT_MODULE_UPGRADE_FROM_VERSION}." + ) + return Changes() + + if not CLICommands.use_git(): + self.warn(MediumSeverityWarning("git is not installed. It is strongly recommended to use version control.")) + else: + if not CLICommands.has_initiated_repo(): + self.warn(MediumSeverityWarning("git repository not initiated. Did you forget to run `git init`?")) + else: + if CLICommands.has_uncommitted_changes(): + print("Uncommitted changes detected. Please commit your changes before upgrading the modules.") + return Changes() + # Update the docstring of the change 'UpdateModuleVersion' to be more informative + UpdateModuleVersion.__doc__ = UPDATE_MODULE_VERSION_DOCSTRING.format( + module_version=module_version, cli_version=cli_version + ) + + changes = Changes.load(module_version, project_path) + if not changes: + print("No changes required.") + return changes + + print( + Panel( + f"Found {len(changes)} changes from {module_version} to {cli_version}", + title="Upgrade Modules", + style="green", + ) + ) + + total_changed: set[Path] = set() + for change in changes: + color = "green" + changed_files: set[Path] = set() + if change.has_file_changes: + if isinstance(change, AutomaticChange): + changed_files = change.do() + color = "yellow" if changed_files else "green" + total_changed.update(changed_files) + elif isinstance(change, ManualChange): + changed_files = change.needs_to_change() + color = "red" if changed_files else "green" + print( + Panel( + f"Change: {type(change).__name__}", + style=color, + expand=False, + ) + ) + if not changed_files and change.has_file_changes: + suffix = "have been changed" if isinstance(change, AutomaticChange) else "need to be changed" + print(f"No files {suffix}.") + else: + if isinstance(change, ManualChange): + print(Markdown(change.instructions(changed_files))) + elif isinstance(change, AutomaticChange): + print("The following files have been changed:") + for file in changed_files: + print(Markdown(f" - {file.relative_to(project_path).as_posix()}")) + if changed_files or not change.has_file_changes or verbose: + print(Markdown(change.__doc__ or "Missing description.")) + print(Rule()) + + use_git = CLICommands.use_git() and CLICommands.has_initiated_repo() + summary = ["All automatic changes have been applied."] + color = "green" + if total_changed: + summary.append(f"A total of {len(total_changed)} files have been changed.") + else: + summary.append("No files have been changed.") + if manual_changes := [change for change in changes if isinstance(change, 
ManualChange)]: + summary.append( + f"A total of {len(manual_changes)} changes require manual intervention: {', '.join([type(change).__name__ for change in manual_changes])}." + ) + color = "yellow" + if use_git and total_changed: + summary.append("\nPlease review the changes and commit them if you are satisfied.") + summary.append("You can use `git diff` to see the changes or use your IDE to inspect the changes.") + summary.append( + "If you are not satisfied with the changes, you can use `git checkout -- ` to revert " + "a file or `git checkout .` to revert all changes." + ) + print(Panel("\n".join(summary), title="Upgrade Complete", style=color)) + return changes + + @staticmethod + def _get_module_version(project_path: Path) -> Version: + if (system_yaml := project_path / SystemYAML.file_name).exists(): + # From 0.2.0a3 we have the _system.yaml on the root of the project + content = read_yaml_file(system_yaml) + elif (system_yaml := project_path / COGNITE_MODULES / SystemYAML.file_name).exists(): + # Up to 0.2.0a2 we have the _system.yaml in the cognite_modules folder + content = read_yaml_file(system_yaml) + else: + raise ToolkitRequiredValueError("No system.yaml file found in project.") + return parse_version(content.get("cdf_toolkit_version", "0.0.0")) + + +class CLICommands: + @classmethod + def use_poetry(cls) -> bool: + with suppress(Exception): + return shutil.which("poetry") is not None + return False + + @classmethod + def use_git(cls) -> bool: + with suppress(Exception): + return shutil.which("git") is not None + return False + + @classmethod + def has_initiated_repo(cls) -> bool: + with suppress(Exception): + result = subprocess.run("git rev-parse --is-inside-work-tree".split(), stdout=subprocess.PIPE) + return result.returncode == 0 + return False + + @classmethod + def has_uncommitted_changes(cls) -> bool: + with suppress(Exception): + result = subprocess.run("git diff --quiet".split(), stdout=subprocess.PIPE) + return result.returncode != 0 + return False diff --git a/cognite_toolkit/_cdf_tk/prototypes/featureflag.py b/cognite_toolkit/_cdf_tk/prototypes/featureflag.py deleted file mode 100644 index 48aa38312..000000000 --- a/cognite_toolkit/_cdf_tk/prototypes/featureflag.py +++ /dev/null @@ -1,20 +0,0 @@ -import os - -from rich import print - - -def enabled(flag: str) -> bool: - """ - Check if a feature flag is enabled. - - Args: - flag (str): The feature flag to check. - - Returns: - bool: True if the feature flag is enabled, False otherwise. 
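Before applying any changes, the upgrade flow above guards on versions: it refuses to run when the installed CLI is older than the project's modules, or when the modules predate the supported upgrade floor, and it separately warns about missing git or uncommitted changes. Below is a minimal sketch of the two version guards, assuming a placeholder floor since the real `SUPPORT_MODULE_UPGRADE_FROM_VERSION` value lives elsewhere in the Toolkit; `can_upgrade` is an invented helper for illustration.

```python
from __future__ import annotations

from packaging.version import parse as parse_version

# Assumed floor for illustration only; the real value is the Toolkit constant
# SUPPORT_MODULE_UPGRADE_FROM_VERSION.
SUPPORT_FROM = "0.1.0"


def can_upgrade(module_version: str, cli_version: str) -> tuple[bool, str]:
    """Mirror the two version guards in the upgrade flow above."""
    modules, cli = parse_version(module_version), parse_version(cli_version)
    if cli < modules:
        return False, f"Upgrade the CLI first: it is at {cli}, the modules are at {modules}."
    if modules < parse_version(SUPPORT_FROM):
        return False, f"Upgrading from {modules} is not supported (minimum {SUPPORT_FROM})."
    return True, "OK to run the migration changes."


if __name__ == "__main__":
    # PEP 440 pre-release ordering: 0.2.0a3 < 0.2.0b1 < 0.2.0b3 < 0.2.0.
    print(can_upgrade("0.2.0a3", "0.2.0b3"))
    print(can_upgrade("0.2.0b3", "0.2.0a3"))
```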
- """ - if os.environ.get(flag, "false").lower() == "true": - if enabled("FF_PRINT_FLAGS"): - print(f"[yellow]Feature flag {flag} is enabled.[/]") - return True - return False diff --git a/cognite_toolkit/_cdf_tk/prototypes/interactive_init.py b/cognite_toolkit/_cdf_tk/prototypes/interactive_init.py deleted file mode 100644 index 6ad94ccfc..000000000 --- a/cognite_toolkit/_cdf_tk/prototypes/interactive_init.py +++ /dev/null @@ -1,232 +0,0 @@ -from __future__ import annotations - -from pathlib import Path -from typing import Annotated, Any, Optional, Union - -import questionary -import typer -from rich import print -from rich.padding import Padding -from rich.panel import Panel -from rich.tree import Tree - -from cognite_toolkit._cdf_tk.exceptions import ToolkitRequiredValueError - -INDENT = " " -POINTER = INDENT + "▶" - -custom_style_fancy = questionary.Style( - [ - ("qmark", "fg:#673ab7 bold"), # token in front of the question - ("question", "bold"), # question text - ("answer", "fg:#f44336 bold"), # submitted answer text behind the question - ("pointer", "fg:#673ab7 bold"), # pointer used in select and checkbox prompts - ("highlighted", "fg:#673ab7 bold"), # pointed-at choice in select and checkbox prompts - ("selected", "fg:#673ab7"), # style for a selected item of a checkbox - ("separator", "fg:#cc5454"), # separator in lists - ("instruction", ""), # user instructions for select, rawselect, checkbox - ("text", ""), # plain text - ("disabled", "fg:#858585 italic"), # disabled choices for select and checkbox prompts - ] -) - - -def get_packages() -> dict[str, dict[str, Any]]: - return { - "quickstart": { - "title": "Quick Start: A set of modules for a CDF quick start project.", - "items": { - "sap_data_pipeline": { - "title": "SAP Data Pipeline", - }, - "pi_data_pipeline": { - "title": "PI Data Pipeline", - }, - "mqtt_data_pipeline": { - "title": "MQTT Data Pipeline", - }, - "files_contextualization": { - "title": "Files Contextualization", - }, - "asset_data_transformation": { - "title": "Asset Data Transformation", - }, - "infield": { - "title": "Infield", - }, - }, - }, - "examples": { - "title": "Examples: a set of example modules for inspiration", - "items": { - "cdf_data_pipeline_asset_valhall": {"items": {}}, - "cdf_data_pipeline_files_valhall": {"items": {}}, - }, - }, - "reference": { - "title": "All supported resources as reference", - "items": {"workflow": {}, "transformations": {}, "functions": {}, "groups": {}}, - }, - } - - -class InteractiveInit(typer.Typer): - def __init__(self, *args, **kwargs) -> None: # type: ignore - super().__init__(*args, **kwargs) - self.command()(self.interactive) - - def build_tree(self, item: Union[dict, list], tree: Tree) -> None: - if isinstance(item, dict): - for key, value in item.items(): - subtree = tree.add(key) - for subvalue in value: - if isinstance(subvalue, dict): - self.build_tree(subvalue, subtree) - else: - subtree.add(subvalue) - - def create(self, init_dir: str, selected: dict[str, list[str]], mode: str | None) -> None: - pass - - def interactive( - self, - ctx: typer.Context, - init_dir: Annotated[ - Optional[str], - typer.Option( - help="Directory path to project to initialize or upgrade with templates.", - ), - ] = None, - arg_selected: Annotated[ - Optional[str], - typer.Option( - help=f"List of modules to include. 
Options are '{list(get_packages().keys())}'", - ), - ] = None, - numeric: Annotated[ - Optional[bool], - typer.Option( - help="Use numeric selection instead of arrow keys.", - ), - ] = False, - ) -> None: - """Initialize or upgrade a new CDF project with templates interactively.""" - - print("\n") - print( - Panel( - "This wizard will help you prepare modules in the folder you enter. The modules are thematically bundled in packages you can choose between.", - title="Interactive template wizard", - style="green", - padding=(1, 2), - ) - ) - - selected: dict[str, Any] = {} - available = get_packages() - mode = "new" - - if not init_dir: - init_dir = questionary.text( - "Which directory would you like to create templates in? (typically customer name)", - default="new_project", - ).ask() - - if init_dir and Path(init_dir).is_dir(): - if numeric: - mode = questionary.rawselect( - "Directory already exists. What would you like to do?", - choices=[ - questionary.Choice("Abort", "abort"), - questionary.Choice("Overwrite (clean existing)", "overwrite"), - questionary.Choice("Update (add to or replace existing)", "update"), - ], - pointer=POINTER, - style=custom_style_fancy, - instruction="(Press 1, 2 or 3)", - ).ask() - else: - questionary.select( - "Directory already exists. What would you like to do?", - choices=[ - questionary.Choice("Abort", "abort"), - questionary.Choice("Overwrite (clean existing)", "overwrite"), - questionary.Choice("Update (add to or replace existing)", "update"), - ], - pointer=POINTER, - style=custom_style_fancy, - instruction="use arrow up/down and " + "⮐ " + " to save", - ).ask() - if mode == "abort": - print("Aborting...") - raise typer.Exit() - - if not init_dir: - raise ToolkitRequiredValueError("Directory path is required.") - - print(f" [{'yellow' if mode == 'overwrite' else 'green'}]Using directory [bold]{init_dir}[/]") - - loop = True - while loop: - if not arg_selected: - if numeric: - package_id = questionary.rawselect( - "Which package would you like to include?", - instruction="Type the number of your choice and press enter", - choices=[questionary.Choice(value.get("title", key), key) for key, value in available.items()], - pointer=POINTER, - style=custom_style_fancy, - ).ask() - - else: - package_id = questionary.select( - "Which package would you like to include?", - instruction="Use arrow up/down and ⮐ to save", - choices=[questionary.Choice(value.get("title", key), key) for key, value in available.items()], - pointer=POINTER, - style=custom_style_fancy, - ).ask() - - if package_id: - selected[package_id] = [] - selection = questionary.checkbox( - f"Which modules of {package_id} would you like to include?", - instruction="Use arrow up/down, press space to select item(s) and enter to save", - choices=[ - questionary.Choice(value.get("title", key), key) - for key, value in available[package_id].get("items", {}).items() - ], - qmark=INDENT, - pointer=POINTER, - style=custom_style_fancy, - ).ask() - if len(selection) > 0: - selected[package_id] = selection - else: - selected[package_id] = available[package_id].get("items", {}).keys() - available.pop(package_id) - - print("\n[bold]You have selected the following modules:[/] :robot:\n") - - tree = Tree("modules") - self.build_tree(selected, tree) - print(Padding.indent(tree, 5)) - print("\n") - - if len(available) > 0: - if questionary.confirm("Would you like to add more?", default=False).ask(): - continue - - loop = False - if not questionary.confirm("Would you like to continue with creation?", 
default=True).ask(): - print("Exiting...") - raise typer.Exit() - else: - self.create(init_dir, selected, mode) - print("Done!") - raise typer.Exit() - - -command = InteractiveInit( - name="init", help="Initialize or upgrade a new CDF project with templates interactively." -).interactive diff --git a/cognite_toolkit/_cdf_tk/prototypes/landing_app.py b/cognite_toolkit/_cdf_tk/prototypes/landing_app.py new file mode 100644 index 000000000..d1cf717ca --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/landing_app.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +import typer +from rich import print +from rich.panel import Panel + + +class Landing(typer.Typer): + def __init__(self, *args, **kwargs) -> None: # type: ignore + super().__init__(*args, **kwargs) + self.command()(self.main_init) + + def main_init(self) -> None: + """Guidance on how to get started""" + print( + Panel( + "\n".join( + [ + "[bold]Follow these steps to get started with the Cognite Data Fusion Toolkit:[/b]", + "", + "1. Run [bold]cdf-tk repo init (folder_name)[/] to prepare the working folder.", # todo: connect the folder name with the modules command + "2. Run [bold]cd (folder_name)[/] to enter the newly created folder.", + "3. Run [bold]cdf-tk modules init[/] to select the configuration modules you would like to start with.", + "4. Run [bold]cdf-tk auth verify --interactive[/] to check that you have access to the relevant CDF project. ", + " [italic](if you already have a .env file, you can copy it into the folder and run [bold]cdf-tk auth verify[/] directly)[/]", + "5. Run [bold]cdf-tk build modules[/] to build the configuration and look for variables that need your attention. Repeat as many times as needed.", + "6. Run [bold]cdf-tk deploy --dry-run[/] to simulate the deployment of the configuration to the CDF project. Review the report provided.", + "7. Run [bold]cdf-tk deploy[/] to deploy the configuration to the CDF project.", + "8. Commit the changes to your version control system.", + ] + ), + title="Getting started", + style="green", + padding=(1, 2), + ) + # ) + # Panel("""Welcome to the Cognite Toolkit! + # \n1. Run `cdf-tk repo init (folder_name)` to initialize a new CDF project + # \n2. Run `cdf-tk modules (folder_name)` to select modules.")) + # \n3. Run `cdf-tk auth verify [--interactive] (folder_name)` to check access" + # \n4. Run `cdf-tk build + # \n5.
Run `cdf-tk deploy --dry-run` to simulate deployment + # "))""") + ) diff --git a/cognite_toolkit/_cdf_tk/prototypes/modules_app.py b/cognite_toolkit/_cdf_tk/prototypes/modules_app.py new file mode 100644 index 000000000..b6703b339 --- /dev/null +++ b/cognite_toolkit/_cdf_tk/prototypes/modules_app.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from typing import Annotated, Optional + +import typer +from rich import print + +from cognite_toolkit._cdf import _get_user_command +from cognite_toolkit._cdf_tk.prototypes.commands.modules import ModulesCommand +from cognite_toolkit._version import __version__ + + +class Modules(typer.Typer): + def __init__(self, *args, **kwargs) -> None: # type: ignore + super().__init__(*args, **kwargs) + self.callback(invoke_without_command=True)(self.main) + self.command()(self.init) + self.command()(self.upgrade) + + def main(self, ctx: typer.Context) -> None: + """Commands to manage modules""" + if ctx.invoked_subcommand is None: + print("Use [bold yellow]cdf-tk modules --help[/] for more information.") + + def init( + self, + arg_init_dir: Annotated[ + Optional[str], + typer.Option( + "--init-dir", + help="Directory path to project to initialize or upgrade with templates.", + ), + ] = None, + arg_package: Annotated[ + Optional[str], + typer.Option( + "--package", + help="Name of package to include", + ), + ] = None, + ) -> None: + """Initialize or upgrade a new CDF project with templates interactively.""" + + cmd = ModulesCommand(user_command=_get_user_command()) + cmd.init( + init_dir=arg_init_dir, + arg_package=arg_package, + ) + + def upgrade( + self, + project_dir: Annotated[ + Optional[str], + typer.Argument( + help="Directory path to project to upgrade with templates. Defaults to current directory.", + ), + ] = None, + ) -> None: + cmd = ModulesCommand(user_command=_get_user_command()) + cmd.upgrade(project_dir=project_dir) + + +# This is a trick to use an f-string for the docstring +Modules.upgrade.__doc__ = f"""Upgrade the existing CDF project modules to version {__version__}.""" diff --git a/cognite_toolkit/_cdf_tk/tk_warnings/fileread.py b/cognite_toolkit/_cdf_tk/tk_warnings/fileread.py index 278e608e0..c286ead33 100644 --- a/cognite_toolkit/_cdf_tk/tk_warnings/fileread.py +++ b/cognite_toolkit/_cdf_tk/tk_warnings/fileread.py @@ -68,6 +68,19 @@ def get_message(self) -> str: ) +@dataclass(frozen=True) +class UnknownResourceTypeWarning(YAMLFileWarning): + severity = SeverityLevel.MEDIUM + + suggestion: str | None + + def get_message(self) -> str: + msg = f"{type(self).__name__}: In file {self.filepath.as_posix()!r}." + if self.suggestion: + msg += f" Did you mean to call the file {self.suggestion!r}?" 
+ return SeverityFormat.get_rich_severity_format(self.severity, msg) + + @dataclass(frozen=True) class UnusedParameterWarning(YAMLFileWithElementWarning): severity = SeverityLevel.LOW diff --git a/cognite_toolkit/_cdf_tk/utils.py b/cognite_toolkit/_cdf_tk/utils.py index 84ece9227..2bb62a616 100644 --- a/cognite_toolkit/_cdf_tk/utils.py +++ b/cognite_toolkit/_cdf_tk/utils.py @@ -44,7 +44,12 @@ from rich.prompt import Confirm, Prompt from cognite_toolkit._cdf_tk.constants import _RUNNING_IN_BROWSER, ROOT_MODULES -from cognite_toolkit._cdf_tk.exceptions import ToolkitError, ToolkitResourceMissingError, ToolkitYAMLFormatError +from cognite_toolkit._cdf_tk.exceptions import ( + AuthenticationError, + ToolkitError, + ToolkitResourceMissingError, + ToolkitYAMLFormatError, +) from cognite_toolkit._version import __version__ if sys.version_info < (3, 10): @@ -185,9 +190,8 @@ def _read_and_validate(self, verbose: bool = False, skip_prompt: bool = False) - self._set_cluster_defaults() self.project = reader.prompt_user("project") if not (self.cluster and self.project): - reader.status = "error" - reader.messages.append(" [bold red]ERROR[/]: CDF Cluster and project are required.") - return reader + missing = [field for field in ["cluster", "project"] if not getattr(self, field)] + raise AuthenticationError(f"CDF Cluster and project are required. Missing: {', '.join(missing)}.") self.cdf_url = reader.prompt_user("cdf_url", expected=f"https://{self.cluster}.cognitedata.com") self.login_flow = reader.prompt_user("login_flow", choices=self.login_flow_options()) # type: ignore[assignment] if self.login_flow == "token": @@ -212,8 +216,7 @@ def _read_and_validate(self, verbose: bool = False, skip_prompt: bool = False) - if self.login_flow == "client_credentials": self.audience = reader.prompt_user("audience", expected=f"https://{self.cluster}.cognitedata.com") else: - reader.status = "error" - reader.messages.append(f"The login flow {self.login_flow} is not supported") + raise AuthenticationError(f"The login flow {self.login_flow} is not supported") if not skip_prompt: if Path(".env").exists(): @@ -296,7 +299,7 @@ class AuthReaderValidation: def __init__(self, auth_vars: AuthVariables, verbose: bool, skip_prompt: bool = False): self._auth_vars = auth_vars - self.status: Literal["ok", "error", "warning"] = "ok" + self.status: Literal["ok", "warning"] = "ok" self.messages: list[str] = [] self.verbose = verbose self.skip_prompt = skip_prompt @@ -407,28 +410,27 @@ def __init__(self, token: str | None = None, cluster: str | None = None, project return auth_vars = AuthVariables.from_env(self._environ) - self._failed = not self.initialize_from_auth_variables(auth_vars) + self.initialize_from_auth_variables(auth_vars) def _initialize_in_browser(self) -> None: try: self._client = CogniteClient() except Exception as e: - print(f"[bold red]Error[/] Failed to initialize CogniteClient in browser: {e}") - else: - if self._cluster or self._project: - print("[bold yellow]Warning[/] Cluster and project are arguments ignored when running in the browser.") - self._cluster = self._client.config.base_url.removeprefix("https://").split(".", maxsplit=1)[0] - self._project = self._client.config.project - self._cdf_url = self._client.config.base_url + raise AuthenticationError(f"Failed to initialize CogniteClient in browser: {e}") + + if self._cluster or self._project: + print("[bold yellow]Warning[/] Cluster and project are arguments ignored when running in the browser.") + self._cluster = 
self._client.config.base_url.removeprefix("https://").split(".", maxsplit=1)[0] + self._project = self._client.config.project + self._cdf_url = self._client.config.base_url - def initialize_from_auth_variables(self, auth: AuthVariables) -> bool: + def initialize_from_auth_variables(self, auth: AuthVariables) -> None: """Initialize the CDFToolConfig from the AuthVariables and returns whether it was successful or not.""" cluster = auth.cluster or self._cluster project = auth.project or self._project if cluster is None or project is None: - print(" [bold red]Error[/] Cluster and Project must be set to authenticate the client.") - return False + raise AuthenticationError("Cluster and Project must be set to authenticate the client.") self._cluster = cluster self._project = project @@ -436,18 +438,16 @@ def initialize_from_auth_variables(self, auth: AuthVariables) -> bool: if auth.login_flow == "token": if not auth.token: - print(" [bold red]Error[/] Login flow=token is set but no CDF_TOKEN is not provided.") - return False + raise AuthenticationError("Login flow=token is set but no CDF_TOKEN is provided.") self._credentials_provider = Token(auth.token) elif auth.login_flow == "interactive": if auth.scopes: self._scopes = [auth.scopes] if not (auth.client_id and auth.authority_url and auth.scopes): - print( - " [bold red]Error[/] Login flow=interactive is set but missing required authentication " + raise AuthenticationError( + "Login flow=interactive is set but missing required authentication " "variables: IDP_CLIENT_ID and IDP_TENANT_ID (or IDP_AUTHORITY_URL). Cannot authenticate the client." ) - return False self._credentials_provider = OAuthInteractive( authority_url=auth.authority_url, client_id=auth.client_id, @@ -465,12 +465,11 @@ def initialize_from_auth_variables(self, auth: AuthVariables) -> bool: self._audience = auth.audience if not (auth.token_url and auth.client_id and auth.client_secret and self._scopes and self._audience): - print( - " [bold yellow]Error[/] Login flow=client_credentials is set but missing required authentication " + raise AuthenticationError( + "Login flow=client_credentials is set but missing required authentication " "variables: IDP_CLIENT_ID, IDP_CLIENT_SECRET and IDP_TENANT_ID (or IDP_TOKEN_URL). " "Cannot authenticate the client." ) - return False self._credentials_provider = OAuthClientCredentials( token_url=auth.token_url, @@ -480,8 +479,7 @@ def initialize_from_auth_variables(self, auth: AuthVariables) -> bool: audience=self._audience, ) else: - print(f" [bold red]Error[/] Login flow {auth.login_flow} is not supported.") - return False + raise AuthenticationError(f"Login flow {auth.login_flow} is not supported.") self._client = CogniteClient( ClientConfig( @@ -492,7 +490,6 @@ def initialize_from_auth_variables(self, auth: AuthVariables) -> bool: ) ) self._update_environment_variables() - return True def reinitialize_client(self) -> None: """Reinitialize the client with the current configuration.""" @@ -557,16 +554,6 @@ def __str__(self) -> str: environment, indent=2, sort_keys=True ) - @property - # Flag set if something that should have worked failed if a data set is - # loaded and/or deleted. - def failed(self) -> bool: - return self._failed - - @failed.setter - def failed(self, value: bool) -> None: - self._failed = value - @property def client(self) -> CogniteClient: if self._client is None: @@ -854,8 +841,11 @@ def load_yaml_inject_variables( f"It is expected in {filepath.name}."
) - # CSafeLoader is faster than yaml.safe_load - result = yaml.CSafeLoader(content).get_data() + if yaml.__with_libyaml__: + # CSafeLoader is faster than yaml.safe_load + result = yaml.CSafeLoader(content).get_data() + else: + result = yaml.safe_load(content) if required_return_type == "any": return result elif required_return_type == "list": @@ -886,8 +876,11 @@ def read_yaml_file( filepath: path to the YAML file """ try: - # CSafeLoader is faster than yaml.safe_load - config_data = yaml.CSafeLoader(filepath.read_text()).get_data() + if yaml.__with_libyaml__: + # CSafeLoader is faster than yaml.safe_load + config_data = yaml.CSafeLoader(filepath.read_text()).get_data() + else: + config_data = yaml.safe_load(filepath.read_text()) except yaml.YAMLError as e: print(f" [bold red]ERROR:[/] reading {filepath}: {e}") return {} diff --git a/cognite_toolkit/_system.yaml b/cognite_toolkit/_system.yaml index a1c2157da..17afed34d 100644 --- a/cognite_toolkit/_system.yaml +++ b/cognite_toolkit/_system.yaml @@ -25,4 +25,4 @@ packages: - example_pump_data_model # This part is used by cdf-toolkit to keep track of the version and help you upgrade. -cdf_toolkit_version: 0.2.0 +cdf_toolkit_version: 0.2.1 \ No newline at end of file diff --git a/cognite_toolkit/_version.py b/cognite_toolkit/_version.py index cb7429902..c19de657b 100644 --- a/cognite_toolkit/_version.py +++ b/cognite_toolkit/_version.py @@ -1 +1 @@ -__version__ = "0.2.0" +__version__ = "0.2.1" diff --git a/poetry.lock b/poetry.lock index 9b35fbbb4..325fb9213 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "arrow" @@ -40,13 +40,13 @@ aio = ["aiohttp (>=3.0)"] [[package]] name = "azure-identity" -version = "1.16.0" +version = "1.16.1" description = "Microsoft Azure Identity Library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "azure-identity-1.16.0.tar.gz", hash = "sha256:6ff1d667cdcd81da1ceab42f80a0be63ca846629f518a922f7317a7e3c844e1b"}, - {file = "azure_identity-1.16.0-py3-none-any.whl", hash = "sha256:722fdb60b8fdd55fa44dc378b8072f4b419b56a5e54c0de391f644949f3a826f"}, + {file = "azure-identity-1.16.1.tar.gz", hash = "sha256:6d93f04468f240d59246d8afde3091494a5040d4f141cad0f49fc0c399d0d91e"}, + {file = "azure_identity-1.16.1-py3-none-any.whl", hash = "sha256:8fb07c25642cd4ac422559a8b50d3e77f73dcc2bbfaba419d06d6c9d7cff6726"}, ] [package.dependencies] @@ -287,13 +287,13 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cognite-extractor-utils" -version = "7.2.0" +version = "7.2.1" description = "Utilities for easier development of extractors for CDF" optional = false python-versions = "<4.0.0,>=3.8.0" files = [ - {file = "cognite_extractor_utils-7.2.0-py3-none-any.whl", hash = "sha256:d1a0542c5ec0db495f25ee873a6b3b08fb7569dac7c8ec5e34cdd0bae234cddf"}, - {file = "cognite_extractor_utils-7.2.0.tar.gz", hash = "sha256:d62a27b11c36c705b05f4d74bb947777417d16e348d404dac833ede16174ff0e"}, + {file = "cognite_extractor_utils-7.2.1-py3-none-any.whl", hash = "sha256:462ef18528f404a63c9f2134adcbcaf04b6de0c7872916c5282b6ab93969c9f6"}, + {file = "cognite_extractor_utils-7.2.1.tar.gz", hash = "sha256:6a87cf8a2f901bd3d7217840425e02235ebf56aabdccc8ea0d9f9896d85a5998"}, ] [package.dependencies] @@ -327,13 +327,13 @@ python-json-logger = ">=2.0.7,<3.0.0" [[package]] name = 
"cognite-sdk" -version = "7.49.0" +version = "7.51.0" description = "Cognite Python SDK" optional = false python-versions = "<4.0,>=3.8" files = [ - {file = "cognite_sdk-7.49.0-py3-none-any.whl", hash = "sha256:6e8c846c0e985095224162db0530af60f8f21d3c7d4887e5802b5347feed48e7"}, - {file = "cognite_sdk-7.49.0.tar.gz", hash = "sha256:0cafa4bf21447f280c4ba1ae281e22477d9bf6295c8e9f4ce2d0cbd94d0a273e"}, + {file = "cognite_sdk-7.51.0-py3-none-any.whl", hash = "sha256:3b0126199455006a4638a2d0318c37071b600c9722c0fe9a31a999e3e4c2829b"}, + {file = "cognite_sdk-7.51.0.tar.gz", hash = "sha256:a2796f37b213405a6db69c519c8e9c99046e43f3ae89383222534ac965205fd0"}, ] [package.dependencies] @@ -562,18 +562,18 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] [[package]] name = "filelock" -version = "3.14.0" +version = "3.15.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.14.0-py3-none-any.whl", hash = "sha256:43339835842f110ca7ae60f1e1c160714c5a6afd15a2873419ab185334975c0f"}, - {file = "filelock-3.14.0.tar.gz", hash = "sha256:6ea72da3be9b8c82afd3edcf99f2fffbb5076335a5ae4d03248bb5b6c3eae78a"}, + {file = "filelock-3.15.1-py3-none-any.whl", hash = "sha256:71b3102950e91dfc1bb4209b64be4dc8854f40e5f534428d8684f953ac847fac"}, + {file = "filelock-3.15.1.tar.gz", hash = "sha256:58a2549afdf9e02e10720eaa4d4470f56386d7a6f72edd7d0596337af8ed7ad8"}, ] [package.extras] docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"] typing = ["typing-extensions (>=4.8)"] [[package]] @@ -799,35 +799,51 @@ files = [ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] +[[package]] +name = "mixpanel" +version = "4.10.1" +description = "Official Mixpanel library for Python" +optional = false +python-versions = ">=2.7, !=3.4.*" +files = [ + {file = "mixpanel-4.10.1-py2.py3-none-any.whl", hash = "sha256:a7a338b7197327e36356dbc1903086e7626db6d88367ccdd732b3f3c60d3b3ed"}, + {file = "mixpanel-4.10.1.tar.gz", hash = "sha256:29a6b5773dd34f05cf8e249f4e1d16e7b6280d6b58894551ce9a5aad7700a115"}, +] + +[package.dependencies] +requests = ">=2.4.2" +six = ">=1.9.0" +urllib3 = "*" + [[package]] name = "more-itertools" -version = "10.2.0" +version = "10.3.0" description = "More routines for operating on iterables, beyond itertools" optional = false python-versions = ">=3.8" files = [ - {file = "more-itertools-10.2.0.tar.gz", hash = "sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1"}, - {file = "more_itertools-10.2.0-py3-none-any.whl", hash = "sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684"}, + {file = "more-itertools-10.3.0.tar.gz", hash = "sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463"}, + {file = "more_itertools-10.3.0-py3-none-any.whl", hash = "sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320"}, ] [[package]] name = "msal" -version = "1.28.0" +version = "1.28.1" description = "The Microsoft Authentication Library (MSAL) for Python library enables your app to access the Microsoft Cloud by supporting authentication 
of users with Microsoft Azure Active Directory accounts (AAD) and Microsoft Accounts (MSA) using industry standard OAuth2 and OpenID Connect." optional = false python-versions = ">=3.7" files = [ - {file = "msal-1.28.0-py3-none-any.whl", hash = "sha256:3064f80221a21cd535ad8c3fafbb3a3582cd9c7e9af0bb789ae14f726a0ca99b"}, - {file = "msal-1.28.0.tar.gz", hash = "sha256:80bbabe34567cb734efd2ec1869b2d98195c927455369d8077b3c542088c5c9d"}, + {file = "msal-1.28.1-py3-none-any.whl", hash = "sha256:563c2d70de77a2ca9786aab84cb4e133a38a6897e6676774edc23d610bfc9e7b"}, + {file = "msal-1.28.1.tar.gz", hash = "sha256:d72bbfe2d5c2f2555f4bc6205be4450ddfd12976610dd9a16a9ab0f05c68b64d"}, ] [package.dependencies] -cryptography = ">=0.6,<45" +cryptography = ">=2.5,<45" PyJWT = {version = ">=1.0.0,<3", extras = ["crypto"]} requests = ">=2.0.0,<3" [package.extras] -broker = ["pymsalruntime (>=0.13.2,<0.15)"] +broker = ["pymsalruntime (>=0.13.2,<0.17)"] [[package]] name = "msal-extensions" @@ -944,47 +960,56 @@ files = [ [[package]] name = "numpy" -version = "1.26.4" +version = "2.0.0" description = "Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = 
"sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238"}, + {file = "numpy-2.0.0-cp310-cp310-macosx_14_0_x86_64.whl", hash = 
"sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514"}, + {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196"}, + {file = "numpy-2.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1"}, + {file = "numpy-2.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc"}, + {file = "numpy-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787"}, + {file = "numpy-2.0.0-cp310-cp310-win32.whl", hash = "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98"}, + {file = "numpy-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609"}, + {file = "numpy-2.0.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871"}, + {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4"}, + {file = "numpy-2.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581"}, + {file = "numpy-2.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995"}, + {file = "numpy-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f"}, + {file = "numpy-2.0.0-cp311-cp311-win32.whl", hash = "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f"}, + {file = "numpy-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c"}, + {file = "numpy-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f"}, + {file = "numpy-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85"}, + {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2"}, + {file = "numpy-2.0.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e"}, + {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2"}, + {file = "numpy-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a"}, + {file = "numpy-2.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95"}, + {file = "numpy-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9"}, + {file = "numpy-2.0.0-cp312-cp312-win32.whl", hash = "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54"}, + {file = "numpy-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f"}, + {file = "numpy-2.0.0-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86"}, + {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a"}, + {file = "numpy-2.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d"}, + {file = "numpy-2.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4"}, + {file = "numpy-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44"}, + {file = "numpy-2.0.0-cp39-cp39-win32.whl", hash = "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275"}, + {file = "numpy-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad"}, + {file = "numpy-2.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9"}, + {file = "numpy-2.0.0.tar.gz", hash = "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864"}, ] [[package]] @@ -1005,57 +1030,57 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] [[package]] name = "orjson" -version = "3.10.3" +version = "3.10.5" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.10.3-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9fb6c3f9f5490a3eb4ddd46fc1b6eadb0d6fc16fb3f07320149c3286a1409dd8"}, - {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:252124b198662eee80428f1af8c63f7ff077c88723fe206a25df8dc57a57b1fa"}, - {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9f3e87733823089a338ef9bbf363ef4de45e5c599a9bf50a7a9b82e86d0228da"}, - {file = "orjson-3.10.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8334c0d87103bb9fbbe59b78129f1f40d1d1e8355bbed2ca71853af15fa4ed3"}, - {file = 
"orjson-3.10.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1952c03439e4dce23482ac846e7961f9d4ec62086eb98ae76d97bd41d72644d7"}, - {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c0403ed9c706dcd2809f1600ed18f4aae50be263bd7112e54b50e2c2bc3ebd6d"}, - {file = "orjson-3.10.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:382e52aa4270a037d41f325e7d1dfa395b7de0c367800b6f337d8157367bf3a7"}, - {file = "orjson-3.10.3-cp310-none-win32.whl", hash = "sha256:be2aab54313752c04f2cbaab4515291ef5af8c2256ce22abc007f89f42f49109"}, - {file = "orjson-3.10.3-cp310-none-win_amd64.whl", hash = "sha256:416b195f78ae461601893f482287cee1e3059ec49b4f99479aedf22a20b1098b"}, - {file = "orjson-3.10.3-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:73100d9abbbe730331f2242c1fc0bcb46a3ea3b4ae3348847e5a141265479700"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:544a12eee96e3ab828dbfcb4d5a0023aa971b27143a1d35dc214c176fdfb29b3"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:520de5e2ef0b4ae546bea25129d6c7c74edb43fc6cf5213f511a927f2b28148b"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccaa0a401fc02e8828a5bedfd80f8cd389d24f65e5ca3954d72c6582495b4bcf"}, - {file = "orjson-3.10.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7bc9e8bc11bac40f905640acd41cbeaa87209e7e1f57ade386da658092dc16"}, - {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3582b34b70543a1ed6944aca75e219e1192661a63da4d039d088a09c67543b08"}, - {file = "orjson-3.10.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c23dfa91481de880890d17aa7b91d586a4746a4c2aa9a145bebdbaf233768d5"}, - {file = "orjson-3.10.3-cp311-none-win32.whl", hash = "sha256:1770e2a0eae728b050705206d84eda8b074b65ee835e7f85c919f5705b006c9b"}, - {file = "orjson-3.10.3-cp311-none-win_amd64.whl", hash = "sha256:93433b3c1f852660eb5abdc1f4dd0ced2be031ba30900433223b28ee0140cde5"}, - {file = "orjson-3.10.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a39aa73e53bec8d410875683bfa3a8edf61e5a1c7bb4014f65f81d36467ea098"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0943a96b3fa09bee1afdfccc2cb236c9c64715afa375b2af296c73d91c23eab2"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e852baafceff8da3c9defae29414cc8513a1586ad93e45f27b89a639c68e8176"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18566beb5acd76f3769c1d1a7ec06cdb81edc4d55d2765fb677e3eaa10fa99e0"}, - {file = "orjson-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bd2218d5a3aa43060efe649ec564ebedec8ce6ae0a43654b81376216d5ebd42"}, - {file = "orjson-3.10.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cf20465e74c6e17a104ecf01bf8cd3b7b252565b4ccee4548f18b012ff2f8069"}, - {file = "orjson-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ba7f67aa7f983c4345eeda16054a4677289011a478ca947cd69c0a86ea45e534"}, - {file = "orjson-3.10.3-cp312-none-win32.whl", hash = "sha256:17e0713fc159abc261eea0f4feda611d32eabc35708b74bef6ad44f6c78d5ea0"}, - {file = "orjson-3.10.3-cp312-none-win_amd64.whl", hash = 
"sha256:4c895383b1ec42b017dd2c75ae8a5b862fc489006afde06f14afbdd0309b2af0"}, - {file = "orjson-3.10.3-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:be2719e5041e9fb76c8c2c06b9600fe8e8584e6980061ff88dcbc2691a16d20d"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0175a5798bdc878956099f5c54b9837cb62cfbf5d0b86ba6d77e43861bcec2"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:978be58a68ade24f1af7758626806e13cff7748a677faf95fbb298359aa1e20d"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16bda83b5c61586f6f788333d3cf3ed19015e3b9019188c56983b5a299210eb5"}, - {file = "orjson-3.10.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ad1f26bea425041e0a1adad34630c4825a9e3adec49079b1fb6ac8d36f8b754"}, - {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9e253498bee561fe85d6325ba55ff2ff08fb5e7184cd6a4d7754133bd19c9195"}, - {file = "orjson-3.10.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0a62f9968bab8a676a164263e485f30a0b748255ee2f4ae49a0224be95f4532b"}, - {file = "orjson-3.10.3-cp38-none-win32.whl", hash = "sha256:8d0b84403d287d4bfa9bf7d1dc298d5c1c5d9f444f3737929a66f2fe4fb8f134"}, - {file = "orjson-3.10.3-cp38-none-win_amd64.whl", hash = "sha256:8bc7a4df90da5d535e18157220d7915780d07198b54f4de0110eca6b6c11e290"}, - {file = "orjson-3.10.3-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9059d15c30e675a58fdcd6f95465c1522b8426e092de9fff20edebfdc15e1cb0"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8d40c7f7938c9c2b934b297412c067936d0b54e4b8ab916fd1a9eb8f54c02294"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4a654ec1de8fdaae1d80d55cee65893cb06494e124681ab335218be6a0691e7"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:831c6ef73f9aa53c5f40ae8f949ff7681b38eaddb6904aab89dca4d85099cb78"}, - {file = "orjson-3.10.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99b880d7e34542db89f48d14ddecbd26f06838b12427d5a25d71baceb5ba119d"}, - {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2e5e176c994ce4bd434d7aafb9ecc893c15f347d3d2bbd8e7ce0b63071c52e25"}, - {file = "orjson-3.10.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b69a58a37dab856491bf2d3bbf259775fdce262b727f96aafbda359cb1d114d8"}, - {file = "orjson-3.10.3-cp39-none-win32.whl", hash = "sha256:b8d4d1a6868cde356f1402c8faeb50d62cee765a1f7ffcfd6de732ab0581e063"}, - {file = "orjson-3.10.3-cp39-none-win_amd64.whl", hash = "sha256:5102f50c5fc46d94f2033fe00d392588564378260d64377aec702f21a7a22912"}, - {file = "orjson-3.10.3.tar.gz", hash = "sha256:2b166507acae7ba2f7c315dcf185a9111ad5e992ac81f2d507aac39193c2c818"}, + {file = "orjson-3.10.5-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:545d493c1f560d5ccfc134803ceb8955a14c3fcb47bbb4b2fee0232646d0b932"}, + {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4324929c2dd917598212bfd554757feca3e5e0fa60da08be11b4aa8b90013c1"}, + {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8c13ca5e2ddded0ce6a927ea5a9f27cae77eee4c75547b4297252cb20c4d30e6"}, + {file = 
"orjson-3.10.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b6c8e30adfa52c025f042a87f450a6b9ea29649d828e0fec4858ed5e6caecf63"}, + {file = "orjson-3.10.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:338fd4f071b242f26e9ca802f443edc588fa4ab60bfa81f38beaedf42eda226c"}, + {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6970ed7a3126cfed873c5d21ece1cd5d6f83ca6c9afb71bbae21a0b034588d96"}, + {file = "orjson-3.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:235dadefb793ad12f7fa11e98a480db1f7c6469ff9e3da5e73c7809c700d746b"}, + {file = "orjson-3.10.5-cp310-none-win32.whl", hash = "sha256:be79e2393679eda6a590638abda16d167754393f5d0850dcbca2d0c3735cebe2"}, + {file = "orjson-3.10.5-cp310-none-win_amd64.whl", hash = "sha256:c4a65310ccb5c9910c47b078ba78e2787cb3878cdded1702ac3d0da71ddc5228"}, + {file = "orjson-3.10.5-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cdf7365063e80899ae3a697def1277c17a7df7ccfc979990a403dfe77bb54d40"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b68742c469745d0e6ca5724506858f75e2f1e5b59a4315861f9e2b1df77775a"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7d10cc1b594951522e35a3463da19e899abe6ca95f3c84c69e9e901e0bd93d38"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcbe82b35d1ac43b0d84072408330fd3295c2896973112d495e7234f7e3da2e1"}, + {file = "orjson-3.10.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c0eb7e0c75e1e486c7563fe231b40fdd658a035ae125c6ba651ca3b07936f5"}, + {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:53ed1c879b10de56f35daf06dbc4a0d9a5db98f6ee853c2dbd3ee9d13e6f302f"}, + {file = "orjson-3.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:099e81a5975237fda3100f918839af95f42f981447ba8f47adb7b6a3cdb078fa"}, + {file = "orjson-3.10.5-cp311-none-win32.whl", hash = "sha256:1146bf85ea37ac421594107195db8bc77104f74bc83e8ee21a2e58596bfb2f04"}, + {file = "orjson-3.10.5-cp311-none-win_amd64.whl", hash = "sha256:36a10f43c5f3a55c2f680efe07aa93ef4a342d2960dd2b1b7ea2dd764fe4a37c"}, + {file = "orjson-3.10.5-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:68f85ecae7af14a585a563ac741b0547a3f291de81cd1e20903e79f25170458f"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28afa96f496474ce60d3340fe8d9a263aa93ea01201cd2bad844c45cd21f5268"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cd684927af3e11b6e754df80b9ffafd9fb6adcaa9d3e8fdd5891be5a5cad51e"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d21b9983da032505f7050795e98b5d9eee0df903258951566ecc358f6696969"}, + {file = "orjson-3.10.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ad1de7fef79736dde8c3554e75361ec351158a906d747bd901a52a5c9c8d24b"}, + {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2d97531cdfe9bdd76d492e69800afd97e5930cb0da6a825646667b2c6c6c0211"}, + {file = "orjson-3.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d69858c32f09c3e1ce44b617b3ebba1aba030e777000ebdf72b0d8e365d0b2b3"}, + {file = "orjson-3.10.5-cp312-none-win32.whl", hash = 
"sha256:64c9cc089f127e5875901ac05e5c25aa13cfa5dbbbd9602bda51e5c611d6e3e2"}, + {file = "orjson-3.10.5-cp312-none-win_amd64.whl", hash = "sha256:b2efbd67feff8c1f7728937c0d7f6ca8c25ec81373dc8db4ef394c1d93d13dc5"}, + {file = "orjson-3.10.5-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:03b565c3b93f5d6e001db48b747d31ea3819b89abf041ee10ac6988886d18e01"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:584c902ec19ab7928fd5add1783c909094cc53f31ac7acfada817b0847975f26"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5a35455cc0b0b3a1eaf67224035f5388591ec72b9b6136d66b49a553ce9eb1e6"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1670fe88b116c2745a3a30b0f099b699a02bb3482c2591514baf5433819e4f4d"}, + {file = "orjson-3.10.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:185c394ef45b18b9a7d8e8f333606e2e8194a50c6e3c664215aae8cf42c5385e"}, + {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ca0b3a94ac8d3886c9581b9f9de3ce858263865fdaa383fbc31c310b9eac07c9"}, + {file = "orjson-3.10.5-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dfc91d4720d48e2a709e9c368d5125b4b5899dced34b5400c3837dadc7d6271b"}, + {file = "orjson-3.10.5-cp38-none-win32.whl", hash = "sha256:c05f16701ab2a4ca146d0bca950af254cb7c02f3c01fca8efbbad82d23b3d9d4"}, + {file = "orjson-3.10.5-cp38-none-win_amd64.whl", hash = "sha256:8a11d459338f96a9aa7f232ba95679fc0c7cedbd1b990d736467894210205c09"}, + {file = "orjson-3.10.5-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:85c89131d7b3218db1b24c4abecea92fd6c7f9fab87441cfc342d3acc725d807"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb66215277a230c456f9038d5e2d84778141643207f85336ef8d2a9da26bd7ca"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51bbcdea96cdefa4a9b4461e690c75ad4e33796530d182bdd5c38980202c134a"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbead71dbe65f959b7bd8cf91e0e11d5338033eba34c114f69078d59827ee139"}, + {file = "orjson-3.10.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5df58d206e78c40da118a8c14fc189207fffdcb1f21b3b4c9c0c18e839b5a214"}, + {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c4057c3b511bb8aef605616bd3f1f002a697c7e4da6adf095ca5b84c0fd43595"}, + {file = "orjson-3.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b39e006b00c57125ab974362e740c14a0c6a66ff695bff44615dcf4a70ce2b86"}, + {file = "orjson-3.10.5-cp39-none-win32.whl", hash = "sha256:eded5138cc565a9d618e111c6d5c2547bbdd951114eb822f7f6309e04db0fb47"}, + {file = "orjson-3.10.5-cp39-none-win_amd64.whl", hash = "sha256:cc28e90a7cae7fcba2493953cff61da5a52950e78dc2dacfe931a317ee3d8de7"}, + {file = "orjson-3.10.5.tar.gz", hash = "sha256:7a5baef8a4284405d96c90c7c62b755e9ef1ada84c2406c24a9ebec86b89f46d"}, ] [[package]] @@ -1461,6 +1486,21 @@ dev = ["matplotlib", "mypy", "numpy", "pandas", "pillow", "pre-commit", "restruc image = ["numpy", "pillow"] num = ["numpy", "pandas"] +[[package]] +name = "pytest-rerunfailures" +version = "14.0" +description = "pytest plugin to re-run tests to eliminate flaky failures" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"pytest-rerunfailures-14.0.tar.gz", hash = "sha256:4a400bcbcd3c7a4ad151ab8afac123d90eca3abe27f98725dc4d9702887d2e92"}, + {file = "pytest_rerunfailures-14.0-py3-none-any.whl", hash = "sha256:4197bdd2eaeffdbf50b5ea6e7236f47ff0e44d1def8dae08e409f536d84e7b32"}, +] + +[package.dependencies] +packaging = ">=17.1" +pytest = ">=7.2" + [[package]] name = "pytest-xdist" version = "3.6.1" @@ -2005,4 +2045,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "c3343b66dfaf4bdd5f2a32023700f9c6c60906b4510715367e747e7b19a3c68f" +content-hash = "6251a80491d118d26f57904d6719bbfe1f3609948ba0ce2df1964088fa721cb4" diff --git a/pyproject.toml b/pyproject.toml index 4f427b89b..f888f1f9f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "cognite_toolkit" -version = "0.2.0" +version = "0.2.1" description = "Official Cognite Data Fusion tool for project templates and configuration deployment" authors = ["Cognite AS "] license = "Apache-2" @@ -14,7 +14,7 @@ include = ["cognite_toolkit/*", "cognite_toolkit/**/*"] [tool.poetry.dependencies] python = "^3.9" python-dotenv = "^1.0.0" -cognite-sdk = {version = "^7.47.0", extras = ["pandas"]} +cognite-sdk = {version = "^7.51.0", extras = ["pandas"]} cognite-extractor-utils = ">=7" pandas = ">=1.5.3, <3.0" pyyaml = "^6.0.1" @@ -22,6 +22,7 @@ typer = {version = ">=0.12.0, <1.0", extras = ["all"]} sentry-sdk = "^2.1.0" cognite-logger = "^0.6" questionary = "^2.0.1" +mixpanel = "^4.10.1" [tool.poetry.group.dev.dependencies] mypy = "^1.8.0" @@ -30,6 +31,7 @@ pytest = "^8.0.0" pytest-icdiff = "*" # Used for better diffs in pytest pytest-regressions = "^2.4.2" pytest-xdist = "^3.6.1" +pytest-rerunfailures = "^14.0" types-PyYAML = "^6" twine = "^5.0.0" tomli = { version = "^2.0.1", python = "<3.11" } @@ -57,6 +59,12 @@ follow_imports = "normal" ignore_missing_imports = true exclude = ["cognite_toolkit/cognite_modules", "cognite_toolkit/build*"] +[tool.ruff] +line-length = 120 + +[tool.ruff.format] +# Ruff messes up the docstrings which are expected to be in Markdown format +exclude = ["cognite_toolkit/_cdf_tk/prototypes/commands/_changes.py"] [build-system] requires = ["poetry-core"] diff --git a/tests/tests_integration/test_loaders/test_resource_loaders.py b/tests/tests_integration/test_loaders/test_resource_loaders.py index 26ebf1ba9..772e262f5 100644 --- a/tests/tests_integration/test_loaders/test_resource_loaders.py +++ b/tests/tests_integration/test_loaders/test_resource_loaders.py @@ -82,6 +82,8 @@ def dummy_schedule(cognite_client: CogniteClient, dummy_function: Function) -> F class TestFunctionScheduleLoader: + # The function schedule service is fairly unstable, so we need to rerun the tests if they fail. + @pytest.mark.flaky(reruns=3, reruns_delay=10, only_rerun=["AssertionError"]) def test_update_function_schedule( self, cognite_client: CogniteClient, dummy_function: Function, dummy_schedule: FunctionSchedule ) -> None: diff --git a/tests/tests_unit/data/project_for_test/_system.yaml b/tests/tests_unit/data/project_for_test/_system.yaml index 28d9ec026..47db71382 100644 --- a/tests/tests_unit/data/project_for_test/_system.yaml +++ b/tests/tests_unit/data/project_for_test/_system.yaml @@ -4,4 +4,4 @@ packages: - child_module # This part is used by cdf-toolkit to keep track of the version and help you upgrade. 
-cdf_toolkit_version: 0.2.0 +cdf_toolkit_version: 0.2.1 diff --git a/tests/tests_unit/data/project_no_cognite_modules/_system.yaml b/tests/tests_unit/data/project_no_cognite_modules/_system.yaml index 4a9a0bd43..8c452625f 100644 --- a/tests/tests_unit/data/project_no_cognite_modules/_system.yaml +++ b/tests/tests_unit/data/project_no_cognite_modules/_system.yaml @@ -3,4 +3,4 @@ packages: {} # This part is used by cdf-toolkit to keep track of the version and help you upgrade. -cdf_toolkit_version: 0.2.0 +cdf_toolkit_version: 0.2.1 diff --git a/tests/tests_unit/data/run_data/_system.yaml b/tests/tests_unit/data/run_data/_system.yaml index a1c2157da..afbcc3bad 100644 --- a/tests/tests_unit/data/run_data/_system.yaml +++ b/tests/tests_unit/data/run_data/_system.yaml @@ -25,4 +25,4 @@ packages: - example_pump_data_model # This part is used by cdf-toolkit to keep track of the version and help you upgrade. -cdf_toolkit_version: 0.2.0 +cdf_toolkit_version: 0.2.1 diff --git a/tests/tests_unit/test_cdf_tk/test_auth.py b/tests/tests_unit/test_cdf_tk/test_commands/test_auth.py similarity index 94% rename from tests/tests_unit/test_cdf_tk/test_auth.py rename to tests/tests_unit/test_cdf_tk/test_commands/test_auth.py index 69835a39b..43db2b35b 100644 --- a/tests/tests_unit/test_cdf_tk/test_auth.py +++ b/tests/tests_unit/test_cdf_tk/test_commands/test_auth.py @@ -18,6 +18,7 @@ from pytest import MonkeyPatch from cognite_toolkit._cdf_tk.commands import AuthCommand +from cognite_toolkit._cdf_tk.exceptions import AuthorizationError from cognite_toolkit._cdf_tk.tk_warnings import ( HighSeverityWarning, LowSeverityWarning, @@ -167,7 +168,8 @@ def mock_verify_client(*args, **kwargs): cdf_tool_config.verify_client.side_effect = mock_verify_client cmd = AuthCommand(print_warning=False) - cmd.check_auth(cdf_tool_config, group_file=Path(AUTH_DATA / "rw-group.yaml")) + with pytest.raises(AuthorizationError) as e: + cmd.check_auth(cdf_tool_config, group_file=Path(AUTH_DATA / "rw-group.yaml")) assert len(cmd.warning_list) == 1 assert set(cmd.warning_list) == { @@ -176,3 +178,7 @@ def mock_verify_client(*args, **kwargs): "does not have the basic group write access rights." ) } + assert str(e.value) == ( + "Unable to continue, the service principal/application configured for this " + "client does not have the basic read group access rights." 
+ ) diff --git a/tests/tests_unit/test_cdf_tk/test_commands/test_build.py b/tests/tests_unit/test_cdf_tk/test_commands/test_build.py index 950e82836..2c10b7b51 100644 --- a/tests/tests_unit/test_cdf_tk/test_commands/test_build.py +++ b/tests/tests_unit/test_cdf_tk/test_commands/test_build.py @@ -1,15 +1,119 @@ +from __future__ import annotations + +from collections.abc import Iterable from pathlib import Path +from typing import Any import pytest -from cognite_toolkit._cdf_tk.commands.build import BuildCommand +from cognite_toolkit._cdf_tk.commands.build import BuildCommand, _BuildState, _Helpers +from cognite_toolkit._cdf_tk.data_classes import BuildConfigYAML, Environment from cognite_toolkit._cdf_tk.exceptions import AmbiguousResourceFileError +@pytest.fixture(scope="session") +def dummy_environment() -> Environment: + return Environment( + name="dev", + project="my_project", + build_type="dev", + selected=["none"], + ) + + class TestBuildCommand: def test_get_loader_raises_ambiguous_error(self): with pytest.raises(AmbiguousResourceFileError) as e: BuildCommand()._get_loader( - "transformations", destination=Path("my_module") / "transformations" / "notification.yaml" + "transformations", + destination=Path("transformation") / "notification.yaml", + source_path=Path("my_module") / "transformations" / "notification.yaml", ) assert "Ambiguous resource file" in str(e.value) + + +def valid_yaml_semantics_test_cases() -> Iterable[pytest.ParameterSet]: + yield pytest.param( + """ +- dbName: src:005:test:rawdb:state +- dbName: src:002:weather:rawdb:state +- dbName: uc:001:demand:rawdb:state +- dbName: in:all:rawdb:state +- dbName: src:001:sap:rawdb +""", + Path("build/raw/raw.yaml"), + id="Multiple Raw Databases", + ) + + yield pytest.param( + """ +dbName: src:005:test:rawdb:state +""", + Path("build/raw/raw.yaml"), + id="Single Raw Database", + ) + + yield pytest.param( + """ +dbName: src:005:test:rawdb:state +tableName: myTable +""", + Path("build/raw/raw.yaml"), + id="Single Raw Database with table", + ) + + yield pytest.param( + """ +- dbName: src:005:test:rawdb:state + tableName: myTable +- dbName: src:002:weather:rawdb:state + tableName: myOtherTable +""", + Path("build/raw/raw.yaml"), + id="Multiple Raw Databases with table", + ) + + +class TestCheckYamlSemantics: + @pytest.mark.parametrize("raw_yaml, source_path", list(valid_yaml_semantics_test_cases())) + def test_valid_yaml(self, raw_yaml: str, source_path: Path, dummy_environment: Environment): + state = _BuildState.create(BuildConfigYAML(dummy_environment, filepath=Path("dummy"), variables={})) + cmd = BuildCommand(print_warning=False) + # Only used in error messages + destination = Path("build/raw/raw.yaml") + yaml_warnings = cmd.validate(raw_yaml, source_path, destination, state, False) + assert not yaml_warnings + + +@pytest.fixture() +def my_config(): + return { + "top_variable": "my_top_variable", + "module_a": { + "readwrite_source_id": "my_readwrite_source_id", + "readonly_source_id": "my_readonly_source_id", + }, + "parent": {"child": {"child_variable": "my_child_variable"}}, + } + + +def test_split_config(my_config: dict[str, Any]) -> None: + expected = { + "": {"top_variable": "my_top_variable"}, + "module_a": { + "readwrite_source_id": "my_readwrite_source_id", + "readonly_source_id": "my_readonly_source_id", + }, + "parent.child": {"child_variable": "my_child_variable"}, + } + actual = _Helpers.to_variables_by_module_path(my_config) + + assert actual == expected + + +def test_create_local_config(my_config: dict[str, 
Any]): + configs = _Helpers.to_variables_by_module_path(my_config) + + local_config = _Helpers.create_local_config(configs, Path("parent/child/auth/")) + + assert dict(local_config.items()) == {"top_variable": "my_top_variable", "child_variable": "my_child_variable"} diff --git a/tests/tests_unit/test_cdf_tk/test_describe.py b/tests/tests_unit/test_cdf_tk/test_commands/test_describe.py similarity index 100% rename from tests/tests_unit/test_cdf_tk/test_describe.py rename to tests/tests_unit/test_cdf_tk/test_commands/test_describe.py diff --git a/tests/tests_unit/test_cdf_tk/test_commands/test_feature_flags.py b/tests/tests_unit/test_cdf_tk/test_commands/test_feature_flags.py new file mode 100644 index 000000000..b7586b67e --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_commands/test_feature_flags.py @@ -0,0 +1,14 @@ +from cognite_toolkit._cdf_tk.commands.featureflag import FeatureFlag, Flags + + +class TestFeatureCommand: + def test_unknown_flag_returns_false(self): + assert FeatureFlag.is_enabled("unknown_flag") is False + + def test_user_setting_is_stored(self): + FeatureFlag.reset_user_settings() + assert FeatureFlag.is_enabled("MODULES_CMD") is False + + def test_user_setting_is_read(self): + FeatureFlag.save_user_settings(FeatureFlag.to_flag("modules_cmd"), True) + assert FeatureFlag.is_enabled(Flags.MODULES_CMD) diff --git a/tests/tests_unit/test_cdf_tk/test_pull.py b/tests/tests_unit/test_cdf_tk/test_commands/test_pull.py similarity index 100% rename from tests/tests_unit/test_cdf_tk/test_pull.py rename to tests/tests_unit/test_cdf_tk/test_commands/test_pull.py diff --git a/tests/tests_unit/test_cdf_tk/test_run.py b/tests/tests_unit/test_cdf_tk/test_commands/test_run.py similarity index 100% rename from tests/tests_unit/test_cdf_tk/test_run.py rename to tests/tests_unit/test_cdf_tk/test_commands/test_run.py diff --git a/tests/tests_unit/test_cdf_tk/test_data_classes/__init__.py b/tests/tests_unit/test_cdf_tk/test_data_classes/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/tests_unit/test_cdf_tk/test_data_classes/conftest.py b/tests/tests_unit/test_cdf_tk/test_data_classes/conftest.py new file mode 100644 index 000000000..32cd25b2a --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_data_classes/conftest.py @@ -0,0 +1,21 @@ +from __future__ import annotations + +import pytest + +from cognite_toolkit._cdf_tk.data_classes import Environment +from tests.tests_unit.data import PYTEST_PROJECT + + +@pytest.fixture(scope="session") +def config_yaml() -> str: + return (PYTEST_PROJECT / "config.dev.yaml").read_text() + + +@pytest.fixture(scope="session") +def dummy_environment() -> Environment: + return Environment( + name="dev", + project="my_project", + build_type="dev", + selected=["none"], + ) diff --git a/tests/tests_unit/test_cdf_tk/test_data_classes/test_build_config_yaml.py b/tests/tests_unit/test_cdf_tk/test_data_classes/test_build_config_yaml.py new file mode 100644 index 000000000..6e32dc086 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_data_classes/test_build_config_yaml.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Any + +import pytest + +from cognite_toolkit._cdf_tk.commands import BuildCommand +from cognite_toolkit._cdf_tk.data_classes import BuildConfigYAML, Environment, SystemYAML +from cognite_toolkit._cdf_tk.loaders import LOADER_BY_FOLDER_NAME +from cognite_toolkit._cdf_tk.utils import iterate_modules +from tests.tests_unit.data import PYTEST_PROJECT +from 
tests.tests_unit.test_cdf_tk.constants import BUILD_DIR + + +class TestBuildConfigYAML: + def test_build_config_create_valid_build_folder(self, config_yaml: str) -> None: + build_env_name = "dev" + system_config = SystemYAML.load_from_directory(PYTEST_PROJECT, build_env_name) + config = BuildConfigYAML.load_from_directory(PYTEST_PROJECT, build_env_name) + available_modules = {module.name for module, _ in iterate_modules(PYTEST_PROJECT)} + config.environment.selected = list(available_modules) + + BuildCommand().build_config( + BUILD_DIR, PYTEST_PROJECT, config=config, system_config=system_config, clean=True, verbose=False + ) + + # The resulting build folder should only have subfolders that are matching the folder name + # used by the loaders. + invalid_resource_folders = [ + dir_.name for dir_ in BUILD_DIR.iterdir() if dir_.is_dir() and dir_.name not in LOADER_BY_FOLDER_NAME + ] + assert not invalid_resource_folders, f"Invalid resource folders after build: {invalid_resource_folders}" + + @pytest.mark.parametrize( + "modules, expected_available_modules", + [ + pytest.param({"another_module": {}}, ["another_module"], id="Single module"), + pytest.param( + { + "cognite_modules": { + "top_variable": "my_top_variable", + "a_module": { + "source_id": "123-456-789", + }, + "parent_module": { + "parent_variable": "my_parent_variable", + "child_module": { + "dataset_external_id": "ds_my_dataset", + }, + }, + "module_without_variables": {}, + } + }, + ["a_module", "child_module", "module_without_variables"], + id="Multiple nested modules", + ), + ], + ) + def test_available_modules( + self, modules: dict[str, Any], expected_available_modules: list[str], dummy_environment: Environment + ) -> None: + config = BuildConfigYAML(dummy_environment, filepath=Path("dummy"), variables=modules) + + assert sorted(config.available_modules) == sorted(expected_available_modules) diff --git a/tests/tests_unit/test_cdf_tk/test_data_classes/test_config_yaml.py b/tests/tests_unit/test_cdf_tk/test_data_classes/test_config_yaml.py new file mode 100644 index 000000000..ee8b04c00 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_data_classes/test_config_yaml.py @@ -0,0 +1,165 @@ +from __future__ import annotations + +from typing import Any + +import pytest +import yaml + +from cognite_toolkit._cdf_tk.data_classes import ConfigEntry, Environment, InitConfigYAML +from cognite_toolkit._cdf_tk.utils import YAMLComment, flatten_dict +from tests.tests_unit.data import PYTEST_PROJECT + + +class TestConfigYAML: + def test_producing_correct_keys(self, config_yaml: str, dummy_environment: Environment) -> None: + expected_keys = set(flatten_dict(yaml.safe_load(config_yaml))) + # Custom keys are not loaded from the module folder. + # This custom key is added o the dev.config.yaml for other tests. 
+ expected_keys.remove(("variables", "custom_modules", "my_example_module", "transformation_is_paused")) + # Skip all environment variables + expected_keys = {k for k in expected_keys if not k[0] == "environment"} + + config = InitConfigYAML(dummy_environment).load_defaults(PYTEST_PROJECT) + + actual_keys = set(config.keys()) + missing = expected_keys - actual_keys + assert not missing, f"Missing keys: {missing}" + extra = actual_keys - expected_keys + assert not extra, f"Extra keys: {extra}" + + def test_extract_extract_config_yaml_comments(self, config_yaml: str) -> None: + expected_comments = { + ("variables", "cognite_modules", "a_module", "readonly_source_id"): YAMLComment( + above=["This is a comment in the middle of the file"], after=[] + ), + ("variables", "cognite_modules", "another_module", "default_location"): YAMLComment( + above=["This is a comment at the beginning of the module."] + ), + ("variables", "cognite_modules", "another_module", "source_asset"): YAMLComment( + after=["This is an extra comment added to the config only 'lore ipsum'"] + ), + ("variables", "cognite_modules", "another_module", "source_files"): YAMLComment( + after=["This is a comment after a variable"] + ), + } + + actual_comments = InitConfigYAML._extract_comments(config_yaml) + + assert actual_comments == expected_comments + + @pytest.mark.parametrize( + "raw_file, key_prefix, expected_comments", + [ + pytest.param( + """--- +# This is a module comment +variable: value # After variable comment +# Before variable comment +variable2: value2 +variable3: 'value with #in it' +variable4: "value with #in it" # But a comment after +""", + tuple("super_module.module_a".split(".")), + { + ("super_module", "module_a", "variable"): YAMLComment( + after=["After variable comment"], above=["This is a module comment"] + ), + ("super_module", "module_a", "variable2"): YAMLComment(above=["Before variable comment"]), + ("super_module", "module_a", "variable4"): YAMLComment(after=["But a comment after"]), + }, + id="module comments", + ) + ], + ) + def test_extract_default_config_comments( + self, raw_file: str, key_prefix: tuple[str, ...], expected_comments: dict[str, Any] + ): + actual_comments = InitConfigYAML._extract_comments(raw_file, key_prefix) + assert actual_comments == expected_comments + + def test_persist_variable_with_comment(self, config_yaml: str) -> None: + custom_comment = "This is an extra comment added to the config only 'lore ipsum'" + + config = InitConfigYAML.load_existing(config_yaml).load_defaults(PYTEST_PROJECT) + + dumped = config.dump_yaml_with_comments() + loaded = yaml.safe_load(dumped) + assert loaded["variables"]["cognite_modules"]["another_module"]["source_asset"] == "my_new_workmate" + assert custom_comment in dumped + + def test_added_and_removed_variables(self, config_yaml: str) -> None: + existing_config_yaml = yaml.safe_load(config_yaml) + # Added = Exists in the BUILD_CONFIG directory default.config.yaml files but not in config.yaml + existing_config_yaml["variables"]["cognite_modules"]["another_module"].pop("source_asset") + # Removed = Exists in config.yaml but not in the BUILD_CONFIG directory default.config.yaml files + existing_config_yaml["variables"]["cognite_modules"]["another_module"]["removed_variable"] = "old_value" + + config = InitConfigYAML.load_existing(yaml.safe_dump(existing_config_yaml)).load_defaults(PYTEST_PROJECT) + + removed = [v for v in config.values() if v.default_value is None] + # There is already a custom variable in the config.yaml file + assert 
len(removed) == 2 + assert ("variables", "cognite_modules", "another_module", "removed_variable") in [v.key_path for v in removed] + + added = [v for v in config.values() if v.current_value is None] + assert len(added) == 1 + assert added[0].key_path == ("variables", "cognite_modules", "another_module", "source_asset") + + def test_load_variables(self, dummy_environment: Environment) -> None: + expected = { + ("variables", "cognite_modules", "a_module", "readonly_source_id"), + # default_location is used in two modules and is moved to the top level + ("variables", "cognite_modules", "default_location"), + ("variables", "cognite_modules", "another_module", "source_files"), + ("variables", "cognite_modules", "another_module", "model_space"), + ("variables", "cognite_modules", "parent_module", "child_module", "source_asset"), + } + + config = InitConfigYAML(dummy_environment).load_variables(PYTEST_PROJECT, propagate_reused_variables=True) + + missing = expected - set(config.keys()) + extra = set(config.keys()) - expected + assert not missing, f"Missing keys: {missing}. Got extra {extra}" + assert not extra, f"Extra keys: {extra}" + + def test_load_parent_variables(self, dummy_environment: Environment) -> None: + config = InitConfigYAML( + dummy_environment, + { + ("variables", "cognite_modules", "infield", "shared_variable"): ConfigEntry( + key_path=("variables", "cognite_modules", "infield", "shared_variable"), + default_value="shared_value", + ) + }, + ) + + config._load_variables({"shared_variable": {("cognite_modules", "infield", "cdf_infield_common")}}) + + assert ("variables", "cognite_modules", "infield", "shared_variable") in config.keys() + assert ("variables", "cognite_modules", "infield", "cdf_infield_common", "shared_variable") not in config.keys() + + def test_finds_selected_defaults( + self, + ) -> None: + environment = Environment( + name="dev", + project="my_project", + build_type="dev", + selected=["cognite_modules/a_module"], + ) + + config_all = InitConfigYAML(environment).load_defaults(PYTEST_PROJECT) + config_selected = InitConfigYAML(environment).load_selected_defaults(PYTEST_PROJECT) + + assert len(config_all) > len(config_selected) + assert ("variables", "cognite_modules", "a_module", "readonly_source_id") in config_all.keys() + assert ("variables", "cognite_modules", "a_module", "readonly_source_id") in config_selected.keys() + + assert ("variables", "cognite_modules", "parent_module", "child_module", "child_variable") in config_all.keys() + assert ( + "variables", + "cognite_modules", + "parent_module", + "child_module", + "child_variable", + ) not in config_selected.keys() diff --git a/tests/tests_unit/test_cdf_tk/test_loaders.py b/tests/tests_unit/test_cdf_tk/test_loaders.py deleted file mode 100644 index 84357f314..000000000 --- a/tests/tests_unit/test_cdf_tk/test_loaders.py +++ /dev/null @@ -1,1209 +0,0 @@ -import os -import pathlib -from collections import Counter, defaultdict -from collections.abc import Hashable, Iterable -from pathlib import Path -from unittest.mock import MagicMock, patch - -import pytest -import requests -import yaml -from cognite.client import data_modeling as dm -from cognite.client.data_classes import ( - DataSet, - ExtractionPipelineConfig, - FileMetadata, - FunctionWrite, - Group, - GroupWrite, - Transformation, - TransformationSchedule, -) -from cognite.client.data_classes.data_modeling import Edge, Node, NodeApply -from pytest import MonkeyPatch -from pytest_regressions.data_regression import DataRegressionFixture - -from 
cognite_toolkit._cdf_tk._parameters import ParameterSet, read_parameters_from_dict -from cognite_toolkit._cdf_tk.commands import BuildCommand, CleanCommand, DeployCommand -from cognite_toolkit._cdf_tk.data_classes import ( - BuildConfigYAML, - Environment, - InitConfigYAML, - SystemYAML, -) -from cognite_toolkit._cdf_tk.exceptions import ToolkitYAMLFormatError -from cognite_toolkit._cdf_tk.loaders import ( - LOADER_BY_FOLDER_NAME, - LOADER_LIST, - RESOURCE_LOADER_LIST, - ContainerLoader, - DataModelLoader, - DatapointsLoader, - DataSetsLoader, - ExtractionPipelineConfigLoader, - ExtractionPipelineLoader, - FileMetadataLoader, - FunctionLoader, - GroupAllScopedLoader, - GroupLoader, - GroupResourceScopedLoader, - Loader, - NodeLoader, - RawDatabaseLoader, - RawTableLoader, - ResourceLoader, - ResourceTypes, - SpaceLoader, - TimeSeriesLoader, - TransformationLoader, - ViewLoader, -) -from cognite_toolkit._cdf_tk.loaders.data_classes import NodeAPICall, NodeApplyListWithCall, RawDatabaseTable -from cognite_toolkit._cdf_tk.utils import ( - CDFToolConfig, - module_from_path, - resource_folder_from_path, - tmp_build_directory, -) -from cognite_toolkit._cdf_tk.validation import validate_resource_yaml -from tests.constants import REPO_ROOT -from tests.tests_unit.approval_client import ApprovalCogniteClient -from tests.tests_unit.data import LOAD_DATA, PYTEST_PROJECT -from tests.tests_unit.test_cdf_tk.constants import BUILD_DIR, SNAPSHOTS_DIR_ALL -from tests.tests_unit.utils import FakeCogniteResourceGenerator, mock_read_yaml_file - -SNAPSHOTS_DIR = SNAPSHOTS_DIR_ALL / "load_data_snapshots" - - -@pytest.mark.parametrize( - "loader_cls", - [ - FileMetadataLoader, - DatapointsLoader, - ], -) -def test_loader_class( - loader_cls: type[ResourceLoader], - cognite_client_approval: ApprovalCogniteClient, - data_regression: DataRegressionFixture, -): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - cdf_tool.data_set_id = 999 - - cmd = DeployCommand(print_warning=False) - loader = loader_cls.create_loader(cdf_tool, LOAD_DATA) - cmd.deploy_resources(loader, cdf_tool, dry_run=False) - - dump = cognite_client_approval.dump() - data_regression.check(dump, fullpath=SNAPSHOTS_DIR / f"{loader.folder_name}.yaml") - - -class TestFunctionLoader: - def test_load_functions(self, cognite_client_approval: ApprovalCogniteClient): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - - loader = FunctionLoader.create_loader(cdf_tool, None) - loaded = loader.load_resource(LOAD_DATA / "functions" / "1.my_functions.yaml", cdf_tool, skip_validation=False) - assert len(loaded) == 2 - - def test_load_function(self, cognite_client_approval: ApprovalCogniteClient): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - - loader = FunctionLoader.create_loader(cdf_tool, None) - loaded = loader.load_resource(LOAD_DATA / "functions" / "1.my_function.yaml", cdf_tool, skip_validation=False) - assert isinstance(loaded, FunctionWrite) - - -class TestDataSetsLoader: - def test_upsert_data_set(self, cognite_client_approval: 
ApprovalCogniteClient): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - - loader = DataSetsLoader.create_loader(cdf_tool, None) - loaded = loader.load_resource(LOAD_DATA / "data_sets" / "1.my_datasets.yaml", cdf_tool, skip_validation=False) - assert len(loaded) == 2 - - first = DataSet.load(loaded[0].dump()) - # Set the properties that are set on the server side - first.id = 42 - first.created_time = 42 - first.last_updated_time = 42 - # Simulate that the data set is already in CDF - cognite_client_approval.append(DataSet, first) - cmd = DeployCommand(print_warning=False) - to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple(loaded, loader) - - assert len(to_create) == 1 - assert len(to_change) == 0 - assert len(unchanged) == 1 - - -class TestContainerLoader: - @pytest.mark.parametrize( - "item", - [ - pytest.param( - { - "properties": { - "myDirectRelation": { - "name": "my direct relation", - "type": { - "type": "direct", - "container": { - "type": "container", - "space": "sp_my_space", - "externalId": "my_container", - }, - }, - } - } - }, - id="Direct relation property with require constraint.", - ), - ], - ) - def test_valid_spec(self, item: dict): - spec = ContainerLoader.get_write_cls_parameter_spec() - dumped = read_parameters_from_dict(item) - - extra = dumped - spec - - assert not extra, f"Extra keys: {extra}" - - -class TestViewLoader: - @pytest.mark.parametrize( - "item", - [ - pytest.param( - { - "filter": { - "hasData": [ - {"type": "container", "space": "sp_my_space", "externalId": "container_id"}, - {"type": "view", "space": "sp_my_space", "externalId": "view_id"}, - ] - } - }, - id="HasData Filter", - ), - pytest.param( - { - "properties": { - "reverseDirectRelation": { - "connectionType": "multi_reverse_direct_relation", - "source": { - "type": "view", - "space": "sp_my_space", - "externalId": "view_id", - "version": "v42", - }, - "through": { - "source": { - "type": "view", - "space": "sp_my_space", - "externalId": "view_id", - "version": "v42", - }, - "identifier": "view_property", - }, - } - } - }, - id="Reverse Direct Relation Property", - ), - ], - ) - def test_valid_spec(self, item: dict): - spec = ViewLoader.get_write_cls_parameter_spec() - dumped = read_parameters_from_dict(item) - - extra = dumped - spec - - assert not extra, f"Extra keys: {extra}" - - def test_update_view_with_interface(self, cognite_client_approval: ApprovalCogniteClient): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - prop1 = dm.MappedProperty( - dm.ContainerId(space="sp_space", external_id="container_id"), - "prop1", - type=dm.Text(), - nullable=True, - auto_increment=False, - ) - interface = dm.View( - space="sp_space", - external_id="interface", - version="1", - properties={"prop1": prop1}, - last_updated_time=1, - created_time=1, - description=None, - name=None, - filter=None, - implements=None, - writable=True, - used_for="node", - is_global=False, - ) - # Note that child views always contain all properties of their parent interfaces. 
- child_cdf = dm.View( - space="sp_space", - external_id="child", - version="1", - properties={"prop1": prop1}, - last_updated_time=1, - created_time=1, - description=None, - name=None, - filter=None, - implements=[interface.as_id()], - writable=True, - used_for="node", - is_global=False, - ) - child_local = dm.ViewApply( - space="sp_space", - external_id="child", - version="1", - implements=[interface.as_id()], - ) - # Simulating that the interface and child_cdf are available in CDF - cognite_client_approval.append(dm.View, [interface, child_cdf]) - - loader = ViewLoader.create_loader(cdf_tool, None) - cmd = DeployCommand(print_warning=False) - to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple( - dm.ViewApplyList([child_local]), loader - ) - - assert len(to_create) == 0 - assert len(to_change) == 0 - assert len(unchanged) == 1 - - @pytest.mark.parametrize( - "item, expected", - [ - pytest.param( - { - "space": "sp_my_space", - "properties": { - "name": { - "container": { - "type": "container", - "space": "my_container_space", - "externalId": "my_container", - } - } - }, - }, - [ - (SpaceLoader, "sp_my_space"), - (ContainerLoader, dm.ContainerId(space="my_container_space", external_id="my_container")), - ], - id="View with one container property", - ), - pytest.param( - { - "space": "sp_my_space", - "properties": { - "toEdge": { - "source": { - "type": "view", - "space": "my_view_space", - "externalId": "my_view", - "version": "1", - }, - "edgeSource": { - "type": "view", - "space": "my_other_view_space", - "externalId": "my_edge_view", - "version": "42", - }, - } - }, - }, - [ - (SpaceLoader, "sp_my_space"), - (ViewLoader, dm.ViewId(space="my_view_space", external_id="my_view", version="1")), - (ViewLoader, dm.ViewId(space="my_other_view_space", external_id="my_edge_view", version="42")), - ], - id="View with one container property", - ), - ], - ) - def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: - actual = ViewLoader.get_dependent_items(item) - - assert list(actual) == expected - - -class TestDataModelLoader: - def test_update_data_model_random_view_order(self, cognite_client_approval: ApprovalCogniteClient): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - cdf_data_model = dm.DataModel( - space="sp_space", - external_id="my_model", - version="1", - views=[ - dm.ViewId(space="sp_space", external_id="first", version="1"), - dm.ViewId(space="sp_space", external_id="second", version="1"), - ], - last_updated_time=1, - created_time=1, - description=None, - name=None, - is_global=False, - ) - # Simulating that the data model is available in CDF - cognite_client_approval.append(dm.DataModel, cdf_data_model) - - local_data_model = dm.DataModelApply( - space="sp_space", - external_id="my_model", - version="1", - views=[ - dm.ViewId(space="sp_space", external_id="second", version="1"), - dm.ViewId(space="sp_space", external_id="first", version="1"), - ], - description=None, - name=None, - ) - - loader = DataModelLoader.create_loader(cdf_tool, None) - cmd = DeployCommand(print_warning=False) - to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple( - dm.DataModelApplyList([local_data_model]), loader - ) - - assert len(to_create) == 0 - assert len(to_change) == 0 - assert len(unchanged) == 
1 - - -class TestGroupLoader: - def test_load_all_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): - loader = GroupAllScopedLoader.create_loader(cdf_tool_config, None) - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_unscoped.yaml", cdf_tool_config, skip_validation=False - ) - assert loaded.name == "unscoped_group_name" - - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=False - ) - assert loaded is None - - def test_load_resource_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): - loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_unscoped.yaml", cdf_tool_config, skip_validation=False - ) - - assert loaded is None - - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=False - ) - assert loaded.name == "scoped_group_name" - assert len(loaded.capabilities) == 4 - - caps = {str(type(element).__name__): element for element in loaded.capabilities} - - assert all(isinstance(item, int) for item in caps["DataSetsAcl"].scope.ids) - assert all(isinstance(item, int) for item in caps["AssetsAcl"].scope.ids) - assert all(isinstance(item, int) for item in caps["ExtractionConfigsAcl"].scope.ids) - assert caps["SessionsAcl"].scope._scope_name == "all" - - def test_load_group_list_resource_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): - loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_list_combined.yaml", cdf_tool_config, skip_validation=True - ) - - assert isinstance(loaded, GroupWrite) - assert loaded.name == "scoped_group_name" - - def test_load_group_list_all_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): - loader = GroupAllScopedLoader.create_loader(cdf_tool_config, None) - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_list_combined.yaml", cdf_tool_config, skip_validation=True - ) - - assert isinstance(loaded, GroupWrite) - assert loaded.name == "unscoped_group_name" - - def test_unchanged_new_group( - self, cdf_tool_config: CDFToolConfig, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch - ): - loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=True - ) - - # Simulate that one group is is already in CDF - cognite_client_approval.append( - Group, - [ - Group( - id=123, - name=loaded.name, - source_id=loaded.source_id, - capabilities=loaded.capabilities, - metadata=loaded.metadata, - is_deleted=False, - ) - ], - ) - - new_group = GroupWrite(name="new_group", source_id="123", capabilities=[]) - cmd = DeployCommand(print_warning=False) - to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple( - resources=[loaded, new_group], loader=loader - ) - - assert len(to_create) == 1 - assert len(to_change) == 0 - assert len(unchanged) == 1 - - def test_upsert_group( - self, cdf_tool_config: CDFToolConfig, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch - ): - loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) - loaded = loader.load_resource( - LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=True - ) - cmd = 
DeployCommand(print_warning=False) - - # Simulate that the group is is already in CDF, but with fewer capabilities - # Simulate that one group is is already in CDF - cognite_client_approval.append( - Group, - [ - Group( - id=123, - name=loaded.name, - source_id=loaded.source_id, - capabilities=loaded.capabilities[0:1], - metadata=loaded.metadata, - is_deleted=False, - ) - ], - ) - - # group exists, no changes - to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple(resources=[loaded], loader=loader) - - assert len(to_create) == 0 - assert len(to_change) == 1 - assert len(unchanged) == 0 - - cmd._update_resources( - to_change, - loader, - ) - - assert cognite_client_approval.create_calls()["Group"] == 1 - assert cognite_client_approval.delete_calls()["Group"] == 1 - - @pytest.mark.parametrize( - "item, expected", - [ - pytest.param( - {"capabilities": [{"dataModelsAcl": {"scope": {"spaceIdScope": {"spaceIds": ["space1", "space2"]}}}}]}, - [(SpaceLoader, "space1"), (SpaceLoader, "space2")], - id="SpaceId scope", - ), - pytest.param( - {"capabilities": [{"timeSeriesAcl": {"scope": {"datasetScope": {"ids": ["ds_dataset1"]}}}}]}, - [ - (DataSetsLoader, "ds_dataset1"), - ], - id="Dataset scope", - ), - pytest.param( - { - "capabilities": [ - {"extractionRunsAcl": {"scope": {"extractionPipelineScope": {"ids": ["ex_my_extraction"]}}}} - ] - }, - [ - (ExtractionPipelineLoader, "ex_my_extraction"), - ], - id="Extraction pipeline scope", - ), - pytest.param( - {"capabilities": [{"rawAcl": {"scope": {"tableScope": {"dbsToTables": {"my_db": ["my_table"]}}}}}]}, - [ - (RawDatabaseLoader, RawDatabaseTable("my_db")), - (RawTableLoader, RawDatabaseTable("my_db", "my_table")), - ], - id="Table scope", - ), - pytest.param( - {"capabilities": [{"datasetsAcl": {"scope": {"idscope": {"ids": ["ds_my_dataset"]}}}}]}, - [ - (DataSetsLoader, "ds_my_dataset"), - ], - id="ID scope dataset", - ), - pytest.param( - {"capabilities": [{"extractionPipelinesAcl": {"scope": {"idscope": {"ids": ["ex_my_extraction"]}}}}]}, - [ - (ExtractionPipelineLoader, "ex_my_extraction"), - ], - id="ID scope extractionpipline ", - ), - ], - ) - def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: - actual_dependent_items = GroupLoader.get_dependent_items(item) - - assert list(actual_dependent_items) == expected - - -class TestTimeSeriesLoader: - timeseries_yaml = """ -externalId: pi_160696 -name: VAL_23-PT-92504:X.Value -dataSetExternalId: ds_timeseries_oid -isString: false -metadata: - compdev: '0' - location5: '2' -isStep: false -description: PH 1stStgSuctCool Gas Out -""" - - def test_load_skip_validation_no_preexisting_dataset( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = TimeSeriesLoader(cognite_client_approval.mock_client, None) - mock_read_yaml_file({"timeseries.yaml": yaml.safe_load(self.timeseries_yaml)}, monkeypatch) - loaded = loader.load_resource(Path("timeseries.yaml"), cdf_tool_config_real, skip_validation=True) - - assert len(loaded) == 1 - assert loaded[0].data_set_id == -1 - - def test_load_skip_validation_with_preexisting_dataset( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - cognite_client_approval.append(DataSet, DataSet(id=12345, external_id="ds_timeseries_oid")) - loader = TimeSeriesLoader(cognite_client_approval.mock_client, None) - - 
mock_read_yaml_file({"timeseries.yaml": yaml.safe_load(self.timeseries_yaml)}, monkeypatch) - - loaded = loader.load_resource(Path("timeseries.yaml"), cdf_tool_config_real, skip_validation=True) - - assert len(loaded) == 1 - assert loaded[0].data_set_id == 12345 - - -class TestTransformationLoader: - trafo_yaml = """ -externalId: tr_first_transformation -name: 'example:first:transformation' -interval: '{{scheduleHourly}}' -isPaused: true -query: "INLINE" -destination: - type: 'assets' -ignoreNullFields: true -isPublic: true -conflictMode: upsert -""" - - trafo_sql = "FILE" - - def test_no_auth_load( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = TransformationLoader(cognite_client_approval.mock_client, None) - mock_read_yaml_file({"transformation.yaml": yaml.CSafeLoader(self.trafo_yaml).get_data()}, monkeypatch) - loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) - assert loaded.destination_oidc_credentials is None - assert loaded.source_oidc_credentials is None - - def test_oidc_auth_load( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = TransformationLoader(cognite_client_approval.mock_client, None) - - resource = yaml.CSafeLoader(self.trafo_yaml).get_data() - - resource["authentication"] = { - "clientId": "{{cicd_clientId}}", - "clientSecret": "{{cicd_clientSecret}}", - "tokenUri": "{{cicd_tokenUri}}", - "cdfProjectName": "{{cdfProjectName}}", - "scopes": "{{cicd_scopes}}", - "audience": "{{cicd_audience}}", - } - - mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) - - loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) - assert loaded.destination_oidc_credentials.dump() == loaded.source_oidc_credentials.dump() - assert loaded.destination is not None - - def test_oidc_raise_if_invalid( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = TransformationLoader(cognite_client_approval.mock_client, None) - - resource = yaml.CSafeLoader(self.trafo_yaml).get_data() - - resource["authentication"] = { - "clientId": "{{cicd_clientId}}", - "clientSecret": "{{cicd_clientSecret}}", - } - - mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) - - with pytest.raises(ToolkitYAMLFormatError): - loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) - - def test_sql_file( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = TransformationLoader(cognite_client_approval.mock_client, None) - - resource = yaml.CSafeLoader(self.trafo_yaml).get_data() - resource.pop("query") - mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) - - with patch.object(TransformationLoader, "_get_query_file", return_value=Path("transformation.sql")): - with patch.object(pathlib.Path, "read_text", return_value=self.trafo_sql): - loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) - assert loaded.query == self.trafo_sql - - def test_sql_inline( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = 
TransformationLoader(cognite_client_approval.mock_client, None) - - resource = yaml.CSafeLoader(self.trafo_yaml).get_data() - - mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) - - with patch.object(TransformationLoader, "_get_query_file", return_value=None): - loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) - assert loaded.query == resource["query"] - - def test_if_ambiguous( - self, - cognite_client_approval: ApprovalCogniteClient, - cdf_tool_config_real: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = TransformationLoader(cognite_client_approval.mock_client, None) - - mock_read_yaml_file({"transformation.yaml": yaml.CSafeLoader(self.trafo_yaml).get_data()}, monkeypatch) - - with pytest.raises(ToolkitYAMLFormatError): - with patch.object(TransformationLoader, "_get_query_file", return_value=Path("transformation.sql")): - with patch.object(pathlib.Path, "read_text", return_value=self.trafo_sql): - loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) - - @pytest.mark.parametrize( - "item, expected", - [ - pytest.param( - { - "dataSetExternalId": "ds_my_dataset", - "destination": { - "type": "instances", - "dataModel": { - "space": "sp_model_space", - "externalId": "my_model", - "version": "v1", - "destinationType": "assets", - }, - "instanceSpace": "sp_data_space", - }, - }, - [ - (DataSetsLoader, "ds_my_dataset"), - (SpaceLoader, "sp_data_space"), - (DataModelLoader, dm.DataModelId(space="sp_model_space", external_id="my_model", version="v1")), - ], - id="Transformation to data model", - ), - pytest.param( - { - "destination": { - "type": "nodes", - "view": {"space": "sp_space", "externalId": "my_view", "version": "v1"}, - "instanceSpace": "sp_data_space", - } - }, - [ - (SpaceLoader, "sp_data_space"), - (ViewLoader, dm.ViewId(space="sp_space", external_id="my_view", version="v1")), - ], - id="Transformation to nodes ", - ), - pytest.param( - {"destination": {"type": "raw", "database": "my_db", "table": "my_table"}}, - [ - (RawDatabaseLoader, RawDatabaseTable("my_db")), - (RawTableLoader, RawDatabaseTable("my_db", "my_table")), - ], - id="Transformation to RAW table", - ), - ], - ) - def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: - actual = TransformationLoader.get_dependent_items(item) - - assert list(actual) == expected - - -class TestNodeLoader: - @pytest.mark.parametrize( - "yamL_raw, expected", - [ - pytest.param( - """space: my_space -externalId: my_external_id""", - NodeApplyListWithCall([NodeApply("my_space", "my_external_id")]), - id="Single node no API call", - ), - pytest.param( - """- space: my_space - externalId: my_first_node -- space: my_space - externalId: my_second_node -""", - NodeApplyListWithCall( - [ - NodeApply("my_space", "my_first_node"), - NodeApply("my_space", "my_second_node"), - ] - ), - id="Multiple nodes no API call", - ), - pytest.param( - """autoCreateDirectRelations: true -skipOnVersionConflict: false -replace: true -node: - space: my_space - externalId: my_external_id""", - NodeApplyListWithCall([NodeApply("my_space", "my_external_id")], NodeAPICall(True, False, True)), - id="Single node with API call", - ), - pytest.param( - """autoCreateDirectRelations: true -skipOnVersionConflict: false -replace: true -nodes: -- space: my_space - externalId: my_first_node -- space: my_space - externalId: my_second_node - """, - NodeApplyListWithCall( - [ - 
NodeApply("my_space", "my_first_node"), - NodeApply("my_space", "my_second_node"), - ], - NodeAPICall(True, False, True), - ), - id="Multiple nodes with API call", - ), - ], - ) - def test_load_nodes( - self, - yamL_raw: str, - expected: NodeApplyListWithCall, - cdf_tool_config: CDFToolConfig, - monkeypatch: MonkeyPatch, - ) -> None: - loader = NodeLoader.create_loader(cdf_tool_config, None) - mock_read_yaml_file({"my_node.yaml": yaml.safe_load(yamL_raw)}, monkeypatch) - loaded = loader.load_resource(Path("my_node.yaml"), cdf_tool_config, skip_validation=True) - - assert loaded.dump() == expected.dump() - - -class TestExtractionPipelineDependencies: - _yaml = """ - externalId: 'ep_src_asset_hamburg_sap' - name: 'Hamburg SAP' - dataSetId: 12345 - """ - - config_yaml = """ - externalId: 'ep_src_asset' - description: 'DB extractor config reading data from Springfield SAP' - """ - - def test_load_extraction_pipeline_upsert_create_one( - self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch - ): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - - cognite_client_approval.append( - ExtractionPipelineConfig, - ExtractionPipelineConfig( - external_id="ep_src_asset", - description="DB extractor config reading data from Springfield SAP", - ), - ) - - def test_load_extraction_pipeline_upsert_update_one( - self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch - ): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - - cognite_client_approval.append( - ExtractionPipelineConfig, - ExtractionPipelineConfig( - external_id="ep_src_asset", - description="DB extractor config reading data from Springfield SAP", - config="\n logger: \n {level: WARN}", - ), - ) - - mock_read_yaml_file( - {"extraction_pipeline.config.yaml": yaml.CSafeLoader(self.config_yaml).get_data()}, monkeypatch - ) - - cmd = DeployCommand(print_warning=False) - loader = ExtractionPipelineConfigLoader.create_loader(cdf_tool, None) - resources = loader.load_resource(Path("extraction_pipeline.config.yaml"), cdf_tool, skip_validation=False) - to_create, changed, unchanged = cmd.to_create_changed_unchanged_triple([resources], loader) - assert len(to_create) == 0 - assert len(changed) == 1 - assert len(unchanged) == 0 - - def test_load_extraction_pipeline_delete_one( - self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch - ): - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - - cognite_client_approval.append( - ExtractionPipelineConfig, - ExtractionPipelineConfig( - external_id="ep_src_asset", - description="DB extractor config reading data from Springfield SAP", - config="\n logger: \n {level: WARN}", - ), - ) - - mock_read_yaml_file( - {"extraction_pipeline.config.yaml": yaml.CSafeLoader(self.config_yaml).get_data()}, monkeypatch - ) - - cmd = CleanCommand(print_warning=False) - loader = ExtractionPipelineConfigLoader.create_loader(cdf_tool, None) - with 
patch.object( - ExtractionPipelineConfigLoader, "find_files", return_value=[Path("extraction_pipeline.config.yaml")] - ): - res = cmd.clean_resources(loader, cdf_tool, dry_run=True, drop=True) - assert res.deleted == 1 - - -class TestExtractionPipelineLoader: - @pytest.mark.parametrize( - "item, expected", - [ - pytest.param( - { - "dataSetExternalId": "ds_my_dataset", - "rawTables": [ - {"dbName": "my_db", "tableName": "my_table"}, - {"dbName": "my_db", "tableName": "my_table2"}, - ], - }, - [ - (DataSetsLoader, "ds_my_dataset"), - (RawDatabaseLoader, RawDatabaseTable("my_db")), - (RawTableLoader, RawDatabaseTable("my_db", "my_table")), - (RawTableLoader, RawDatabaseTable("my_db", "my_table2")), - ], - id="Extraction pipeline to Table", - ), - ], - ) - def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: - actual = ExtractionPipelineLoader.get_dependent_items(item) - - assert list(actual) == expected - - -class TestDeployResources: - def test_deploy_resource_order(self, cognite_client_approval: ApprovalCogniteClient): - build_env_name = "dev" - system_config = SystemYAML.load_from_directory(PYTEST_PROJECT, build_env_name) - config = BuildConfigYAML.load_from_directory(PYTEST_PROJECT, build_env_name) - config.environment.selected = ["another_module"] - build_cmd = BuildCommand() - build_cmd.build_config( - BUILD_DIR, PYTEST_PROJECT, config=config, system_config=system_config, clean=True, verbose=False - ) - expected_order = ["MyView", "MyOtherView"] - cdf_tool = MagicMock(spec=CDFToolConfig) - cdf_tool.verify_client.return_value = cognite_client_approval.mock_client - cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client - cdf_tool.client = cognite_client_approval.mock_client - - cmd = DeployCommand(print_warning=False) - cmd.deploy_resources(ViewLoader.create_loader(cdf_tool, BUILD_DIR), cdf_tool, dry_run=False) - - views = cognite_client_approval.dump(sort=False)["View"] - - actual_order = [view["externalId"] for view in views] - - assert actual_order == expected_order - - -class TestFormatConsistency: - @pytest.mark.parametrize("Loader", RESOURCE_LOADER_LIST) - def test_fake_resource_generator( - self, Loader: type[ResourceLoader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch - ): - fakegenerator = FakeCogniteResourceGenerator(seed=1337) - - loader = Loader.create_loader(cdf_tool_config, None) - instance = fakegenerator.create_instance(loader.resource_write_cls) - - assert isinstance(instance, loader.resource_write_cls) - - @pytest.mark.parametrize("Loader", RESOURCE_LOADER_LIST) - def test_loader_takes_dict( - self, Loader: type[ResourceLoader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch - ): - loader = Loader.create_loader(cdf_tool_config, None) - - if loader.resource_cls in [Transformation, FileMetadata]: - pytest.skip("Skipped loaders that require secondary files") - elif loader.resource_cls in [Edge, Node]: - pytest.skip(f"Skipping {loader.resource_cls} because it has special properties") - elif Loader in [GroupResourceScopedLoader]: - pytest.skip(f"Skipping {loader.resource_cls} because it requires scoped capabilities") - - instance = FakeCogniteResourceGenerator(seed=1337).create_instance(loader.resource_write_cls) - - # special case - if isinstance(instance, TransformationSchedule): - del instance.id # Client validation does not allow id and externalid to be set simultaneously - - mock_read_yaml_file({"dict.yaml": instance.dump()}, monkeypatch) - - loaded = 
loader.load_resource( - filepath=Path(loader.folder_name) / "dict.yaml", ToolGlobals=cdf_tool_config, skip_validation=True - ) - assert isinstance( - loaded, (loader.resource_write_cls, loader.list_write_cls) - ), f"loaded must be an instance of {loader.list_write_cls} or {loader.resource_write_cls} but is {type(loaded)}" - - @pytest.mark.parametrize("Loader", RESOURCE_LOADER_LIST) - def test_loader_takes_list( - self, Loader: type[ResourceLoader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch - ): - loader = Loader.create_loader(cdf_tool_config, None) - - if loader.resource_cls in [Transformation, FileMetadata]: - pytest.skip("Skipped loaders that require secondary files") - elif loader.resource_cls in [Edge, Node]: - pytest.skip(f"Skipping {loader.resource_cls} because it has special properties") - elif Loader in [GroupResourceScopedLoader]: - pytest.skip(f"Skipping {loader.resource_cls} because it requires scoped capabilities") - - instances = FakeCogniteResourceGenerator(seed=1337).create_instances(loader.list_write_cls) - - # special case - if isinstance(loader.resource_cls, TransformationSchedule): - for instance in instances: - del instance.id # Client validation does not allow id and externalid to be set simultaneously - - mock_read_yaml_file({"dict.yaml": instances.dump()}, monkeypatch) - - loaded = loader.load_resource( - filepath=Path(loader.folder_name) / "dict.yaml", ToolGlobals=cdf_tool_config, skip_validation=True - ) - assert isinstance( - loaded, (loader.resource_write_cls, loader.list_write_cls) - ), f"loaded must be an instance of {loader.list_write_cls} or {loader.resource_write_cls} but is {type(loaded)}" - - @staticmethod - def check_url(url) -> bool: - try: - response = requests.get(url, allow_redirects=True) - return response.status_code >= 200 and response.status_code <= 300 - except requests.exceptions.RequestException: - return False - - @pytest.mark.parametrize("Loader", LOADER_LIST) - def test_loader_has_doc_url(self, Loader: type[Loader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): - loader = Loader.create_loader(cdf_tool_config, None) - assert loader.doc_url() != loader._doc_base_url, f"{Loader.folder_name} is missing doc_url deep link" - assert self.check_url(loader.doc_url()), f"{Loader.folder_name} doc_url is not accessible" - - -def test_resource_types_is_up_to_date() -> None: - expected = set(LOADER_BY_FOLDER_NAME.keys()) - actual = set(ResourceTypes.__args__) - - missing = expected - actual - extra = actual - expected - assert not missing, f"Missing {missing=}" - assert not extra, f"Extra {extra=}" - - -def cognite_module_files_with_loader() -> Iterable[ParameterSet]: - source_path = REPO_ROOT / "cognite_toolkit" - env = "dev" - with tmp_build_directory() as build_dir: - system_config = SystemYAML.load_from_directory(source_path, env) - config_init = InitConfigYAML( - Environment( - name="not used", - project=os.environ.get("CDF_PROJECT", ""), - build_type="dev", - selected=[], - ) - ).load_defaults(source_path) - config = config_init.as_build_config() - config.set_environment_variables() - config.environment.selected = config.available_modules - - source_by_build_path = BuildCommand().build_config( - build_dir=build_dir, - source_dir=source_path, - config=config, - system_config=system_config, - clean=True, - verbose=False, - ) - for filepath in build_dir.rglob("*.yaml"): - try: - resource_folder = resource_folder_from_path(filepath) - except ValueError: - # Not a resource file - continue - loaders = 
LOADER_BY_FOLDER_NAME.get(resource_folder, []) - if not loaders: - continue - loader = next((loader for loader in loaders if loader.is_supported_file(filepath)), None) - if loader is None: - raise ValueError(f"Could not find loader for {filepath}") - if loader is FunctionLoader and filepath.parent.name != loader.folder_name: - # Functions will only accept YAML in root function folder. - continue - if issubclass(loader, ResourceLoader): - raw = yaml.CSafeLoader(filepath.read_text()).get_data() - source_path = source_by_build_path[filepath] - module_name = module_from_path(source_path) - if isinstance(raw, dict): - yield pytest.param(loader, raw, id=f"{module_name} - {filepath.stem} - dict") - elif isinstance(raw, list): - for no, item in enumerate(raw): - yield pytest.param(loader, item, id=f"{module_name} - {filepath.stem} - list {no}") - - -class TestResourceLoaders: - @pytest.mark.parametrize("loader_cls", RESOURCE_LOADER_LIST) - def test_get_write_cls_spec(self, loader_cls: type[ResourceLoader]): - resource = FakeCogniteResourceGenerator(seed=1337, max_list_dict_items=1).create_instance( - loader_cls.resource_write_cls - ) - resource_dump = resource.dump(camel_case=True) - # These two are handled by the toolkit - resource_dump.pop("dataSetId", None) - resource_dump.pop("fileId", None) - dumped = read_parameters_from_dict(resource_dump) - spec = loader_cls.get_write_cls_parameter_spec() - - extra = dumped - spec - - # The spec is calculated based on the resource class __init__ method. - # There can be deviations in the output from the dump. If that is the case, - # the 'get_write_cls_parameter_spec' must be updated in the loader. See, for example, the DataModelLoader. - assert sorted(extra) == [] - - @pytest.mark.parametrize("loader_cls, content", list(cognite_module_files_with_loader())) - def test_write_cls_spec_against_cognite_modules(self, loader_cls: type[ResourceLoader], content: dict) -> None: - spec = loader_cls.get_write_cls_parameter_spec() - - warnings = validate_resource_yaml(content, spec, Path("test.yaml")) - - assert sorted(warnings) == [] - - @pytest.mark.parametrize("loader_cls", RESOURCE_LOADER_LIST) - def test_empty_required_capabilities_when_no_items( - self, loader_cls: type[ResourceLoader], cdf_tool_config: CDFToolConfig - ): - actual = loader_cls.get_required_capability(loader_cls.list_write_cls([])) - - assert actual == [] - - def test_unique_kind_by_folder(self): - kind = defaultdict(list) - for loader_cls in RESOURCE_LOADER_LIST: - kind[loader_cls.folder_name].append(loader_cls.kind) - - duplicated = {folder: Counter(kinds) for folder, kinds in kind.items() if len(set(kinds)) != len(kinds)} - # we have two types Group loaders, one for scoped and one for all - # this is intended and thus not an issue. 
- duplicated.pop("auth") - - assert not duplicated, f"Duplicated kind by folder: {duplicated!s}" - - -class TestLoaders: - def test_unique_display_names(self, cdf_tool_config: CDFToolConfig): - name_by_count = Counter( - [loader_cls.create_loader(cdf_tool_config, None).display_name for loader_cls in LOADER_LIST] - ) - - duplicates = {name: count for name, count in name_by_count.items() if count > 1} - - assert not duplicates, f"Duplicate display names: {duplicates}" diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/__init__.py b/tests/tests_unit/test_cdf_tk/test_loaders/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_base_loaders.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_base_loaders.py new file mode 100644 index 000000000..f72078dbd --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_base_loaders.py @@ -0,0 +1,312 @@ +import os +from collections import Counter, defaultdict +from collections.abc import Iterable +from pathlib import Path +from unittest.mock import MagicMock + +import pytest +import requests +import yaml +from cognite.client.data_classes import ( + FileMetadata, + Transformation, + TransformationSchedule, +) +from cognite.client.data_classes.data_modeling import Edge, Node +from pytest import MonkeyPatch +from pytest_regressions.data_regression import DataRegressionFixture + +from cognite_toolkit._cdf_tk._parameters import ParameterSet, read_parameters_from_dict +from cognite_toolkit._cdf_tk.commands import BuildCommand, DeployCommand +from cognite_toolkit._cdf_tk.data_classes import ( + BuildConfigYAML, + Environment, + InitConfigYAML, + SystemYAML, +) +from cognite_toolkit._cdf_tk.loaders import ( + LOADER_BY_FOLDER_NAME, + LOADER_LIST, + RESOURCE_LOADER_LIST, + DatapointsLoader, + FileMetadataLoader, + FunctionLoader, + GroupResourceScopedLoader, + Loader, + ResourceLoader, + ResourceTypes, + ViewLoader, +) +from cognite_toolkit._cdf_tk.utils import ( + CDFToolConfig, + module_from_path, + resource_folder_from_path, + tmp_build_directory, +) +from cognite_toolkit._cdf_tk.validation import validate_resource_yaml +from tests.constants import REPO_ROOT +from tests.tests_unit.approval_client import ApprovalCogniteClient +from tests.tests_unit.data import LOAD_DATA, PYTEST_PROJECT +from tests.tests_unit.test_cdf_tk.constants import BUILD_DIR, SNAPSHOTS_DIR_ALL +from tests.tests_unit.utils import FakeCogniteResourceGenerator, mock_read_yaml_file + +SNAPSHOTS_DIR = SNAPSHOTS_DIR_ALL / "load_data_snapshots" + + +@pytest.mark.parametrize( + "loader_cls", + [ + FileMetadataLoader, + DatapointsLoader, + ], +) +def test_loader_class( + loader_cls: type[ResourceLoader], + cognite_client_approval: ApprovalCogniteClient, + data_regression: DataRegressionFixture, +): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.client = cognite_client_approval.mock_client + cdf_tool.data_set_id = 999 + + cmd = DeployCommand(print_warning=False) + loader = loader_cls.create_loader(cdf_tool, LOAD_DATA) + cmd.deploy_resources(loader, cdf_tool, dry_run=False) + + dump = cognite_client_approval.dump() + data_regression.check(dump, fullpath=SNAPSHOTS_DIR / f"{loader.folder_name}.yaml") + + +class TestDeployResources: + def test_deploy_resource_order(self, cognite_client_approval: ApprovalCogniteClient): + build_env_name = "dev" + system_config = 
SystemYAML.load_from_directory(PYTEST_PROJECT, build_env_name) + config = BuildConfigYAML.load_from_directory(PYTEST_PROJECT, build_env_name) + config.environment.selected = ["another_module"] + build_cmd = BuildCommand() + build_cmd.build_config( + BUILD_DIR, PYTEST_PROJECT, config=config, system_config=system_config, clean=True, verbose=False + ) + expected_order = ["MyView", "MyOtherView"] + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.client = cognite_client_approval.mock_client + + cmd = DeployCommand(print_warning=False) + cmd.deploy_resources(ViewLoader.create_loader(cdf_tool, BUILD_DIR), cdf_tool, dry_run=False) + + views = cognite_client_approval.dump(sort=False)["View"] + + actual_order = [view["externalId"] for view in views] + + assert actual_order == expected_order + + +class TestFormatConsistency: + @pytest.mark.parametrize("Loader", RESOURCE_LOADER_LIST) + def test_fake_resource_generator( + self, Loader: type[ResourceLoader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch + ): + fakegenerator = FakeCogniteResourceGenerator(seed=1337) + + loader = Loader.create_loader(cdf_tool_config, None) + instance = fakegenerator.create_instance(loader.resource_write_cls) + + assert isinstance(instance, loader.resource_write_cls) + + @pytest.mark.parametrize("Loader", RESOURCE_LOADER_LIST) + def test_loader_takes_dict( + self, Loader: type[ResourceLoader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch + ): + loader = Loader.create_loader(cdf_tool_config, None) + + if loader.resource_cls in [Transformation, FileMetadata]: + pytest.skip("Skipped loaders that require secondary files") + elif loader.resource_cls in [Edge, Node]: + pytest.skip(f"Skipping {loader.resource_cls} because it has special properties") + elif Loader in [GroupResourceScopedLoader]: + pytest.skip(f"Skipping {loader.resource_cls} because it requires scoped capabilities") + + instance = FakeCogniteResourceGenerator(seed=1337).create_instance(loader.resource_write_cls) + + # special case + if isinstance(instance, TransformationSchedule): + del instance.id # Client validation does not allow id and externalid to be set simultaneously + + mock_read_yaml_file({"dict.yaml": instance.dump()}, monkeypatch) + + loaded = loader.load_resource( + filepath=Path(loader.folder_name) / "dict.yaml", ToolGlobals=cdf_tool_config, skip_validation=True + ) + assert isinstance( + loaded, (loader.resource_write_cls, loader.list_write_cls) + ), f"loaded must be an instance of {loader.list_write_cls} or {loader.resource_write_cls} but is {type(loaded)}" + + @pytest.mark.parametrize("Loader", RESOURCE_LOADER_LIST) + def test_loader_takes_list( + self, Loader: type[ResourceLoader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch + ): + loader = Loader.create_loader(cdf_tool_config, None) + + if loader.resource_cls in [Transformation, FileMetadata]: + pytest.skip("Skipped loaders that require secondary files") + elif loader.resource_cls in [Edge, Node]: + pytest.skip(f"Skipping {loader.resource_cls} because it has special properties") + elif Loader in [GroupResourceScopedLoader]: + pytest.skip(f"Skipping {loader.resource_cls} because it requires scoped capabilities") + + instances = FakeCogniteResourceGenerator(seed=1337).create_instances(loader.list_write_cls) + + # special case + if isinstance(loader.resource_cls, TransformationSchedule): + for instance 
in instances: + del instance.id # Client validation does not allow id and externalid to be set simultaneously + + mock_read_yaml_file({"dict.yaml": instances.dump()}, monkeypatch) + + loaded = loader.load_resource( + filepath=Path(loader.folder_name) / "dict.yaml", ToolGlobals=cdf_tool_config, skip_validation=True + ) + assert isinstance( + loaded, (loader.resource_write_cls, loader.list_write_cls) + ), f"loaded must be an instance of {loader.list_write_cls} or {loader.resource_write_cls} but is {type(loaded)}" + + @staticmethod + def check_url(url) -> bool: + try: + response = requests.get(url, allow_redirects=True) + return response.status_code >= 200 and response.status_code <= 300 + except requests.exceptions.RequestException: + return False + + @pytest.mark.parametrize("Loader", LOADER_LIST) + def test_loader_has_doc_url(self, Loader: type[Loader], cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): + loader = Loader.create_loader(cdf_tool_config, None) + assert loader.doc_url() != loader._doc_base_url, f"{Loader.folder_name} is missing doc_url deep link" + assert self.check_url(loader.doc_url()), f"{Loader.folder_name} doc_url is not accessible" + + +def test_resource_types_is_up_to_date() -> None: + expected = set(LOADER_BY_FOLDER_NAME.keys()) + actual = set(ResourceTypes.__args__) + + missing = expected - actual + extra = actual - expected + assert not missing, f"Missing {missing=}" + assert not extra, f"Extra {extra=}" + + +def cognite_module_files_with_loader() -> Iterable[ParameterSet]: + source_path = REPO_ROOT / "cognite_toolkit" + env = "dev" + with tmp_build_directory() as build_dir: + system_config = SystemYAML.load_from_directory(source_path, env) + config_init = InitConfigYAML( + Environment( + name="not used", + project=os.environ.get("CDF_PROJECT", ""), + build_type="dev", + selected=[], + ) + ).load_defaults(source_path) + config = config_init.as_build_config() + config.set_environment_variables() + # Todo Remove once the new modules in `_cdf_tk/prototypes/_packages` are finished. + config.variables.pop("_cdf_tk", None) + config.environment.selected = config.available_modules + + source_by_build_path = BuildCommand().build_config( + build_dir=build_dir, + source_dir=source_path, + config=config, + system_config=system_config, + clean=True, + verbose=False, + ) + for filepath in build_dir.rglob("*.yaml"): + try: + resource_folder = resource_folder_from_path(filepath) + except ValueError: + # Not a resource file + continue + loaders = LOADER_BY_FOLDER_NAME.get(resource_folder, []) + if not loaders: + continue + loader = next((loader for loader in loaders if loader.is_supported_file(filepath)), None) + if loader is None: + raise ValueError(f"Could not find loader for {filepath}") + if loader is FunctionLoader and filepath.parent.name != loader.folder_name: + # Functions will only accept YAML in root function folder. 
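+ # YAML files nested deeper are assumed to belong to the function's own code rather than to Toolkit resource configs, so they are skipped here.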
+ continue + if issubclass(loader, ResourceLoader): + raw = yaml.CSafeLoader(filepath.read_text()).get_data() + source_path = source_by_build_path[filepath] + module_name = module_from_path(source_path) + if isinstance(raw, dict): + yield pytest.param(loader, raw, id=f"{module_name} - {filepath.stem} - dict") + elif isinstance(raw, list): + for no, item in enumerate(raw): + yield pytest.param(loader, item, id=f"{module_name} - {filepath.stem} - list {no}") + + +class TestResourceLoaders: + @pytest.mark.parametrize("loader_cls", RESOURCE_LOADER_LIST) + def test_get_write_cls_spec(self, loader_cls: type[ResourceLoader]): + resource = FakeCogniteResourceGenerator(seed=1337, max_list_dict_items=1).create_instance( + loader_cls.resource_write_cls + ) + resource_dump = resource.dump(camel_case=True) + # These two are handled by the toolkit + resource_dump.pop("dataSetId", None) + resource_dump.pop("fileId", None) + dumped = read_parameters_from_dict(resource_dump) + spec = loader_cls.get_write_cls_parameter_spec() + + extra = dumped - spec + + # The spec is calculated based on the resource class __init__ method. + # There can be deviations in the output from the dump. If that is the case, + # the 'get_write_cls_parameter_spec' must be updated in the loader. See, for example, the DataModelLoader. + assert sorted(extra) == [] + + @pytest.mark.parametrize("loader_cls, content", list(cognite_module_files_with_loader())) + def test_write_cls_spec_against_cognite_modules(self, loader_cls: type[ResourceLoader], content: dict) -> None: + spec = loader_cls.get_write_cls_parameter_spec() + + warnings = validate_resource_yaml(content, spec, Path("test.yaml")) + + assert sorted(warnings) == [] + + @pytest.mark.parametrize("loader_cls", RESOURCE_LOADER_LIST) + def test_empty_required_capabilities_when_no_items( + self, loader_cls: type[ResourceLoader], cdf_tool_config: CDFToolConfig + ): + actual = loader_cls.get_required_capability(loader_cls.list_write_cls([])) + + assert actual == [] + + def test_unique_kind_by_folder(self): + kind = defaultdict(list) + for loader_cls in RESOURCE_LOADER_LIST: + kind[loader_cls.folder_name].append(loader_cls.kind) + + duplicated = {folder: Counter(kinds) for folder, kinds in kind.items() if len(set(kinds)) != len(kinds)} + # We have two types of Group loaders, one for resource-scoped and one for all-scoped groups; + # this is intended and thus not an issue. 
+ duplicated.pop("auth") + + assert not duplicated, f"Duplicated kind by folder: {duplicated!s}" + + +class TestLoaders: + def test_unique_display_names(self, cdf_tool_config: CDFToolConfig): + name_by_count = Counter( + [loader_cls.create_loader(cdf_tool_config, None).display_name for loader_cls in LOADER_LIST] + ) + + duplicates = {name: count for name, count in name_by_count.items() if count > 1} + + assert not duplicates, f"Duplicate display names: {duplicates}" diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_container_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_container_loader.py new file mode 100644 index 000000000..010fcf14b --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_container_loader.py @@ -0,0 +1,37 @@ +import pytest + +from cognite_toolkit._cdf_tk._parameters import read_parameters_from_dict +from cognite_toolkit._cdf_tk.loaders import ContainerLoader + + +class TestContainerLoader: + @pytest.mark.parametrize( + "item", + [ + pytest.param( + { + "properties": { + "myDirectRelation": { + "name": "my direct relation", + "type": { + "type": "direct", + "container": { + "type": "container", + "space": "sp_my_space", + "externalId": "my_container", + }, + }, + } + } + }, + id="Direct relation property with require constraint.", + ), + ], + ) + def test_valid_spec(self, item: dict): + spec = ContainerLoader.get_write_cls_parameter_spec() + dumped = read_parameters_from_dict(item) + + extra = dumped - spec + + assert not extra, f"Extra keys: {extra}" diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_data_model_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_model_loader.py new file mode 100644 index 000000000..1e1da433e --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_model_loader.py @@ -0,0 +1,54 @@ +from unittest.mock import MagicMock + +from cognite.client.data_classes import data_modeling as dm + +from cognite_toolkit._cdf_tk.commands import DeployCommand +from cognite_toolkit._cdf_tk.loaders import DataModelLoader +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient + + +class TestDataModelLoader: + def test_update_data_model_random_view_order(self, cognite_client_approval: ApprovalCogniteClient): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.client = cognite_client_approval.mock_client + cdf_data_model = dm.DataModel( + space="sp_space", + external_id="my_model", + version="1", + views=[ + dm.ViewId(space="sp_space", external_id="first", version="1"), + dm.ViewId(space="sp_space", external_id="second", version="1"), + ], + last_updated_time=1, + created_time=1, + description=None, + name=None, + is_global=False, + ) + # Simulating that the data model is available in CDF + cognite_client_approval.append(dm.DataModel, cdf_data_model) + + local_data_model = dm.DataModelApply( + space="sp_space", + external_id="my_model", + version="1", + views=[ + dm.ViewId(space="sp_space", external_id="second", version="1"), + dm.ViewId(space="sp_space", external_id="first", version="1"), + ], + description=None, + name=None, + ) + + loader = DataModelLoader.create_loader(cdf_tool, None) + cmd = DeployCommand(print_warning=False) + to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple( + dm.DataModelApplyList([local_data_model]), loader + ) + + 
assert len(to_create) == 0 + assert len(to_change) == 0 + assert len(unchanged) == 1 diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_data_set_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_set_loader.py new file mode 100644 index 000000000..4fd7a0967 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_data_set_loader.py @@ -0,0 +1,35 @@ +from unittest.mock import MagicMock + +from cognite.client.data_classes import DataSet + +from cognite_toolkit._cdf_tk.commands import DeployCommand +from cognite_toolkit._cdf_tk.loaders import DataSetsLoader +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient +from tests.tests_unit.data import LOAD_DATA + + +class TestDataSetsLoader: + def test_upsert_data_set(self, cognite_client_approval: ApprovalCogniteClient): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.client = cognite_client_approval.mock_client + + loader = DataSetsLoader.create_loader(cdf_tool, None) + loaded = loader.load_resource(LOAD_DATA / "data_sets" / "1.my_datasets.yaml", cdf_tool, skip_validation=False) + assert len(loaded) == 2 + + first = DataSet.load(loaded[0].dump()) + # Set the properties that are set on the server side + first.id = 42 + first.created_time = 42 + first.last_updated_time = 42 + # Simulate that the data set is already in CDF + cognite_client_approval.append(DataSet, first) + cmd = DeployCommand(print_warning=False) + to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple(loaded, loader) + + assert len(to_create) == 1 + assert len(to_change) == 0 + assert len(unchanged) == 1 diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_extraction_pipeline_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_extraction_pipeline_loader.py new file mode 100644 index 000000000..ee7b3a194 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_extraction_pipeline_loader.py @@ -0,0 +1,137 @@ +from collections.abc import Hashable +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest +import yaml +from _pytest.monkeypatch import MonkeyPatch +from cognite.client.data_classes import ExtractionPipelineConfig + +from cognite_toolkit._cdf_tk.commands import CleanCommand, DeployCommand +from cognite_toolkit._cdf_tk.loaders import ( + DataSetsLoader, + ExtractionPipelineConfigLoader, + ExtractionPipelineLoader, + RawDatabaseLoader, + RawTableLoader, + ResourceLoader, +) +from cognite_toolkit._cdf_tk.loaders.data_classes import RawDatabaseTable +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient +from tests.tests_unit.utils import mock_read_yaml_file + + +class TestExtractionPipelineDependencies: + _yaml = """ + externalId: 'ep_src_asset_hamburg_sap' + name: 'Hamburg SAP' + dataSetId: 12345 + """ + + config_yaml = """ + externalId: 'ep_src_asset' + description: 'DB extractor config reading data from Springfield SAP' + """ + + def test_load_extraction_pipeline_upsert_create_one( + self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch + ): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + 
cdf_tool.client = cognite_client_approval.mock_client + + cognite_client_approval.append( + ExtractionPipelineConfig, + ExtractionPipelineConfig( + external_id="ep_src_asset", + description="DB extractor config reading data from Springfield SAP", + ), + ) + + def test_load_extraction_pipeline_upsert_update_one( + self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch + ): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.client = cognite_client_approval.mock_client + + cognite_client_approval.append( + ExtractionPipelineConfig, + ExtractionPipelineConfig( + external_id="ep_src_asset", + description="DB extractor config reading data from Springfield SAP", + config="\n logger: \n {level: WARN}", + ), + ) + + mock_read_yaml_file( + {"extraction_pipeline.config.yaml": yaml.CSafeLoader(self.config_yaml).get_data()}, monkeypatch + ) + + cmd = DeployCommand(print_warning=False) + loader = ExtractionPipelineConfigLoader.create_loader(cdf_tool, None) + resources = loader.load_resource(Path("extraction_pipeline.config.yaml"), cdf_tool, skip_validation=False) + to_create, changed, unchanged = cmd.to_create_changed_unchanged_triple([resources], loader) + assert len(to_create) == 0 + assert len(changed) == 1 + assert len(unchanged) == 0 + + def test_load_extraction_pipeline_delete_one( + self, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch + ): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.client = cognite_client_approval.mock_client + + cognite_client_approval.append( + ExtractionPipelineConfig, + ExtractionPipelineConfig( + external_id="ep_src_asset", + description="DB extractor config reading data from Springfield SAP", + config="\n logger: \n {level: WARN}", + ), + ) + + mock_read_yaml_file( + {"extraction_pipeline.config.yaml": yaml.CSafeLoader(self.config_yaml).get_data()}, monkeypatch + ) + + cmd = CleanCommand(print_warning=False) + loader = ExtractionPipelineConfigLoader.create_loader(cdf_tool, None) + with patch.object( + ExtractionPipelineConfigLoader, "find_files", return_value=[Path("extraction_pipeline.config.yaml")] + ): + res = cmd.clean_resources(loader, cdf_tool, dry_run=True, drop=True) + assert res.deleted == 1 + + +class TestExtractionPipelineLoader: + @pytest.mark.parametrize( + "item, expected", + [ + pytest.param( + { + "dataSetExternalId": "ds_my_dataset", + "rawTables": [ + {"dbName": "my_db", "tableName": "my_table"}, + {"dbName": "my_db", "tableName": "my_table2"}, + ], + }, + [ + (DataSetsLoader, "ds_my_dataset"), + (RawDatabaseLoader, RawDatabaseTable("my_db")), + (RawTableLoader, RawDatabaseTable("my_db", "my_table")), + (RawTableLoader, RawDatabaseTable("my_db", "my_table2")), + ], + id="Extraction pipeline to Table", + ), + ], + ) + def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: + actual = ExtractionPipelineLoader.get_dependent_items(item) + + assert list(actual) == expected diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_function_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_function_loader.py new file mode 100644 index 000000000..12775d32a --- /dev/null +++ 
b/tests/tests_unit/test_cdf_tk/test_loaders/test_function_loader.py @@ -0,0 +1,28 @@ +from unittest.mock import MagicMock + +from cognite.client.data_classes import FunctionWrite + +from cognite_toolkit._cdf_tk.loaders import FunctionLoader +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient +from tests.tests_unit.data import LOAD_DATA + + +class TestFunctionLoader: + def test_load_functions(self, cognite_client_approval: ApprovalCogniteClient): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + + loader = FunctionLoader.create_loader(cdf_tool, None) + loaded = loader.load_resource(LOAD_DATA / "functions" / "1.my_functions.yaml", cdf_tool, skip_validation=False) + assert len(loaded) == 2 + + def test_load_function(self, cognite_client_approval: ApprovalCogniteClient): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + + loader = FunctionLoader.create_loader(cdf_tool, None) + loaded = loader.load_resource(LOAD_DATA / "functions" / "1.my_function.yaml", cdf_tool, skip_validation=False) + assert isinstance(loaded, FunctionWrite) diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_group_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_group_loader.py new file mode 100644 index 000000000..f12b08fe3 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_group_loader.py @@ -0,0 +1,203 @@ +from collections.abc import Hashable + +import pytest +from _pytest.monkeypatch import MonkeyPatch +from cognite.client.data_classes import Group, GroupWrite + +from cognite_toolkit._cdf_tk.commands import DeployCommand +from cognite_toolkit._cdf_tk.loaders import ( + DataSetsLoader, + ExtractionPipelineLoader, + GroupAllScopedLoader, + GroupLoader, + GroupResourceScopedLoader, + RawDatabaseLoader, + RawTableLoader, + ResourceLoader, + SpaceLoader, +) +from cognite_toolkit._cdf_tk.loaders.data_classes import RawDatabaseTable +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient +from tests.tests_unit.data import LOAD_DATA + + +class TestGroupLoader: + def test_load_all_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): + loader = GroupAllScopedLoader.create_loader(cdf_tool_config, None) + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_unscoped.yaml", cdf_tool_config, skip_validation=False + ) + assert loaded.name == "unscoped_group_name" + + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=False + ) + assert loaded is None + + def test_load_resource_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): + loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_unscoped.yaml", cdf_tool_config, skip_validation=False + ) + + assert loaded is None + + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=False + ) + assert loaded.name == "scoped_group_name" + assert len(loaded.capabilities) == 4 + + caps = {str(type(element).__name__): element for element in loaded.capabilities} 
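+ # The loader is expected to have resolved the scope ids from external ids to internal (integer) ids; the assertions below verify this.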
+ assert all(isinstance(item, int) for item in caps["DataSetsAcl"].scope.ids) + assert all(isinstance(item, int) for item in caps["AssetsAcl"].scope.ids) + assert all(isinstance(item, int) for item in caps["ExtractionConfigsAcl"].scope.ids) + assert caps["SessionsAcl"].scope._scope_name == "all" + + def test_load_group_list_resource_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): + loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_list_combined.yaml", cdf_tool_config, skip_validation=True + ) + + assert isinstance(loaded, GroupWrite) + assert loaded.name == "scoped_group_name" + + def test_load_group_list_all_scoped_only(self, cdf_tool_config: CDFToolConfig, monkeypatch: MonkeyPatch): + loader = GroupAllScopedLoader.create_loader(cdf_tool_config, None) + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_list_combined.yaml", cdf_tool_config, skip_validation=True + ) + + assert isinstance(loaded, GroupWrite) + assert loaded.name == "unscoped_group_name" + + def test_unchanged_new_group( + self, cdf_tool_config: CDFToolConfig, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch + ): + loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=True + ) + + # Simulate that one group is already in CDF + cognite_client_approval.append( + Group, + [ + Group( + id=123, + name=loaded.name, + source_id=loaded.source_id, + capabilities=loaded.capabilities, + metadata=loaded.metadata, + is_deleted=False, + ) + ], + ) + + new_group = GroupWrite(name="new_group", source_id="123", capabilities=[]) + cmd = DeployCommand(print_warning=False) + to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple( + resources=[loaded, new_group], loader=loader + ) + + assert len(to_create) == 1 + assert len(to_change) == 0 + assert len(unchanged) == 1 + + def test_upsert_group( + self, cdf_tool_config: CDFToolConfig, cognite_client_approval: ApprovalCogniteClient, monkeypatch: MonkeyPatch + ): + loader = GroupResourceScopedLoader.create_loader(cdf_tool_config, None) + loaded = loader.load_resource( + LOAD_DATA / "auth" / "1.my_group_scoped.yaml", cdf_tool_config, skip_validation=True + ) + cmd = DeployCommand(print_warning=False) + + # Simulate that the group is already in CDF, but with fewer capabilities + cognite_client_approval.append( + Group, + [ + Group( + id=123, + name=loaded.name, + source_id=loaded.source_id, + capabilities=loaded.capabilities[0:1], + metadata=loaded.metadata, + is_deleted=False, + ) + ], + ) + + # The group exists but has fewer capabilities than the local one, so it should be updated + to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple(resources=[loaded], loader=loader) + + assert len(to_create) == 0 + assert len(to_change) == 1 + assert len(unchanged) == 0 + + cmd._update_resources( + to_change, + loader, + ) + + assert cognite_client_approval.create_calls()["Group"] == 1 + assert cognite_client_approval.delete_calls()["Group"] == 1 + + @pytest.mark.parametrize( + "item, expected", + [ + pytest.param( + {"capabilities": [{"dataModelsAcl": {"scope": {"spaceIdScope": {"spaceIds": ["space1", "space2"]}}}}]}, + [(SpaceLoader, "space1"), (SpaceLoader, "space2")], + id="SpaceId scope", + ), + pytest.param( + {"capabilities": [{"timeSeriesAcl": {"scope": {"datasetScope": {"ids": 
["ds_dataset1"]}}}}]}, + [ + (DataSetsLoader, "ds_dataset1"), + ], + id="Dataset scope", + ), + pytest.param( + { + "capabilities": [ + {"extractionRunsAcl": {"scope": {"extractionPipelineScope": {"ids": ["ex_my_extraction"]}}}} + ] + }, + [ + (ExtractionPipelineLoader, "ex_my_extraction"), + ], + id="Extraction pipeline scope", + ), + pytest.param( + {"capabilities": [{"rawAcl": {"scope": {"tableScope": {"dbsToTables": {"my_db": ["my_table"]}}}}}]}, + [ + (RawDatabaseLoader, RawDatabaseTable("my_db")), + (RawTableLoader, RawDatabaseTable("my_db", "my_table")), + ], + id="Table scope", + ), + pytest.param( + {"capabilities": [{"datasetsAcl": {"scope": {"idscope": {"ids": ["ds_my_dataset"]}}}}]}, + [ + (DataSetsLoader, "ds_my_dataset"), + ], + id="ID scope dataset", + ), + pytest.param( + {"capabilities": [{"extractionPipelinesAcl": {"scope": {"idscope": {"ids": ["ex_my_extraction"]}}}}]}, + [ + (ExtractionPipelineLoader, "ex_my_extraction"), + ], + id="ID scope extractionpipline ", + ), + ], + ) + def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: + actual_dependent_items = GroupLoader.get_dependent_items(item) + + assert list(actual_dependent_items) == expected diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_node_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_node_loader.py new file mode 100644 index 000000000..920c20973 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_node_loader.py @@ -0,0 +1,80 @@ +from pathlib import Path + +import pytest +import yaml +from _pytest.monkeypatch import MonkeyPatch +from cognite.client.data_classes.data_modeling import NodeApply + +from cognite_toolkit._cdf_tk.loaders import NodeLoader +from cognite_toolkit._cdf_tk.loaders.data_classes import NodeAPICall, NodeApplyListWithCall +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.utils import mock_read_yaml_file + + +class TestNodeLoader: + @pytest.mark.parametrize( + "yamL_raw, expected", + [ + pytest.param( + """space: my_space +externalId: my_external_id""", + NodeApplyListWithCall([NodeApply("my_space", "my_external_id")]), + id="Single node no API call", + ), + pytest.param( + """- space: my_space + externalId: my_first_node +- space: my_space + externalId: my_second_node +""", + NodeApplyListWithCall( + [ + NodeApply("my_space", "my_first_node"), + NodeApply("my_space", "my_second_node"), + ] + ), + id="Multiple nodes no API call", + ), + pytest.param( + """autoCreateDirectRelations: true +skipOnVersionConflict: false +replace: true +node: + space: my_space + externalId: my_external_id""", + NodeApplyListWithCall([NodeApply("my_space", "my_external_id")], NodeAPICall(True, False, True)), + id="Single node with API call", + ), + pytest.param( + """autoCreateDirectRelations: true +skipOnVersionConflict: false +replace: true +nodes: +- space: my_space + externalId: my_first_node +- space: my_space + externalId: my_second_node + """, + NodeApplyListWithCall( + [ + NodeApply("my_space", "my_first_node"), + NodeApply("my_space", "my_second_node"), + ], + NodeAPICall(True, False, True), + ), + id="Multiple nodes with API call", + ), + ], + ) + def test_load_nodes( + self, + yamL_raw: str, + expected: NodeApplyListWithCall, + cdf_tool_config: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = NodeLoader.create_loader(cdf_tool_config, None) + mock_read_yaml_file({"my_node.yaml": yaml.safe_load(yamL_raw)}, monkeypatch) + loaded = 
loader.load_resource(Path("my_node.yaml"), cdf_tool_config, skip_validation=True) + + assert loaded.dump() == expected.dump() diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_time_series_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_time_series_loader.py new file mode 100644 index 000000000..c5d34d182 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_time_series_loader.py @@ -0,0 +1,53 @@ +from pathlib import Path + +import yaml +from _pytest.monkeypatch import MonkeyPatch +from cognite.client.data_classes import DataSet + +from cognite_toolkit._cdf_tk.loaders import TimeSeriesLoader +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient +from tests.tests_unit.utils import mock_read_yaml_file + + +class TestTimeSeriesLoader: + timeseries_yaml = """ +externalId: pi_160696 +name: VAL_23-PT-92504:X.Value +dataSetExternalId: ds_timeseries_oid +isString: false +metadata: + compdev: '0' + location5: '2' +isStep: false +description: PH 1stStgSuctCool Gas Out +""" + + def test_load_skip_validation_no_preexisting_dataset( + self, + cognite_client_approval: ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = TimeSeriesLoader(cognite_client_approval.mock_client, None) + mock_read_yaml_file({"timeseries.yaml": yaml.safe_load(self.timeseries_yaml)}, monkeypatch) + loaded = loader.load_resource(Path("timeseries.yaml"), cdf_tool_config_real, skip_validation=True) + + assert len(loaded) == 1 + assert loaded[0].data_set_id == -1 + + def test_load_skip_validation_with_preexisting_dataset( + self, + cognite_client_approval: ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + cognite_client_approval.append(DataSet, DataSet(id=12345, external_id="ds_timeseries_oid")) + loader = TimeSeriesLoader(cognite_client_approval.mock_client, None) + + mock_read_yaml_file({"timeseries.yaml": yaml.safe_load(self.timeseries_yaml)}, monkeypatch) + + loaded = loader.load_resource(Path("timeseries.yaml"), cdf_tool_config_real, skip_validation=True) + + assert len(loaded) == 1 + assert loaded[0].data_set_id == 12345 diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_transformation_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_transformation_loader.py new file mode 100644 index 000000000..2af7e43ee --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_transformation_loader.py @@ -0,0 +1,200 @@ +import pathlib +from collections.abc import Hashable +from pathlib import Path +from unittest.mock import patch + +import pytest +import yaml +from _pytest.monkeypatch import MonkeyPatch +from cognite.client.data_classes import data_modeling as dm + +from cognite_toolkit._cdf_tk.exceptions import ToolkitYAMLFormatError +from cognite_toolkit._cdf_tk.loaders import ( + DataModelLoader, + DataSetsLoader, + RawDatabaseLoader, + RawTableLoader, + ResourceLoader, + SpaceLoader, + TransformationLoader, + ViewLoader, +) +from cognite_toolkit._cdf_tk.loaders.data_classes import RawDatabaseTable +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient +from tests.tests_unit.utils import mock_read_yaml_file + + +class TestTransformationLoader: + trafo_yaml = """ +externalId: tr_first_transformation +name: 'example:first:transformation' +interval: '{{scheduleHourly}}' +isPaused: true +query: "INLINE" +destination: + type: 
'assets' +ignoreNullFields: true +isPublic: true +conflictMode: upsert +""" + + trafo_sql = "FILE" + + def test_no_auth_load( + self, + cognite_client_approval: ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = TransformationLoader(cognite_client_approval.mock_client, None) + mock_read_yaml_file({"transformation.yaml": yaml.CSafeLoader(self.trafo_yaml).get_data()}, monkeypatch) + loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) + assert loaded.destination_oidc_credentials is None + assert loaded.source_oidc_credentials is None + + def test_oidc_auth_load( + self, + cognite_client_approval: ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = TransformationLoader(cognite_client_approval.mock_client, None) + + resource = yaml.CSafeLoader(self.trafo_yaml).get_data() + + resource["authentication"] = { + "clientId": "{{cicd_clientId}}", + "clientSecret": "{{cicd_clientSecret}}", + "tokenUri": "{{cicd_tokenUri}}", + "cdfProjectName": "{{cdfProjectName}}", + "scopes": "{{cicd_scopes}}", + "audience": "{{cicd_audience}}", + } + + mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) + + loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) + assert loaded.destination_oidc_credentials.dump() == loaded.source_oidc_credentials.dump() + assert loaded.destination is not None + + def test_oidc_raise_if_invalid( + self, + cognite_client_approval: ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = TransformationLoader(cognite_client_approval.mock_client, None) + + resource = yaml.CSafeLoader(self.trafo_yaml).get_data() + + resource["authentication"] = { + "clientId": "{{cicd_clientId}}", + "clientSecret": "{{cicd_clientSecret}}", + } + + mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) + + with pytest.raises(ToolkitYAMLFormatError): + loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) + + def test_sql_file( + self, + cognite_client_approval: ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = TransformationLoader(cognite_client_approval.mock_client, None) + + resource = yaml.CSafeLoader(self.trafo_yaml).get_data() + resource.pop("query") + mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) + + with patch.object(TransformationLoader, "_get_query_file", return_value=Path("transformation.sql")): + with patch.object(pathlib.Path, "read_text", return_value=self.trafo_sql): + loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) + assert loaded.query == self.trafo_sql + + def test_sql_inline( + self, + cognite_client_approval: ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = TransformationLoader(cognite_client_approval.mock_client, None) + + resource = yaml.CSafeLoader(self.trafo_yaml).get_data() + + mock_read_yaml_file({"transformation.yaml": resource}, monkeypatch) + + with patch.object(TransformationLoader, "_get_query_file", return_value=None): + loaded = loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) + assert loaded.query == resource["query"] + + def test_if_ambiguous( + self, + cognite_client_approval: 
ApprovalCogniteClient, + cdf_tool_config_real: CDFToolConfig, + monkeypatch: MonkeyPatch, + ) -> None: + loader = TransformationLoader(cognite_client_approval.mock_client, None) + + mock_read_yaml_file({"transformation.yaml": yaml.CSafeLoader(self.trafo_yaml).get_data()}, monkeypatch) + + with pytest.raises(ToolkitYAMLFormatError): + with patch.object(TransformationLoader, "_get_query_file", return_value=Path("transformation.sql")): + with patch.object(pathlib.Path, "read_text", return_value=self.trafo_sql): + loader.load_resource(Path("transformation.yaml"), cdf_tool_config_real, skip_validation=False) + + @pytest.mark.parametrize( + "item, expected", + [ + pytest.param( + { + "dataSetExternalId": "ds_my_dataset", + "destination": { + "type": "instances", + "dataModel": { + "space": "sp_model_space", + "externalId": "my_model", + "version": "v1", + "destinationType": "assets", + }, + "instanceSpace": "sp_data_space", + }, + }, + [ + (DataSetsLoader, "ds_my_dataset"), + (SpaceLoader, "sp_data_space"), + (DataModelLoader, dm.DataModelId(space="sp_model_space", external_id="my_model", version="v1")), + ], + id="Transformation to data model", + ), + pytest.param( + { + "destination": { + "type": "nodes", + "view": {"space": "sp_space", "externalId": "my_view", "version": "v1"}, + "instanceSpace": "sp_data_space", + } + }, + [ + (SpaceLoader, "sp_data_space"), + (ViewLoader, dm.ViewId(space="sp_space", external_id="my_view", version="v1")), + ], + id="Transformation to nodes ", + ), + pytest.param( + {"destination": {"type": "raw", "database": "my_db", "table": "my_table"}}, + [ + (RawDatabaseLoader, RawDatabaseTable("my_db")), + (RawTableLoader, RawDatabaseTable("my_db", "my_table")), + ], + id="Transformation to RAW table", + ), + ], + ) + def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: + actual = TransformationLoader.get_dependent_items(item) + + assert list(actual) == expected diff --git a/tests/tests_unit/test_cdf_tk/test_loaders/test_view_loader.py b/tests/tests_unit/test_cdf_tk/test_loaders/test_view_loader.py new file mode 100644 index 000000000..26de1d664 --- /dev/null +++ b/tests/tests_unit/test_cdf_tk/test_loaders/test_view_loader.py @@ -0,0 +1,180 @@ +from collections.abc import Hashable +from unittest.mock import MagicMock + +import pytest +from cognite.client.data_classes import data_modeling as dm + +from cognite_toolkit._cdf_tk._parameters import read_parameters_from_dict +from cognite_toolkit._cdf_tk.commands import DeployCommand +from cognite_toolkit._cdf_tk.loaders import ContainerLoader, ResourceLoader, SpaceLoader, ViewLoader +from cognite_toolkit._cdf_tk.utils import CDFToolConfig +from tests.tests_unit.approval_client import ApprovalCogniteClient + + +class TestViewLoader: + @pytest.mark.parametrize( + "item", + [ + pytest.param( + { + "filter": { + "hasData": [ + {"type": "container", "space": "sp_my_space", "externalId": "container_id"}, + {"type": "view", "space": "sp_my_space", "externalId": "view_id"}, + ] + } + }, + id="HasData Filter", + ), + pytest.param( + { + "properties": { + "reverseDirectRelation": { + "connectionType": "multi_reverse_direct_relation", + "source": { + "type": "view", + "space": "sp_my_space", + "externalId": "view_id", + "version": "v42", + }, + "through": { + "source": { + "type": "view", + "space": "sp_my_space", + "externalId": "view_id", + "version": "v42", + }, + "identifier": "view_property", + }, + } + } + }, + id="Reverse Direct Relation Property", + ), + ], + ) + def 
test_valid_spec(self, item: dict): + spec = ViewLoader.get_write_cls_parameter_spec() + dumped = read_parameters_from_dict(item) + + extra = dumped - spec + + assert not extra, f"Extra keys: {extra}" + + def test_update_view_with_interface(self, cognite_client_approval: ApprovalCogniteClient): + cdf_tool = MagicMock(spec=CDFToolConfig) + cdf_tool.verify_client.return_value = cognite_client_approval.mock_client + cdf_tool.verify_capabilities.return_value = cognite_client_approval.mock_client + cdf_tool.client = cognite_client_approval.mock_client + prop1 = dm.MappedProperty( + dm.ContainerId(space="sp_space", external_id="container_id"), + "prop1", + type=dm.Text(), + nullable=True, + auto_increment=False, + ) + interface = dm.View( + space="sp_space", + external_id="interface", + version="1", + properties={"prop1": prop1}, + last_updated_time=1, + created_time=1, + description=None, + name=None, + filter=None, + implements=None, + writable=True, + used_for="node", + is_global=False, + ) + # Note that child views always contain all properties of their parent interfaces. + child_cdf = dm.View( + space="sp_space", + external_id="child", + version="1", + properties={"prop1": prop1}, + last_updated_time=1, + created_time=1, + description=None, + name=None, + filter=None, + implements=[interface.as_id()], + writable=True, + used_for="node", + is_global=False, + ) + child_local = dm.ViewApply( + space="sp_space", + external_id="child", + version="1", + implements=[interface.as_id()], + ) + # Simulating that the interface and child_cdf are available in CDF + cognite_client_approval.append(dm.View, [interface, child_cdf]) + + loader = ViewLoader.create_loader(cdf_tool, None) + cmd = DeployCommand(print_warning=False) + to_create, to_change, unchanged = cmd.to_create_changed_unchanged_triple( + dm.ViewApplyList([child_local]), loader + ) + + assert len(to_create) == 0 + assert len(to_change) == 0 + assert len(unchanged) == 1 + + @pytest.mark.parametrize( + "item, expected", + [ + pytest.param( + { + "space": "sp_my_space", + "properties": { + "name": { + "container": { + "type": "container", + "space": "my_container_space", + "externalId": "my_container", + } + } + }, + }, + [ + (SpaceLoader, "sp_my_space"), + (ContainerLoader, dm.ContainerId(space="my_container_space", external_id="my_container")), + ], + id="View with one container property", + ), + pytest.param( + { + "space": "sp_my_space", + "properties": { + "toEdge": { + "source": { + "type": "view", + "space": "my_view_space", + "externalId": "my_view", + "version": "1", + }, + "edgeSource": { + "type": "view", + "space": "my_other_view_space", + "externalId": "my_edge_view", + "version": "42", + }, + } + }, + }, + [ + (SpaceLoader, "sp_my_space"), + (ViewLoader, dm.ViewId(space="my_view_space", external_id="my_view", version="1")), + (ViewLoader, dm.ViewId(space="my_other_view_space", external_id="my_edge_view", version="42")), + ], + id="View with edge property", + ), + ], + ) + def test_get_dependent_items(self, item: dict, expected: list[tuple[type[ResourceLoader], Hashable]]) -> None: + actual = ViewLoader.get_dependent_items(item) + + assert list(actual) == expected diff --git a/tests/tests_unit/test_cdf_tk/test_templates.py b/tests/tests_unit/test_cdf_tk/test_templates.py deleted file mode 100644 index 929aad3b8..000000000 --- a/tests/tests_unit/test_cdf_tk/test_templates.py +++ /dev/null @@ -1,365 +0,0 @@ -from __future__ import annotations - -from collections.abc import Iterable -from pathlib import Path -from typing import 
Any - -import pytest -import yaml - -from cognite_toolkit._cdf_tk.commands.build import BuildCommand, _BuildState, _Helpers -from cognite_toolkit._cdf_tk.data_classes import ( - BuildConfigYAML, - ConfigEntry, - Environment, - InitConfigYAML, - SystemYAML, -) -from cognite_toolkit._cdf_tk.loaders import LOADER_BY_FOLDER_NAME -from cognite_toolkit._cdf_tk.utils import ( - YAMLComment, - flatten_dict, - iterate_modules, - module_from_path, -) -from tests.tests_unit.data import PYTEST_PROJECT -from tests.tests_unit.test_cdf_tk.constants import BUILD_DIR - - -def dict_keys(d: dict[str, Any]) -> set[str]: - keys = set() - for k, v in d.items(): - keys.add(k) - if isinstance(v, dict): - keys.update(dict_keys(v)) - return keys - - -@pytest.fixture(scope="session") -def config_yaml() -> str: - return (PYTEST_PROJECT / "config.dev.yaml").read_text() - - -@pytest.fixture(scope="session") -def dummy_environment() -> Environment: - return Environment( - name="dev", - project="my_project", - build_type="dev", - selected=["none"], - ) - - -class TestConfigYAML: - def test_producing_correct_keys(self, config_yaml: str, dummy_environment: Environment) -> None: - expected_keys = set(flatten_dict(yaml.safe_load(config_yaml))) - # Custom keys are not loaded from the module folder. - # This custom key is added o the dev.config.yaml for other tests. - expected_keys.remove(("variables", "custom_modules", "my_example_module", "transformation_is_paused")) - # Skip all environment variables - expected_keys = {k for k in expected_keys if not k[0] == "environment"} - - config = InitConfigYAML(dummy_environment).load_defaults(PYTEST_PROJECT) - - actual_keys = set(config.keys()) - missing = expected_keys - actual_keys - assert not missing, f"Missing keys: {missing}" - extra = actual_keys - expected_keys - assert not extra, f"Extra keys: {extra}" - - def test_extract_extract_config_yaml_comments(self, config_yaml: str) -> None: - expected_comments = { - ("variables", "cognite_modules", "a_module", "readonly_source_id"): YAMLComment( - above=["This is a comment in the middle of the file"], after=[] - ), - ("variables", "cognite_modules", "another_module", "default_location"): YAMLComment( - above=["This is a comment at the beginning of the module."] - ), - ("variables", "cognite_modules", "another_module", "source_asset"): YAMLComment( - after=["This is an extra comment added to the config only 'lore ipsum'"] - ), - ("variables", "cognite_modules", "another_module", "source_files"): YAMLComment( - after=["This is a comment after a variable"] - ), - } - - actual_comments = InitConfigYAML._extract_comments(config_yaml) - - assert actual_comments == expected_comments - - @pytest.mark.parametrize( - "raw_file, key_prefix, expected_comments", - [ - pytest.param( - """--- -# This is a module comment -variable: value # After variable comment -# Before variable comment -variable2: value2 -variable3: 'value with #in it' -variable4: "value with #in it" # But a comment after -""", - tuple("super_module.module_a".split(".")), - { - ("super_module", "module_a", "variable"): YAMLComment( - after=["After variable comment"], above=["This is a module comment"] - ), - ("super_module", "module_a", "variable2"): YAMLComment(above=["Before variable comment"]), - ("super_module", "module_a", "variable4"): YAMLComment(after=["But a comment after"]), - }, - id="module comments", - ) - ], - ) - def test_extract_default_config_comments( - self, raw_file: str, key_prefix: tuple[str, ...], expected_comments: dict[str, Any] - ): - actual_comments = 
InitConfigYAML._extract_comments(raw_file, key_prefix) - assert actual_comments == expected_comments - - def test_persist_variable_with_comment(self, config_yaml: str) -> None: - custom_comment = "This is an extra comment added to the config only 'lore ipsum'" - - config = InitConfigYAML.load_existing(config_yaml).load_defaults(PYTEST_PROJECT) - - dumped = config.dump_yaml_with_comments() - loaded = yaml.safe_load(dumped) - assert loaded["variables"]["cognite_modules"]["another_module"]["source_asset"] == "my_new_workmate" - assert custom_comment in dumped - - def test_added_and_removed_variables(self, config_yaml: str) -> None: - existing_config_yaml = yaml.safe_load(config_yaml) - # Added = Exists in the BUILD_CONFIG directory default.config.yaml files but not in config.yaml - existing_config_yaml["variables"]["cognite_modules"]["another_module"].pop("source_asset") - # Removed = Exists in config.yaml but not in the BUILD_CONFIG directory default.config.yaml files - existing_config_yaml["variables"]["cognite_modules"]["another_module"]["removed_variable"] = "old_value" - - config = InitConfigYAML.load_existing(yaml.safe_dump(existing_config_yaml)).load_defaults(PYTEST_PROJECT) - - removed = [v for v in config.values() if v.default_value is None] - # There is already a custom variable in the config.yaml file - assert len(removed) == 2 - assert ("variables", "cognite_modules", "another_module", "removed_variable") in [v.key_path for v in removed] - - added = [v for v in config.values() if v.current_value is None] - assert len(added) == 1 - assert added[0].key_path == ("variables", "cognite_modules", "another_module", "source_asset") - - def test_load_variables(self, dummy_environment: Environment) -> None: - expected = { - ("variables", "cognite_modules", "a_module", "readonly_source_id"), - # default_location is used in two modules and is moved to the top level - ("variables", "cognite_modules", "default_location"), - ("variables", "cognite_modules", "another_module", "source_files"), - ("variables", "cognite_modules", "another_module", "model_space"), - ("variables", "cognite_modules", "parent_module", "child_module", "source_asset"), - } - - config = InitConfigYAML(dummy_environment).load_variables(PYTEST_PROJECT, propagate_reused_variables=True) - - missing = expected - set(config.keys()) - extra = set(config.keys()) - expected - assert not missing, f"Missing keys: {missing}. 
Got extra {extra}" - assert not extra, f"Extra keys: {extra}" - - def test_load_parent_variables(self, dummy_environment: Environment) -> None: - config = InitConfigYAML( - dummy_environment, - { - ("variables", "cognite_modules", "infield", "shared_variable"): ConfigEntry( - key_path=("variables", "cognite_modules", "infield", "shared_variable"), - default_value="shared_value", - ) - }, - ) - - config._load_variables({"shared_variable": {("cognite_modules", "infield", "cdf_infield_common")}}) - - assert ("variables", "cognite_modules", "infield", "shared_variable") in config.keys() - assert ("variables", "cognite_modules", "infield", "cdf_infield_common", "shared_variable") not in config.keys() - - -@pytest.mark.parametrize( - "input_, expected", - [ - pytest.param({"a": {"b": 1, "c": 2}}, {("a", "b"): 1, ("a", "c"): 2}, id="Simple"), - pytest.param({"a": {"b": {"c": 1}}}, {("a", "b", "c"): 1}, id="Nested"), - ], -) -def test_flatten_dict(input_: dict[str, Any], expected: dict[str, Any]) -> None: - actual = flatten_dict(input_) - - assert actual == expected - - -@pytest.fixture() -def my_config(): - return { - "top_variable": "my_top_variable", - "module_a": { - "readwrite_source_id": "my_readwrite_source_id", - "readonly_source_id": "my_readonly_source_id", - }, - "parent": {"child": {"child_variable": "my_child_variable"}}, - } - - -def test_split_config(my_config: dict[str, Any]) -> None: - expected = { - "": {"top_variable": "my_top_variable"}, - "module_a": { - "readwrite_source_id": "my_readwrite_source_id", - "readonly_source_id": "my_readonly_source_id", - }, - "parent.child": {"child_variable": "my_child_variable"}, - } - actual = _Helpers.to_variables_by_module_path(my_config) - - assert actual == expected - - -def test_create_local_config(my_config: dict[str, Any]): - configs = _Helpers.to_variables_by_module_path(my_config) - - local_config = _Helpers.create_local_config(configs, Path("parent/child/auth/")) - - assert dict(local_config.items()) == {"top_variable": "my_top_variable", "child_variable": "my_child_variable"} - - -def valid_yaml_semantics_test_cases() -> Iterable[pytest.ParameterSet]: - yield pytest.param( - """ -- dbName: src:005:test:rawdb:state -- dbName: src:002:weather:rawdb:state -- dbName: uc:001:demand:rawdb:state -- dbName: in:all:rawdb:state -- dbName: src:001:sap:rawdb -""", - Path("build/raw/raw.yaml"), - id="Multiple Raw Databases", - ) - - yield pytest.param( - """ -dbName: src:005:test:rawdb:state -""", - Path("build/raw/raw.yaml"), - id="Single Raw Database", - ) - - yield pytest.param( - """ -dbName: src:005:test:rawdb:state -tableName: myTable -""", - Path("build/raw/raw.yaml"), - id="Single Raw Database with table", - ) - - yield pytest.param( - """ -- dbName: src:005:test:rawdb:state - tableName: myTable -- dbName: src:002:weather:rawdb:state - tableName: myOtherTable -""", - Path("build/raw/raw.yaml"), - id="Multiple Raw Databases with table", - ) - - -class TestCheckYamlSemantics: - @pytest.mark.parametrize("raw_yaml, source_path", list(valid_yaml_semantics_test_cases())) - def test_valid_yaml(self, raw_yaml: str, source_path: Path, dummy_environment: Environment): - state = _BuildState.create(BuildConfigYAML(dummy_environment, filepath=Path("dummy"), variables={})) - cmd = BuildCommand(print_warning=False) - # Only used in error messages - destination = Path("build/raw/raw.yaml") - yaml_warnings = cmd.validate(raw_yaml, source_path, destination, state, False) - assert not yaml_warnings - - -class TestIterateModules: - def 
test_modules_project_for_tests(self): - expected_modules = { - PYTEST_PROJECT / "cognite_modules" / "a_module", - PYTEST_PROJECT / "cognite_modules" / "another_module", - PYTEST_PROJECT / "cognite_modules" / "parent_module" / "child_module", - } - - actual_modules = {module for module, _ in iterate_modules(PYTEST_PROJECT)} - - assert actual_modules == expected_modules - - -class TestModuleFromPath: - @pytest.mark.parametrize( - "path, expected", - [ - pytest.param(Path("cognite_modules/a_module/data_models/my_model.datamodel.yaml"), "a_module"), - pytest.param(Path("cognite_modules/another_module/data_models/views/my_view.view.yaml"), "another_module"), - pytest.param( - Path("cognite_modules/parent_module/child_module/data_models/containers/my_container.container.yaml"), - "child_module", - ), - pytest.param( - Path("cognite_modules/parent_module/child_module/data_models/auth/my_group.group.yaml"), "child_module" - ), - pytest.param(Path("custom_modules/child_module/functions/functions.yaml"), "child_module"), - pytest.param(Path("custom_modules/parent_module/child_module/functions/functions.yaml"), "child_module"), - ], - ) - def test_module_from_path(self, path: Path, expected: str): - assert module_from_path(path) == expected - - -class TestBuildConfigYAML: - def test_build_config_create_valid_build_folder(self, config_yaml: str) -> None: - build_env_name = "dev" - system_config = SystemYAML.load_from_directory(PYTEST_PROJECT, build_env_name) - config = BuildConfigYAML.load_from_directory(PYTEST_PROJECT, build_env_name) - available_modules = {module.name for module, _ in iterate_modules(PYTEST_PROJECT)} - config.environment.selected = list(available_modules) - - BuildCommand().build_config( - BUILD_DIR, PYTEST_PROJECT, config=config, system_config=system_config, clean=True, verbose=False - ) - - # The resulting build folder should only have subfolders that are matching the folder name - # used by the loaders. 
- invalid_resource_folders = [ - dir_.name for dir_ in BUILD_DIR.iterdir() if dir_.is_dir() and dir_.name not in LOADER_BY_FOLDER_NAME - ] - assert not invalid_resource_folders, f"Invalid resource folders after build: {invalid_resource_folders}" - - @pytest.mark.parametrize( - "modules, expected_available_modules", - [ - pytest.param({"another_module": {}}, ["another_module"], id="Single module"), - pytest.param( - { - "cognite_modules": { - "top_variable": "my_top_variable", - "a_module": { - "source_id": "123-456-789", - }, - "parent_module": { - "parent_variable": "my_parent_variable", - "child_module": { - "dataset_external_id": "ds_my_dataset", - }, - }, - "module_without_variables": {}, - } - }, - ["a_module", "child_module", "module_without_variables"], - id="Multiple nested modules", - ), - ], - ) - def test_available_modules( - self, modules: dict[str, Any], expected_available_modules: list[str], dummy_environment: Environment - ) -> None: - config = BuildConfigYAML(dummy_environment, filepath=Path("dummy"), variables=modules) - - assert sorted(config.available_modules) == sorted(expected_available_modules) diff --git a/tests/tests_unit/test_cdf_tk/test_utils.py b/tests/tests_unit/test_cdf_tk/test_utils.py index 033b21246..dda6d7baa 100644 --- a/tests/tests_unit/test_cdf_tk/test_utils.py +++ b/tests/tests_unit/test_cdf_tk/test_utils.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import os import shutil import tempfile @@ -21,15 +23,19 @@ from cognite.client.testing import CogniteClientMock, monkeypatch_cognite_client from pytest import MonkeyPatch +from cognite_toolkit._cdf_tk.exceptions import AuthenticationError from cognite_toolkit._cdf_tk.tk_warnings import TemplateVariableWarning from cognite_toolkit._cdf_tk.utils import ( AuthVariables, CDFToolConfig, calculate_directory_hash, + flatten_dict, + iterate_modules, load_yaml_inject_variables, + module_from_path, ) from cognite_toolkit._cdf_tk.validation import validate_modules_variables -from tests.tests_unit.data import DATA_FOLDER +from tests.tests_unit.data import DATA_FOLDER, PYTEST_PROJECT from tests.tests_unit.utils import PrintCapture @@ -322,18 +328,6 @@ def auth_variables_validate_test_cases(): id="Happy path Client credentials login", ) - yield pytest.param( - { - "CDF_CLUSTER": "my_cluster", - "CDF_PROJECT": "", - }, - False, - "error", - [" CDF project URL name is not set.", " [bold red]ERROR[/]: CDF Cluster and project are required."], - {}, - id="Missing project", - ) - class TestEnvironmentVariables: def test_env_variable(self): @@ -370,3 +364,56 @@ def test_validate( if expected_vars: assert vars(auth_var) == expected_vars + + def test_missing_project_raise_authentication_error(self): + with mock.patch.dict(os.environ, {"CDF_CLUSTER": "my_cluster"}, clear=True): + with pytest.raises(AuthenticationError) as exc_info: + AuthVariables.from_env().validate(False) + assert str(exc_info.value) == "CDF Cluster and project are required. Missing: project." 
+ + +class TestModuleFromPath: + @pytest.mark.parametrize( + "path, expected", + [ + pytest.param(Path("cognite_modules/a_module/data_models/my_model.datamodel.yaml"), "a_module"), + pytest.param(Path("cognite_modules/another_module/data_models/views/my_view.view.yaml"), "another_module"), + pytest.param( + Path("cognite_modules/parent_module/child_module/data_models/containers/my_container.container.yaml"), + "child_module", + ), + pytest.param( + Path("cognite_modules/parent_module/child_module/data_models/auth/my_group.group.yaml"), "child_module" + ), + pytest.param(Path("custom_modules/child_module/functions/functions.yaml"), "child_module"), + pytest.param(Path("custom_modules/parent_module/child_module/functions/functions.yaml"), "child_module"), + ], + ) + def test_module_from_path(self, path: Path, expected: str): + assert module_from_path(path) == expected + + +class TestIterateModules: + def test_modules_project_for_tests(self): + expected_modules = { + PYTEST_PROJECT / "cognite_modules" / "a_module", + PYTEST_PROJECT / "cognite_modules" / "another_module", + PYTEST_PROJECT / "cognite_modules" / "parent_module" / "child_module", + } + + actual_modules = {module for module, _ in iterate_modules(PYTEST_PROJECT)} + + assert actual_modules == expected_modules + + +@pytest.mark.parametrize( + "input_, expected", + [ + pytest.param({"a": {"b": 1, "c": 2}}, {("a", "b"): 1, ("a", "c"): 2}, id="Simple"), + pytest.param({"a": {"b": {"c": 1}}}, {("a", "b", "c"): 1}, id="Nested"), + ], +) +def test_flatten_dict(input_: dict[str, Any], expected: dict[str, Any]) -> None: + actual = flatten_dict(input_) + + assert actual == expected diff --git a/tests_migrations/calculate_hashes.py b/tests_migrations/calculate_hashes.py index 52c7f12b9..e39372e90 100644 --- a/tests_migrations/calculate_hashes.py +++ b/tests_migrations/calculate_hashes.py @@ -1,4 +1,4 @@ -from constants import SUPPORTED_TOOLKIT_VERSIONS, TEST_DIR_ROOT +from constants import PROJECT_INIT_DIR, SUPPORTED_TOOLKIT_VERSIONS from cognite_toolkit._cdf_tk.utils import calculate_directory_hash @@ -6,14 +6,9 @@ def calculate_hashes(): exclude_prefixes = set() for version in SUPPORTED_TOOLKIT_VERSIONS: - cognite_module = ( - TEST_DIR_ROOT / f".venv{version}" / "Lib" / "site-packages" / "cognite_toolkit" / "cognite_modules" - ) - if version == "0.1.0b7": - # From version 0.1.0b7, the default files are no longer copied into the user's project - exclude_prefixes = {"default."} + project_init = PROJECT_INIT_DIR / f"project_{version}" - version_hash = calculate_directory_hash(cognite_module, exclude_prefixes=exclude_prefixes) + version_hash = calculate_directory_hash(project_init / "cognite_modules", exclude_prefixes=exclude_prefixes) print(f"Cognite Module Hash for version {version!r}: {version_hash!r}") diff --git a/tests_migrations/constants.py b/tests_migrations/constants.py index 29adc3d39..286960afa 100644 --- a/tests_migrations/constants.py +++ b/tests_migrations/constants.py @@ -4,8 +4,15 @@ from pathlib import Path TEST_DIR_ROOT = Path(__file__).resolve().parent +PROJECT_INIT_DIR = TEST_DIR_ROOT / "project_inits" +PROJECT_INIT_DIR.mkdir(exist_ok=True) SUPPORTED_TOOLKIT_VERSIONS = [ + "0.1.0", + "0.1.1", + "0.1.2", + "0.1.3", + "0.1.4", "0.2.0a1", "0.2.0a2", "0.2.0a3", @@ -15,6 +22,7 @@ "0.2.0b2", "0.2.0b3", "0.2.0b4", + "0.2.0", ] diff --git a/tests_migrations/create_environments.py b/tests_migrations/create_environments.py deleted file mode 100644 index ac9d27656..000000000 --- a/tests_migrations/create_environments.py +++ /dev/null 
@@ -1,26 +0,0 @@
-"""This is required to run the migration tests in this folder."""
-
-import platform
-import subprocess
-
-from constants import SUPPORTED_TOOLKIT_VERSIONS, TEST_DIR_ROOT, chdir
-
-
-def create_environments():
-    with chdir(TEST_DIR_ROOT):
-        for version in SUPPORTED_TOOLKIT_VERSIONS:
-            print(f"Creating environment for version {version}")
-            environment_directory = f".venv{version}"
-            if (TEST_DIR_ROOT / environment_directory).exists():
-                print(f"Environment for version {version} already exists")
-                continue
-            subprocess.run(["python", "-m", "venv", environment_directory])
-            if platform.system() == "Windows":
-                subprocess.run([f"{environment_directory}/Scripts/pip", "install", f"cognite-toolkit=={version}"])
-            else:
-                subprocess.run([f"{environment_directory}/bin/pip", "install", f"cognite-toolkit=={version}"])
-            print(f"Environment for version {version} created")
-
-
-if __name__ == "__main__":
-    create_environments()
diff --git a/tests_migrations/run_module_upgrade.py b/tests_migrations/run_module_upgrade.py
new file mode 100644
index 000000000..c0f1c6da6
--- /dev/null
+++ b/tests_migrations/run_module_upgrade.py
@@ -0,0 +1,253 @@
+import contextlib
+import os
+import platform
+import shutil
+import subprocess
+import sys
+from collections.abc import Iterator
+from contextlib import contextmanager
+from pathlib import Path
+from unittest.mock import patch
+
+import yaml
+from dotenv import load_dotenv
+from packaging.version import Version
+from packaging.version import parse as parse_version
+from rich import print
+from rich.panel import Panel
+
+from cognite_toolkit._cdf_tk.commands import BuildCommand, DeployCommand
+from cognite_toolkit._cdf_tk.constants import ROOT_MODULES, SUPPORT_MODULE_UPGRADE_FROM_VERSION
+from cognite_toolkit._cdf_tk.loaders import LOADER_BY_FOLDER_NAME
+from cognite_toolkit._cdf_tk.prototypes.commands import ModulesCommand
+from cognite_toolkit._cdf_tk.prototypes.commands._changes import ManualChange
+from cognite_toolkit._cdf_tk.prototypes.commands.modules import CLICommands
+from cognite_toolkit._cdf_tk.utils import CDFToolConfig, module_from_path
+from cognite_toolkit._version import __version__
+
+TEST_DIR_ROOT = Path(__file__).resolve().parent
+PROJECT_INIT_DIR = TEST_DIR_ROOT / "project_inits"
+PROJECT_INIT_DIR.mkdir(exist_ok=True)
+
+
+def run() -> None:
+    only_first = len(sys.argv) > 1 and sys.argv[1] == "--only-first"
+
+    versions = get_versions_since(SUPPORT_MODULE_UPGRADE_FROM_VERSION)
+    for version in versions:
+        create_project_init(str(version))
+
+    print(
+        Panel(
+            "All project inits created successfully.",
+            expand=False,
+            title="cdf-tk init executed for all past versions.",
+        )
+    )
+
+    print(
+        Panel(
+            "Running module upgrade for all supported versions.",
+            expand=False,
+            title="cdf-tk module upgrade",
+        )
+    )
+    if only_first:
+        versions = versions[-1:]
+    for version in versions:
+        with local_tmp_project_path() as project_path, local_build_path() as build_path, tool_globals() as cdf_tool_config:
+            run_modules_upgrade(version, project_path, build_path, cdf_tool_config)
+
+
+def get_versions_since(support_upgrade_from_version: str) -> list[Version]:
+    result = subprocess.run("pip index versions cognite-toolkit --pre".split(), stdout=subprocess.PIPE)
+    lines = result.stdout.decode().split("\n")
+    for line in lines:
+        if line.startswith("Available versions:"):
+            raw_version_str = line.split(":", maxsplit=1)[1]
+            supported_from = parse_version(support_upgrade_from_version)
+            return [
+                parsed
+                for version in raw_version_str.split(",")
+                if (parsed := parse_version(version.strip())) >= supported_from
+            ]
+    else:
+        raise ValueError("Could not find available versions.")
+
+
+def create_project_init(version: str) -> None:
+    project_init = PROJECT_INIT_DIR / f"project_{version}"
+    if project_init.exists():
+        print(f"Project init for version {version} already exists.")
+        return
+
+    environment_directory = f".venv{version}"
+    if (TEST_DIR_ROOT / environment_directory).exists():
+        print(f"Environment for version {version} already exists")
+    else:
+        print(f"Creating environment for version {version}")
+        subprocess.run(["python", "-m", "venv", environment_directory])
+        if platform.system() == "Windows":
+            subprocess.run([f"{environment_directory}/Scripts/pip", "install", f"cognite-toolkit=={version}"])
+        else:
+            subprocess.run([f"{environment_directory}/bin/pip", "install", f"cognite-toolkit=={version}"])
+        print(f"Environment for version {version} created")
+
+    modified_env_variables = os.environ.copy()
+    repo_root = TEST_DIR_ROOT.parent
+    if "PYTHONPATH" in modified_env_variables:
+        # Need to remove the repo root from PYTHONPATH to avoid importing the wrong version of the toolkit
+        # (This is typically set by the IDE, for example, PyCharm sets it when running tests).
+        modified_env_variables["PYTHONPATH"] = modified_env_variables["PYTHONPATH"].replace(str(repo_root), "")
+    if platform.system() == "Windows":
+        old_version_script_dir = Path(f"{environment_directory}/Scripts/")
+    else:
+        old_version_script_dir = Path(f"{environment_directory}/bin/")
+    cmd = [str(old_version_script_dir / "cdf-tk"), "init", str(project_init), "--clean"]
+    _ = subprocess.run(cmd, capture_output=True, shell=True, env=modified_env_variables)
+    print(f"Project init for version {version} created.")
+    shutil.rmtree(environment_directory)
+
+
+def run_modules_upgrade(
+    previous_version: Version, project_path: Path, build_path: Path, cdf_tool_config: CDFToolConfig
+) -> None:
+    project_init = PROJECT_INIT_DIR / f"project_{previous_version!s}"
+    # Copy the project to a temporary location as the upgrade command modifies the project.
+    shutil.copytree(project_init, project_path, dirs_exist_ok=True)
+
+    with chdir(TEST_DIR_ROOT):
+        modules = ModulesCommand(print_warning=False)
+        # This allows running the function even when there are uncommitted changes in the repository.
+        with patch.object(CLICommands, "has_uncommitted_changes", lambda: False):
+            changes = modules.upgrade(project_path)
+
+        delete_modules_requiring_manual_changes(changes)
+
+        # Update the config file to include all modules.
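+        # (update_config_yaml_to_select_all_modules below rewrites config.dev.yaml so that every root
+        #  module directory containing YAML files is selected, making the subsequent build and dry-run
+        #  deploy cover the whole upgraded project.)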
+        update_config_yaml_to_select_all_modules(project_path)
+
+        if previous_version < parse_version("0.2.0a4"):
+            # Versions before 0.2.0a4 shipped the example pump view with the wrong property casing.
+            pump_view = (
+                project_path
+                / "cognite_modules"
+                / "experimental"
+                / "example_pump_data_model"
+                / "data_models"
+                / "4.Pump.view.yaml"
+            )
+            pump_view.write_text(pump_view.read_text().replace("external_id", "externalId"))
+
+        build = BuildCommand(print_warning=False)
+        build.execute(False, project_path, build_path, build_env_name="dev", no_clean=False)
+
+        deploy = DeployCommand(print_warning=False)
+        deploy.execute(
+            cdf_tool_config,
+            str(build_path),
+            build_env_name="dev",
+            dry_run=True,
+            drop=False,
+            drop_data=False,
+            include=list(LOADER_BY_FOLDER_NAME),
+            verbose=False,
+        )
+
+    print(
+        Panel(
+            f"Module upgrade for version {previous_version!s} to {__version__} completed successfully.",
+            expand=False,
+            style="green",
+        )
+    )
+
+
+def delete_modules_requiring_manual_changes(changes):
+    for change in changes:
+        if not isinstance(change, ManualChange):
+            continue
+        for file in change.needs_to_change():
+            if file.is_dir():
+                shutil.rmtree(file)
+            else:
+                # Walk up from the file until the module directory is reached, then delete the module.
+                module = module_from_path(file)
+                for part in reversed(file.parts):
+                    if part == module:
+                        break
+                    file = file.parent
+                if file.exists():
+                    shutil.rmtree(file)
+
+
+def update_config_yaml_to_select_all_modules(project_path):
+    config_yaml = project_path / "config.dev.yaml"
+    assert config_yaml.exists()
+    yaml_data = yaml.safe_load(config_yaml.read_text())
+    yaml_data["environment"]["selected"] = []
+    for root_module in ROOT_MODULES:
+        if (project_path / root_module).exists() and any(
+            yaml_file for yaml_file in (project_path / root_module).rglob("*.yaml")
+        ):
+            yaml_data["environment"]["selected"].append(f"{root_module}/")
+    config_yaml.write_text(yaml.dump(yaml_data))
+
+
+@contextlib.contextmanager
+def chdir(new_dir: Path) -> Iterator[None]:
+    """
+    Change directory to new_dir and return to the original directory when exiting the context.
+
+    Args:
+        new_dir: The new directory to change to.
+
+    """
+    current_working_dir = Path.cwd()
+    os.chdir(new_dir)
+
+    try:
+        yield
+
+    finally:
+        os.chdir(current_working_dir)
+
+
+@contextmanager
+def tool_globals() -> Iterator[CDFToolConfig]:
+    load_dotenv(TEST_DIR_ROOT.parent / ".env")
+
+    try:
+        yield CDFToolConfig()
+    finally:
+        ...
+
+
+@contextmanager
+def local_tmp_project_path() -> Iterator[Path]:
+    project_path = TEST_DIR_ROOT / "tmp-project"
+    if project_path.exists():
+        shutil.rmtree(project_path)
+    project_path.mkdir(exist_ok=True)
+    try:
+        yield project_path
+    finally:
+        ...
+
+
+@contextmanager
+def local_build_path() -> Iterator[Path]:
+    build_path = TEST_DIR_ROOT / "build"
+    if build_path.exists():
+        shutil.rmtree(build_path)
+
+    build_path.mkdir(exist_ok=True)
+    # This is a small hack to get 0.1.0b1-4 working
+    (build_path / "file.txt").touch(exist_ok=True)
+    try:
+        yield build_path
+    finally:
+        ...
+
+
+if __name__ == "__main__":
+    run()
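+
+# Usage sketch (assumes CDF credentials are available in a .env file at the repository root):
+#   python tests_migrations/run_module_upgrade.py               # upgrade-test every supported past version
+#   python tests_migrations/run_module_upgrade.py --only-first  # restrict the upgrade test to a single version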