diff --git a/.github/workflows/data/core/tracked.txt b/.github/workflows/data/core/tracked.txt index 5b2a3ca4d..855cb884e 100644 --- a/.github/workflows/data/core/tracked.txt +++ b/.github/workflows/data/core/tracked.txt @@ -3,6 +3,5 @@ onetl/plugins/** onetl/impl/** onetl/hwm/** onetl/_util/** -onetl/_internal.py onetl/log.py .github/workflows/data/core/** diff --git a/.github/workflows/test-core.yml b/.github/workflows/test-core.yml index 65d681dc1..6008f9250 100644 --- a/.github/workflows/test-core.yml +++ b/.github/workflows/test-core.yml @@ -72,7 +72,7 @@ jobs: - name: Run tests run: | ./run_tests.sh -m 'not connection' - ./run_tests.sh onetl/_util onetl/_internal.py onetl/hooks onetl/file/filter onetl/file/limit onetl/hwm/store/hwm_class_registry.py + ./run_tests.sh onetl/_util onetl/hooks onetl/file/filter onetl/file/limit onetl/hwm/store/hwm_class_registry.py - name: Upload coverage results uses: actions/upload-artifact@v4 diff --git a/onetl/_internal.py b/onetl/_internal.py deleted file mode 100644 index 361bb3e82..000000000 --- a/onetl/_internal.py +++ /dev/null @@ -1,172 +0,0 @@ -# SPDX-FileCopyrightText: 2021-2024 MTS (Mobile Telesystems) -# SPDX-License-Identifier: Apache-2.0 -""" - Helpers -""" - -from __future__ import annotations - -import os -from datetime import datetime -from typing import TYPE_CHECKING, Any - -try: - from pydantic.v1 import SecretStr -except (ImportError, AttributeError): - from pydantic import SecretStr # type: ignore[no-redef, assignment] - -if TYPE_CHECKING: - from pathlib import PurePath - -# e.g. 20230524122150 -DATETIME_FORMAT = "%Y%m%d%H%M%S" - - -def clear_statement(statement: str) -> str: - """ - Clear unnecessary spaces and semicolons at the statement end. - - Oracle-specific: adds semicolon after END statement. - - Examples - -------- - - >>> clear_statement("SELECT * FROM mytable") - 'SELECT * FROM mytable' - >>> clear_statement("SELECT * FROM mytable ; ") - 'SELECT * FROM mytable' - >>> clear_statement("CREATE TABLE mytable (id NUMBER)") - 'CREATE TABLE mytable (id NUMBER)' - >>> clear_statement("BEGIN ... END") - 'BEGIN ... END;' - """ - - statement = statement.rstrip().lstrip("\n\r").rstrip(";").rstrip() - if statement.lower().endswith("end"): - statement += ";" - return statement - - -def uniq_ignore_case(orig_list: list[str]) -> list[str]: - """ - Return only uniq values from a list, case ignore. - - Examples - -------- - - >>> uniq_ignore_case(["a", "c"]) - ['a', 'c'] - >>> uniq_ignore_case(["A", "a", "c"]) - ['A', 'c'] - >>> uniq_ignore_case(["a", "A", "c"]) - ['a', 'c'] - """ - - result: list[str] = [] - already_visited: set[str] = set() - - for orig_value in orig_list: - if orig_value.casefold() not in already_visited: - result.append(orig_value) - already_visited.add(orig_value.casefold()) - - return result - - -def stringify(value: Any, quote: bool = False) -> Any: # noqa: WPS212 - """ - Convert values to strings. - - Values ``True``, ``False`` and ``None`` become ``"true"``, ``"false"`` and ``"null"``. - - If input is dict, return dict with stringified values and keys (recursive). - - If input is list, return list with stringified values (recursive). - - If ``quote=True``, wrap string values with double quotes. 
- - Examples - -------- - - >>> stringify(1) - '1' - >>> stringify(True) - 'true' - >>> stringify(False) - 'false' - >>> stringify(None) - 'null' - >>> stringify("string") - 'string' - >>> stringify("string", quote=True) - '"string"' - >>> stringify({"abc": 1}) - {'abc': '1'} - >>> stringify([1, True, False, None, "string"]) - ['1', 'true', 'false', 'null', 'string'] - """ - - if isinstance(value, dict): - return {stringify(k): stringify(v, quote) for k, v in value.items()} - - if isinstance(value, list): - return [stringify(v, quote) for v in value] - - if value is None: - return "null" - - if isinstance(value, bool): - return "true" if value else "false" - - if isinstance(value, SecretStr): - value = value.get_secret_value() - - if isinstance(value, os.PathLike): - value = os.fspath(value) - - if isinstance(value, str): - return f'"{value}"' if quote else value - - return str(value) - - -def to_camel(string: str) -> str: - """ - Convert ``snake_case`` strings to ``camelCase`` (with first symbol in lowercase) - - Examples - -------- - - >>> to_camel("some_value") - 'someValue' - """ - - return "".join(word.capitalize() if index > 0 else word for index, word in enumerate(string.split("_"))) - - -def generate_temp_path(root: PurePath) -> PurePath: - """ - Returns prefix which will be used for creating temp directory - - Returns - ------- - RemotePath - Temp path, containing current host name, process name and datetime - - Examples - -------- - - >>> from etl_entities.process import Process - >>> from pathlib import Path - >>> generate_temp_path(Path("/tmp")) # doctest: +SKIP - Path("/tmp/onetl/currenthost/myprocess/20230524122150") - >>> with Process(dag="mydag", task="mytask"): # doctest: +SKIP - ... generate_temp_path(Path("/abc")) - Path("/abc/onetl/currenthost/mydag.mytask.myprocess/20230524122150") - """ - - from etl_entities.process import ProcessStackManager - - current_process = ProcessStackManager.get_current() - current_dt = datetime.now().strftime(DATETIME_FORMAT) - return root / "onetl" / current_process.host / current_process.full_name / current_dt diff --git a/onetl/_util/file.py b/onetl/_util/file.py index 06e6ef047..ee27c57f4 100644 --- a/onetl/_util/file.py +++ b/onetl/_util/file.py @@ -5,11 +5,15 @@ import hashlib import io import os -from pathlib import Path +from datetime import datetime +from pathlib import Path, PurePath from onetl.exception import NotAFileError from onetl.impl import path_repr +# e.g. 20230524122150 +DATETIME_FORMAT = "%Y%m%d%H%M%S" + def get_file_hash( path: os.PathLike | str, @@ -41,3 +45,31 @@ def is_file_readable(path: str | os.PathLike) -> Path: raise OSError(f"No read access to file {path_repr(path)}") return path + + +def generate_temp_path(root: PurePath) -> PurePath: + """ + Returns prefix which will be used for creating temp directory + + Returns + ------- + RemotePath + Temp path, containing current host name, process name and datetime + + Examples + -------- + + >>> from etl_entities.process import Process + >>> from pathlib import Path + >>> generate_temp_path(Path("/tmp")) # doctest: +SKIP + Path("/tmp/onetl/currenthost/myprocess/20230524122150") + >>> with Process(dag="mydag", task="mytask"): # doctest: +SKIP + ... 
generate_temp_path(Path("/abc")) + Path("/abc/onetl/currenthost/mydag.mytask.myprocess/20230524122150") + """ + + from etl_entities.process import ProcessStackManager + + current_process = ProcessStackManager.get_current() + current_dt = datetime.now().strftime(DATETIME_FORMAT) + return root / "onetl" / current_process.host / current_process.full_name / current_dt diff --git a/onetl/_util/spark.py b/onetl/_util/spark.py index 230abe80e..f172b1c98 100644 --- a/onetl/_util/spark.py +++ b/onetl/_util/spark.py @@ -2,6 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 from __future__ import annotations +import os import textwrap from contextlib import contextmanager from math import inf @@ -9,11 +10,73 @@ from onetl._util.version import Version +try: + from pydantic.v1 import SecretStr +except (ImportError, AttributeError): + from pydantic import SecretStr # type: ignore[no-redef, assignment] + if TYPE_CHECKING: from pyspark.sql import SparkSession from pyspark.sql.conf import RuntimeConfig +def stringify(value: Any, quote: bool = False) -> Any: # noqa: WPS212 + """ + Convert values to strings. + + Values ``True``, ``False`` and ``None`` become ``"true"``, ``"false"`` and ``"null"``. + + If input is dict, return dict with stringified values and keys (recursive). + + If input is list, return list with stringified values (recursive). + + If ``quote=True``, wrap string values with double quotes. + + Examples + -------- + + >>> stringify(1) + '1' + >>> stringify(True) + 'true' + >>> stringify(False) + 'false' + >>> stringify(None) + 'null' + >>> stringify("string") + 'string' + >>> stringify("string", quote=True) + '"string"' + >>> stringify({"abc": 1}) + {'abc': '1'} + >>> stringify([1, True, False, None, "string"]) + ['1', 'true', 'false', 'null', 'string'] + """ + + if isinstance(value, dict): + return {stringify(k): stringify(v, quote) for k, v in value.items()} + + if isinstance(value, list): + return [stringify(v, quote) for v in value] + + if value is None: + return "null" + + if isinstance(value, bool): + return "true" if value else "false" + + if isinstance(value, SecretStr): + value = value.get_secret_value() + + if isinstance(value, os.PathLike): + value = os.fspath(value) + + if isinstance(value, str): + return f'"{value}"' if quote else value + + return str(value) + + @contextmanager def inject_spark_param(conf: RuntimeConfig, name: str, value: Any): """ diff --git a/onetl/_util/sql.py b/onetl/_util/sql.py new file mode 100644 index 000000000..37aa09a78 --- /dev/null +++ b/onetl/_util/sql.py @@ -0,0 +1,25 @@ +# SPDX-FileCopyrightText: 2021-2024 MTS (Mobile Telesystems) +# SPDX-License-Identifier: Apache-2.0 +def clear_statement(statement: str) -> str: + """ + Clear unnecessary spaces and semicolons at the statement end. + + Oracle-specific: adds semicolon after END statement. + + Examples + -------- + + >>> clear_statement("SELECT * FROM mytable") + 'SELECT * FROM mytable' + >>> clear_statement("SELECT * FROM mytable ; ") + 'SELECT * FROM mytable' + >>> clear_statement("CREATE TABLE mytable (id NUMBER)") + 'CREATE TABLE mytable (id NUMBER)' + >>> clear_statement("BEGIN ... END") + 'BEGIN ... 
END;' + """ + + statement = statement.rstrip().lstrip("\n\r").rstrip(";").rstrip() + if statement.lower().endswith("end"): + statement += ";" + return statement diff --git a/onetl/connection/db_connection/hive/connection.py b/onetl/connection/db_connection/hive/connection.py index fbedebefa..7fcb4dce2 100644 --- a/onetl/connection/db_connection/hive/connection.py +++ b/onetl/connection/db_connection/hive/connection.py @@ -13,8 +13,8 @@ except (ImportError, AttributeError): from pydantic import validator # type: ignore[no-redef, assignment] -from onetl._internal import clear_statement from onetl._util.spark import inject_spark_param +from onetl._util.sql import clear_statement from onetl.connection.db_connection.db_connection import DBConnection from onetl.connection.db_connection.hive.dialect import HiveDialect from onetl.connection.db_connection.hive.options import ( diff --git a/onetl/connection/db_connection/jdbc_connection/connection.py b/onetl/connection/db_connection/jdbc_connection/connection.py index e6716ae5e..5b0aebeb8 100644 --- a/onetl/connection/db_connection/jdbc_connection/connection.py +++ b/onetl/connection/db_connection/jdbc_connection/connection.py @@ -7,7 +7,7 @@ import warnings from typing import TYPE_CHECKING, Any -from onetl._internal import clear_statement +from onetl._util.sql import clear_statement from onetl.connection.db_connection.db_connection import DBConnection from onetl.connection.db_connection.jdbc_connection.dialect import JDBCDialect from onetl.connection.db_connection.jdbc_connection.options import ( diff --git a/onetl/connection/db_connection/jdbc_connection/options.py b/onetl/connection/db_connection/jdbc_connection/options.py index cd4538f29..a2aa39adb 100644 --- a/onetl/connection/db_connection/jdbc_connection/options.py +++ b/onetl/connection/db_connection/jdbc_connection/options.py @@ -15,7 +15,6 @@ from typing_extensions import deprecated -from onetl._internal import to_camel from onetl.impl import GenericOptions # options from spark.read.jdbc which are populated by JDBCConnection methods @@ -144,10 +143,9 @@ class Config: known_options = READ_OPTIONS | READ_WRITE_OPTIONS prohibited_options = GENERIC_PROHIBITED_OPTIONS | WRITE_OPTIONS extra = "allow" - alias_generator = to_camel # Options in DataFrameWriter.jdbc() method - partition_column: Optional[str] = None + partition_column: Optional[str] = Field(default=None, alias="partitionColumn") """Column used to parallelize reading from a table. .. warning:: @@ -164,17 +162,17 @@ class Config: See documentation for :obj:`~partitioning_mode` for more details""" - num_partitions: PositiveInt = 1 + num_partitions: PositiveInt = Field(default=1, alias="numPartitions") """Number of jobs created by Spark to read the table content in parallel. See documentation for :obj:`~partitioning_mode` for more details""" - lower_bound: Optional[int] = None + lower_bound: Optional[int] = Field(default=None, alias="lowerBound") """See documentation for :obj:`~partitioning_mode` for more details""" # noqa: WPS322 - upper_bound: Optional[int] = None + upper_bound: Optional[int] = Field(default=None, alias="upperBound") """See documentation for :obj:`~partitioning_mode` for more details""" # noqa: WPS322 - session_init_statement: Optional[str] = None + session_init_statement: Optional[str] = Field(default=None, alias="sessionInitStatement") '''After each database session is opened to the remote DB and before starting to read data, this option executes a custom SQL statement (or a PL/SQL block). 
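# Editor's note: a minimal sketch of the alias change in the hunk above. With the
# module-wide ``alias_generator = to_camel`` removed, each Spark-facing option now
# declares its camelCase alias explicitly via ``Field(..., alias=...)``. The class
# below is illustrative only (not the real JDBCReadOptions); it just shows that both
# the snake_case field name and the camelCase alias keep working, assuming the model
# config allows population by field name.
from typing import Optional

try:
    from pydantic.v1 import BaseModel, Field
except (ImportError, AttributeError):
    from pydantic import BaseModel, Field  # type: ignore[no-redef, assignment]


class _ReadOptionsSketch(BaseModel):
    class Config:
        allow_population_by_field_name = True

    partition_column: Optional[str] = Field(default=None, alias="partitionColumn")
    num_partitions: int = Field(default=1, alias="numPartitions")


assert _ReadOptionsSketch(partitionColumn="id").partition_column == "id"   # populated by alias
assert _ReadOptionsSketch(partition_column="id").partition_column == "id"  # populated by field name
assert _ReadOptionsSketch().dict(by_alias=True) == {"partitionColumn": None, "numPartitions": 1}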
@@ -423,7 +421,6 @@ class Config: known_options = WRITE_OPTIONS | READ_WRITE_OPTIONS prohibited_options = GENERIC_PROHIBITED_OPTIONS | READ_OPTIONS extra = "allow" - alias_generator = to_camel if_exists: JDBCTableExistBehavior = Field(default=JDBCTableExistBehavior.APPEND, alias="mode") """Behavior of writing data into existing table. @@ -528,7 +525,7 @@ class Config: Changed default value from 1000 to 20_000 """ - isolation_level: str = "READ_UNCOMMITTED" + isolation_level: str = Field(default="READ_UNCOMMITTED", alias="isolationLevel") """The transaction isolation level, which applies to current connection. Possible values: @@ -571,7 +568,7 @@ class JDBCSQLOptions(GenericOptions): Split up ``ReadOptions`` to ``SQLOptions`` """ - partition_column: Optional[str] = None + partition_column: Optional[str] = Field(default=None, alias="partitionColumn") """Column used to partition data across multiple executors for parallel query processing. .. warning:: @@ -600,16 +597,16 @@ class JDBCSQLOptions(GenericOptions): -- Where ``stride`` is calculated as ``(upper_bound - lower_bound) / num_partitions``. """ - num_partitions: Optional[int] = None + num_partitions: Optional[int] = Field(default=None, alias="numPartitions") """Number of jobs created by Spark to read the table content in parallel.""" # noqa: WPS322 - lower_bound: Optional[int] = None + lower_bound: Optional[int] = Field(default=None, alias="lowerBound") """Defines the starting boundary for partitioning the query's data. Mandatory if :obj:`~partition_column` is set""" # noqa: WPS322 - upper_bound: Optional[int] = None + upper_bound: Optional[int] = Field(default=None, alias="upperBound") """Sets the ending boundary for data partitioning. Mandatory if :obj:`~partition_column` is set""" # noqa: WPS322 - session_init_statement: Optional[str] = None + session_init_statement: Optional[str] = Field(default=None, alias="sessionInitStatement") '''After each database session is opened to the remote DB and before starting to read data, this option executes a custom SQL statement (or a PL/SQL block). 
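# Editor's note: the JDBCSQLOptions docstrings above reference the partition stride,
# ``(upper_bound - lower_bound) / num_partitions``. A rough, simplified sketch of how
# such bounds translate into per-partition predicates (not Spark's actual code; the
# helper name is made up for illustration and assumes num_partitions >= 2):
def _partition_predicates(column: str, lower_bound: int, upper_bound: int, num_partitions: int) -> list[str]:
    stride = (upper_bound - lower_bound) // num_partitions
    bounds = [lower_bound + stride * i for i in range(1, num_partitions)]
    # first partition also picks up NULL values, remaining ones are half-open ranges
    predicates = [f"{column} < {bounds[0]} OR {column} IS NULL"]
    predicates += [f"{column} >= {lo} AND {column} < {hi}" for lo, hi in zip(bounds, bounds[1:])]
    predicates.append(f"{column} >= {bounds[-1]}")
    return predicates


# _partition_predicates("id", 0, 1000, 4) ->
# ['id < 250 OR id IS NULL', 'id >= 250 AND id < 500', 'id >= 500 AND id < 750', 'id >= 750']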
@@ -658,7 +655,6 @@ class Config: known_options = READ_OPTIONS - {"partitioning_mode"} prohibited_options = GENERIC_PROHIBITED_OPTIONS | WRITE_OPTIONS | {"partitioning_mode"} extra = "allow" - alias_generator = to_camel @root_validator(pre=True) def _check_partition_fields(cls, values): diff --git a/onetl/connection/db_connection/jdbc_mixin/connection.py b/onetl/connection/db_connection/jdbc_mixin/connection.py index 8ab430751..e8c19e38b 100644 --- a/onetl/connection/db_connection/jdbc_mixin/connection.py +++ b/onetl/connection/db_connection/jdbc_mixin/connection.py @@ -16,9 +16,9 @@ except (ImportError, AttributeError): from pydantic import Field, PrivateAttr, SecretStr, validator # type: ignore[no-redef, assignment] -from onetl._internal import clear_statement, stringify from onetl._util.java import get_java_gateway, try_import_java_class -from onetl._util.spark import get_spark_version +from onetl._util.spark import get_spark_version, stringify +from onetl._util.sql import clear_statement from onetl._util.version import Version from onetl.connection.db_connection.jdbc_mixin.options import ( JDBCExecuteOptions, diff --git a/onetl/connection/db_connection/kafka/connection.py b/onetl/connection/db_connection/kafka/connection.py index b64fff143..ce3829e49 100644 --- a/onetl/connection/db_connection/kafka/connection.py +++ b/onetl/connection/db_connection/kafka/connection.py @@ -14,10 +14,9 @@ except (ImportError, AttributeError): from pydantic import root_validator, validator # type: ignore[no-redef, assignment] -from onetl._internal import stringify from onetl._util.java import try_import_java_class from onetl._util.scala import get_default_scala_version -from onetl._util.spark import get_spark_version +from onetl._util.spark import get_spark_version, stringify from onetl._util.version import Version from onetl.connection.db_connection.db_connection import DBConnection from onetl.connection.db_connection.kafka.dialect import KafkaDialect diff --git a/onetl/connection/db_connection/kafka/kafka_kerberos_auth.py b/onetl/connection/db_connection/kafka/kafka_kerberos_auth.py index 6a20a31af..40e9aa55a 100644 --- a/onetl/connection/db_connection/kafka/kafka_kerberos_auth.py +++ b/onetl/connection/db_connection/kafka/kafka_kerberos_auth.py @@ -12,8 +12,8 @@ except (ImportError, AttributeError): from pydantic import Field, PrivateAttr, root_validator, validator # type: ignore[no-redef, assignment] -from onetl._internal import stringify from onetl._util.file import get_file_hash, is_file_readable +from onetl._util.spark import stringify from onetl.connection.db_connection.kafka.kafka_auth import KafkaAuth from onetl.impl import GenericOptions, LocalPath, path_repr diff --git a/onetl/connection/db_connection/kafka/kafka_scram_auth.py b/onetl/connection/db_connection/kafka/kafka_scram_auth.py index add09f349..823d0f82f 100644 --- a/onetl/connection/db_connection/kafka/kafka_scram_auth.py +++ b/onetl/connection/db_connection/kafka/kafka_scram_auth.py @@ -11,7 +11,7 @@ from typing_extensions import Literal -from onetl._internal import stringify +from onetl._util.spark import stringify from onetl.connection.db_connection.kafka.kafka_auth import KafkaAuth from onetl.impl import GenericOptions diff --git a/onetl/connection/db_connection/kafka/kafka_ssl_protocol.py b/onetl/connection/db_connection/kafka/kafka_ssl_protocol.py index 6149f5aa0..24dd52f6e 100644 --- a/onetl/connection/db_connection/kafka/kafka_ssl_protocol.py +++ b/onetl/connection/db_connection/kafka/kafka_ssl_protocol.py @@ -10,8 +10,8 @@ 
except (ImportError, AttributeError): from pydantic import Field, SecretStr, validator # type: ignore[no-redef, assignment] -from onetl._internal import stringify from onetl._util.file import is_file_readable +from onetl._util.spark import stringify from onetl.impl import GenericOptions, LocalPath if TYPE_CHECKING: diff --git a/onetl/connection/db_connection/oracle/connection.py b/onetl/connection/db_connection/oracle/connection.py index 8ca1b6ef1..043989500 100644 --- a/onetl/connection/db_connection/oracle/connection.py +++ b/onetl/connection/db_connection/oracle/connection.py @@ -19,8 +19,8 @@ from etl_entities.instance import Host -from onetl._internal import clear_statement from onetl._util.classproperty import classproperty +from onetl._util.sql import clear_statement from onetl._util.version import Version from onetl.connection.db_connection.jdbc_connection import JDBCConnection from onetl.connection.db_connection.jdbc_connection.options import JDBCReadOptions diff --git a/onetl/connection/db_connection/teradata/connection.py b/onetl/connection/db_connection/teradata/connection.py index d6ea76acc..6ef2637b4 100644 --- a/onetl/connection/db_connection/teradata/connection.py +++ b/onetl/connection/db_connection/teradata/connection.py @@ -7,8 +7,8 @@ from etl_entities.instance import Host -from onetl._internal import stringify from onetl._util.classproperty import classproperty +from onetl._util.spark import stringify from onetl._util.version import Version from onetl.connection.db_connection.jdbc_connection import JDBCConnection from onetl.connection.db_connection.teradata.dialect import TeradataDialect diff --git a/onetl/connection/file_df_connection/spark_s3/connection.py b/onetl/connection/file_df_connection/spark_s3/connection.py index 04da89e0e..1efe39d4e 100644 --- a/onetl/connection/file_df_connection/spark_s3/connection.py +++ b/onetl/connection/file_df_connection/spark_s3/connection.py @@ -16,11 +16,10 @@ from typing_extensions import Literal -from onetl._internal import stringify from onetl._util.hadoop import get_hadoop_config, get_hadoop_version from onetl._util.java import try_import_java_class from onetl._util.scala import get_default_scala_version -from onetl._util.spark import get_spark_version +from onetl._util.spark import get_spark_version, stringify from onetl._util.version import Version from onetl.base import ( BaseReadableFileFormat, diff --git a/onetl/file/file_downloader/file_downloader.py b/onetl/file/file_downloader/file_downloader.py index 3fe45ff40..069f8c69b 100644 --- a/onetl/file/file_downloader/file_downloader.py +++ b/onetl/file/file_downloader/file_downloader.py @@ -22,7 +22,7 @@ except (ImportError, AttributeError): from pydantic import Field, PrivateAttr, root_validator, validator # type: ignore[no-redef, assignment] -from onetl._internal import generate_temp_path +from onetl._util.file import generate_temp_path from onetl.base import BaseFileConnection, BaseFileFilter, BaseFileLimit from onetl.base.path_protocol import PathProtocol from onetl.file.file_downloader.options import FileDownloaderOptions diff --git a/onetl/file/file_uploader/file_uploader.py b/onetl/file/file_uploader/file_uploader.py index 9ab5f088f..fc6709ce3 100644 --- a/onetl/file/file_uploader/file_uploader.py +++ b/onetl/file/file_uploader/file_uploader.py @@ -15,7 +15,7 @@ except (ImportError, AttributeError): from pydantic import PrivateAttr, validator # type: ignore[no-redef, assignment] -from onetl._internal import generate_temp_path +from onetl._util.file import 
generate_temp_path from onetl.base import BaseFileConnection from onetl.base.path_protocol import PathWithStatsProtocol from onetl.base.pure_path_protocol import PurePathProtocol diff --git a/onetl/file/format/csv.py b/onetl/file/format/csv.py index 353a8e987..1c4442fd5 100644 --- a/onetl/file/format/csv.py +++ b/onetl/file/format/csv.py @@ -10,8 +10,7 @@ except (ImportError, AttributeError): from pydantic import Field # type: ignore[no-redef, assignment] -from onetl._internal import stringify -from onetl._util.spark import get_spark_version +from onetl._util.spark import get_spark_version, stringify from onetl.file.format.file_format import ReadWriteFileFormat from onetl.hooks import slot, support_hooks diff --git a/onetl/file/format/json.py b/onetl/file/format/json.py index 698874424..085d125e4 100644 --- a/onetl/file/format/json.py +++ b/onetl/file/format/json.py @@ -6,7 +6,7 @@ from typing_extensions import Literal -from onetl._internal import stringify +from onetl._util.spark import stringify from onetl.file.format.file_format import ReadOnlyFileFormat from onetl.hooks import slot, support_hooks diff --git a/tests/tests_unit/test_internal_unit/test_generate_temp_path.py b/tests/tests_unit/test_internal_unit/test_generate_temp_path.py index faad170fe..0b8f98853 100644 --- a/tests/tests_unit/test_internal_unit/test_generate_temp_path.py +++ b/tests/tests_unit/test_internal_unit/test_generate_temp_path.py @@ -3,13 +3,14 @@ from pathlib import PurePath import pytest -from etl_entities.process import Process -from onetl._internal import generate_temp_path +from onetl._util.file import generate_temp_path @pytest.mark.flaky(reruns=5) def test_generate_temp_path(): + from etl_entities.process import Process + root = PurePath("/path") dt_prefix = datetime.now().strftime("%Y%m%d%H%M") # up to minutes, not seconds diff --git a/tests/tests_unit/tests_db_connection_unit/test_jdbc_options_unit.py b/tests/tests_unit/tests_db_connection_unit/test_jdbc_options_unit.py index 699838889..6d3ff1322 100644 --- a/tests/tests_unit/tests_db_connection_unit/test_jdbc_options_unit.py +++ b/tests/tests_unit/tests_db_connection_unit/test_jdbc_options_unit.py @@ -2,7 +2,6 @@ import pytest -from onetl._internal import to_camel from onetl.connection import MSSQL, Clickhouse, MySQL, Oracle, Postgres, Teradata from onetl.connection.db_connection.jdbc_connection import JDBCTableExistBehavior @@ -181,7 +180,8 @@ def test_jdbc_old_options_allowed_but_deprecated(arg, value): with pytest.warns(UserWarning, match=warning_msg): options = Postgres.Options.parse({arg: value}) - assert options.dict(by_alias=True)[to_camel(arg)] == value + parsed_value = options.dict().get(arg) or options.dict(by_alias=True).get(arg) + assert parsed_value == value @pytest.mark.parametrize(
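# Editor's note: illustrative usage of the relocated helper exercised by
# test_generate_temp_path above, using its new import path from this diff.
# Requires the etl_entities package (an existing onetl dependency); the printed
# value depends on the current host, process and timestamp, mirroring the
# doctest example in onetl/_util/file.py.
from pathlib import PurePath

from onetl._util.file import generate_temp_path

temp_path = generate_temp_path(PurePath("/tmp"))
# e.g. PurePath('/tmp/onetl/currenthost/myprocess/20230524122150')
print(temp_path)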