diff --git a/.changes/unreleased/Features-20220423-231756.yaml b/.changes/unreleased/Features-20220423-231756.yaml new file mode 100644 index 00000000000..3fe8917ae58 --- /dev/null +++ b/.changes/unreleased/Features-20220423-231756.yaml @@ -0,0 +1,7 @@ +kind: Features +body: Add set and zip function to contexts +time: 2022-04-23T23:17:56.851793+12:00 +custom: + Author: jeremyyeo + Issue: "2345" + PR: "5107" diff --git a/core/dbt/context/base.py b/core/dbt/context/base.py index c2bb6417304..e6e14fcbc03 100644 --- a/core/dbt/context/base.py +++ b/core/dbt/context/base.py @@ -1,6 +1,6 @@ import json import os -from typing import Any, Dict, NoReturn, Optional, Mapping +from typing import Any, Dict, NoReturn, Optional, Mapping, Iterable, Set from dbt import flags from dbt import tracking @@ -8,8 +8,9 @@ from dbt.clients.yaml_helper import yaml, safe_load, SafeLoader, Loader, Dumper # noqa: F401 from dbt.contracts.graph.compiled import CompiledResource from dbt.exceptions import ( - raise_compiler_error, + CompilationException, MacroReturn, + raise_compiler_error, raise_parsing_error, disallow_secret_env_var, ) @@ -482,6 +483,94 @@ def toyaml( except (ValueError, yaml.YAMLError): return default + @contextmember("set") + @staticmethod + def _set(value: Iterable[Any], default: Any = None) -> Optional[Set[Any]]: + """The `set` context method can be used to convert any iterable + to a sequence of iterable elements that are unique (a set). + + :param value: The iterable + :param default: A default value to return if the `value` argument + is not an iterable + + Usage: + {% set my_list = [1, 2, 2, 3] %} + {% set my_set = set(my_list) %} + {% do log(my_set) %} {# {1, 2, 3} #} + """ + try: + return set(value) + except TypeError: + return default + + @contextmember + @staticmethod + def try_set(value: Iterable[Any]) -> Set[Any]: + """The `try_set` context method can be used to convert any iterable + to a sequence of iterable elements that are unique (a set). 
The + difference to the `set` context method is that the `try_set` method + will raise an exception on a TypeError. + + :param value: The iterable + :raises CompilationException: If the `value` argument + is not an iterable + + Usage: + {% set my_list = [1, 2, 2, 3] %} + {% set my_set = try_set(my_list) %} + {% do log(my_set) %} {# {1, 2, 3} #} + """ + try: + return set(value) + except TypeError as e: + raise CompilationException(e) + + @contextmember("zip") + @staticmethod + def _zip(*args: Iterable[Any], default: Any = None) -> Optional[Iterable[Any]]: + """The `zip` context method can be used to return + an iterator of tuples, where the i-th tuple contains the i-th + element from each of the argument iterables. + + :param *args: Any number of iterables + :param default: A default value to return if `*args` is not + iterable + + Usage: + {% set my_list_a = [1, 2] %} + {% set my_list_b = ['alice', 'bob'] %} + {% set my_zip = zip(my_list_a, my_list_b) | list %} + {% do log(my_zip) %} {# [(1, 'alice'), (2, 'bob')] #} + """ + try: + return zip(*args) + except TypeError: + return default + + @contextmember + @staticmethod + def try_zip(*args: Iterable[Any]) -> Iterable[Any]: + """The `try_zip` context method can be used to return + an iterator of tuples, where the i-th tuple contains the i-th + element from each of the argument iterables. The difference to the + `zip` context method is that the `try_zip` method will raise an + exception on a TypeError. 
+ + :param *args: Any number of iterables + :param default: A default value to return if `*args` is not + iterable + + Usage: + {% set my_list_a = [1, 2] %} + {% set my_list_b = ['alice', 'bob'] %} + {% set my_zip = try_zip(my_list_a, my_list_b) | list %} + {% do log(my_set) %} {# [(1, 'alice'), (2, 'bob')] #} + """ + try: + return zip(*args) + except TypeError as e: + raise CompilationException(e) + @contextmember @staticmethod def log(msg: str, info: bool = False) -> str: diff --git a/test/unit/test_context.py b/test/unit/test_context.py index ee4f6240310..8b8c62e1c80 100644 --- a/test/unit/test_context.py +++ b/test/unit/test_context.py @@ -11,95 +11,105 @@ from dbt.adapters.base import AdapterConfig from dbt.clients.jinja import MacroStack from dbt.contracts.graph.parsed import ( - ParsedModelNode, NodeConfig, DependsOn, ParsedMacro + ParsedModelNode, + NodeConfig, + DependsOn, + ParsedMacro, ) from dbt.config.project import VarProvider from dbt.context import base, target, configured, providers, docs, manifest, macros from dbt.contracts.files import FileHash from dbt.node_types import NodeType import dbt.exceptions -from .utils import profile_from_dict, config_from_parts_or_dicts, inject_adapter, clear_plugin +from .utils import ( + profile_from_dict, + config_from_parts_or_dicts, + inject_adapter, + clear_plugin, +) from .mock_adapter import adapter_factory class TestVar(unittest.TestCase): def setUp(self): self.model = ParsedModelNode( - alias='model_one', - name='model_one', - database='dbt', - schema='analytics', + alias="model_one", + name="model_one", + database="dbt", + schema="analytics", resource_type=NodeType.Model, - unique_id='model.root.model_one', - fqn=['root', 'model_one'], - package_name='root', - original_file_path='model_one.sql', - root_path='/usr/src/app', + unique_id="model.root.model_one", + fqn=["root", "model_one"], + package_name="root", + original_file_path="model_one.sql", + root_path="/usr/src/app", refs=[], sources=[], 
depends_on=DependsOn(), - config=NodeConfig.from_dict({ - 'enabled': True, - 'materialized': 'view', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'vars': {}, - 'quoting': {}, - 'column_types': {}, - 'tags': [], - }), + config=NodeConfig.from_dict( + { + "enabled": True, + "materialized": "view", + "persist_docs": {}, + "post-hook": [], + "pre-hook": [], + "vars": {}, + "quoting": {}, + "column_types": {}, + "tags": [], + } + ), tags=[], - path='model_one.sql', - raw_sql='', - description='', + path="model_one.sql", + raw_sql="", + description="", columns={}, - checksum=FileHash.from_contents(''), + checksum=FileHash.from_contents(""), ) self.context = mock.MagicMock() self.provider = VarProvider({}) self.config = mock.MagicMock( - config_version=2, vars=self.provider, cli_vars={}, project_name='root' + config_version=2, vars=self.provider, cli_vars={}, project_name="root" ) def test_var_default_something(self): - self.config.cli_vars = {'foo': 'baz'} + self.config.cli_vars = {"foo": "baz"} var = providers.RuntimeVar(self.context, self.config, self.model) - self.assertEqual(var('foo'), 'baz') - self.assertEqual(var('foo', 'bar'), 'baz') + self.assertEqual(var("foo"), "baz") + self.assertEqual(var("foo", "bar"), "baz") def test_var_default_none(self): - self.config.cli_vars = {'foo': None} + self.config.cli_vars = {"foo": None} var = providers.RuntimeVar(self.context, self.config, self.model) - self.assertEqual(var('foo'), None) - self.assertEqual(var('foo', 'bar'), None) + self.assertEqual(var("foo"), None) + self.assertEqual(var("foo", "bar"), None) def test_var_not_defined(self): var = providers.RuntimeVar(self.context, self.config, self.model) - self.assertEqual(var('foo', 'bar'), 'bar') + self.assertEqual(var("foo", "bar"), "bar") with self.assertRaises(dbt.exceptions.CompilationException): - var('foo') + var("foo") def test_parser_var_default_something(self): - self.config.cli_vars = {'foo': 'baz'} + self.config.cli_vars = {"foo": "baz"} var = 
providers.ParseVar(self.context, self.config, self.model) - self.assertEqual(var('foo'), 'baz') - self.assertEqual(var('foo', 'bar'), 'baz') + self.assertEqual(var("foo"), "baz") + self.assertEqual(var("foo", "bar"), "baz") def test_parser_var_default_none(self): - self.config.cli_vars = {'foo': None} + self.config.cli_vars = {"foo": None} var = providers.ParseVar(self.context, self.config, self.model) - self.assertEqual(var('foo'), None) - self.assertEqual(var('foo', 'bar'), None) + self.assertEqual(var("foo"), None) + self.assertEqual(var("foo", "bar"), None) def test_parser_var_not_defined(self): # at parse-time, we should not raise if we encounter a missing var # that way disabled models don't get parse errors var = providers.ParseVar(self.context, self.config, self.model) - self.assertEqual(var('foo', 'bar'), 'bar') - self.assertEqual(var('foo'), None) + self.assertEqual(var("foo", "bar"), "bar") + self.assertEqual(var("foo"), None) class TestParseWrapper(unittest.TestCase): @@ -108,16 +118,15 @@ def setUp(self): adapter_class = adapter_factory() self.mock_adapter = adapter_class(self.mock_config) self.namespace = mock.MagicMock() - self.wrapper = providers.ParseDatabaseWrapper( - self.mock_adapter, self.namespace) + self.wrapper = providers.ParseDatabaseWrapper(self.mock_adapter, self.namespace) self.responder = self.mock_adapter.responder def test_unwrapped_method(self): - self.assertEqual(self.wrapper.quote('test_value'), '"test_value"') - self.responder.quote.assert_called_once_with('test_value') + self.assertEqual(self.wrapper.quote("test_value"), '"test_value"') + self.responder.quote.assert_called_once_with("test_value") def test_wrapped_method(self): - found = self.wrapper.get_relation('database', 'schema', 'identifier') + found = self.wrapper.get_relation("database", "schema", "identifier") self.assertEqual(found, None) self.responder.get_relation.assert_not_called() @@ -126,163 +135,169 @@ class TestRuntimeWrapper(unittest.TestCase): def setUp(self): 
self.mock_config = mock.MagicMock() self.mock_config.quoting = { - 'database': True, 'schema': True, 'identifier': True} + "database": True, + "schema": True, + "identifier": True, + } adapter_class = adapter_factory() self.mock_adapter = adapter_class(self.mock_config) self.namespace = mock.MagicMock() - self.wrapper = providers.RuntimeDatabaseWrapper( - self.mock_adapter, self.namespace) + self.wrapper = providers.RuntimeDatabaseWrapper(self.mock_adapter, self.namespace) self.responder = self.mock_adapter.responder def test_unwrapped_method(self): # the 'quote' method isn't wrapped, we should get our expected inputs - self.assertEqual(self.wrapper.quote('test_value'), '"test_value"') - self.responder.quote.assert_called_once_with('test_value') + self.assertEqual(self.wrapper.quote("test_value"), '"test_value"') + self.responder.quote.assert_called_once_with("test_value") def test_wrapped_method(self): rel = mock.MagicMock() rel.matches.return_value = True self.responder.list_relations_without_caching.return_value = [rel] - found = self.wrapper.get_relation('database', 'schema', 'identifier') + found = self.wrapper.get_relation("database", "schema", "identifier") self.assertEqual(found, rel) - self.responder.list_relations_without_caching.assert_called_once_with( - mock.ANY) + self.responder.list_relations_without_caching.assert_called_once_with(mock.ANY) # extract the argument assert len(self.responder.list_relations_without_caching.mock_calls) == 1 - assert len( - self.responder.list_relations_without_caching.call_args[0]) == 1 + assert len(self.responder.list_relations_without_caching.call_args[0]) == 1 arg = self.responder.list_relations_without_caching.call_args[0][0] - assert arg.database == 'database' - assert arg.schema == 'schema' + assert arg.database == "database" + assert arg.schema == "schema" -def assert_has_keys( - required_keys: Set[str], maybe_keys: Set[str], ctx: Dict[str, Any] -): +def assert_has_keys(required_keys: Set[str], maybe_keys: 
Set[str], ctx: Dict[str, Any]): keys = set(ctx) for key in required_keys: - assert key in keys, f'{key} in required keys but not in context' + assert key in keys, f"{key} in required keys but not in context" keys.remove(key) extras = keys.difference(maybe_keys) - assert not extras, f'got extra keys in context: {extras}' - - -REQUIRED_BASE_KEYS = frozenset({ - 'context', - 'builtins', - 'dbt_version', - 'var', - 'env_var', - 'return', - 'fromjson', - 'tojson', - 'fromyaml', - 'toyaml', - 'log', - 'run_started_at', - 'invocation_id', - 'modules', - 'flags', - 'print', -}) - -REQUIRED_TARGET_KEYS = REQUIRED_BASE_KEYS | {'target'} -REQUIRED_DOCS_KEYS = REQUIRED_TARGET_KEYS | {'project_name'} | {'doc'} -MACROS = frozenset({'macro_a', 'macro_b', 'root', 'dbt'}) -REQUIRED_QUERY_HEADER_KEYS = REQUIRED_TARGET_KEYS | {'project_name'} | MACROS + assert not extras, f"got extra keys in context: {extras}" + + +REQUIRED_BASE_KEYS = frozenset( + { + "context", + "builtins", + "dbt_version", + "var", + "env_var", + "return", + "fromjson", + "tojson", + "fromyaml", + "toyaml", + "set", + "try_set", + "zip", + "try_zip", + "log", + "run_started_at", + "invocation_id", + "modules", + "flags", + "print", + } +) + +REQUIRED_TARGET_KEYS = REQUIRED_BASE_KEYS | {"target"} +REQUIRED_DOCS_KEYS = REQUIRED_TARGET_KEYS | {"project_name"} | {"doc"} +MACROS = frozenset({"macro_a", "macro_b", "root", "dbt"}) +REQUIRED_QUERY_HEADER_KEYS = REQUIRED_TARGET_KEYS | {"project_name"} | MACROS REQUIRED_MACRO_KEYS = REQUIRED_QUERY_HEADER_KEYS | { - '_sql_results', - 'load_result', - 'store_result', - 'store_raw_result', - 'validation', - 'write', - 'render', - 'try_or_compiler_error', - 'load_agate_table', - 'ref', - 'source', - 'config', - 'execute', - 'exceptions', - 'database', - 'schema', - 'adapter', - 'api', - 'column', - 'env', - 'graph', - 'model', - 'pre_hooks', - 'post_hooks', - 'sql', - 'sql_now', - 'adapter_macro', - 'selected_resources' + "_sql_results", + "load_result", + "store_result", + 
"store_raw_result", + "validation", + "write", + "render", + "try_or_compiler_error", + "load_agate_table", + "ref", + "source", + "config", + "execute", + "exceptions", + "database", + "schema", + "adapter", + "api", + "column", + "env", + "graph", + "model", + "pre_hooks", + "post_hooks", + "sql", + "sql_now", + "adapter_macro", + "selected_resources", } -REQUIRED_MODEL_KEYS = REQUIRED_MACRO_KEYS | {'this'} -MAYBE_KEYS = frozenset({'debug'}) +REQUIRED_MODEL_KEYS = REQUIRED_MACRO_KEYS | {"this"} +MAYBE_KEYS = frozenset({"debug"}) POSTGRES_PROFILE_DATA = { - 'target': 'test', - 'quoting': {}, - 'outputs': { - 'test': { - 'type': 'postgres', - 'host': 'localhost', - 'schema': 'analytics', - 'user': 'test', - 'pass': 'test', - 'dbname': 'test', - 'port': 1, + "target": "test", + "quoting": {}, + "outputs": { + "test": { + "type": "postgres", + "host": "localhost", + "schema": "analytics", + "user": "test", + "pass": "test", + "dbname": "test", + "port": 1, } }, } PROJECT_DATA = { - 'name': 'root', - 'version': '0.1', - 'profile': 'test', - 'project-root': os.getcwd(), - 'config-version': 2, + "name": "root", + "version": "0.1", + "profile": "test", + "project-root": os.getcwd(), + "config-version": 2, } def model(): return ParsedModelNode( - alias='model_one', - name='model_one', - database='dbt', - schema='analytics', + alias="model_one", + name="model_one", + database="dbt", + schema="analytics", resource_type=NodeType.Model, - unique_id='model.root.model_one', - fqn=['root', 'model_one'], - package_name='root', - original_file_path='model_one.sql', - root_path='/usr/src/app', + unique_id="model.root.model_one", + fqn=["root", "model_one"], + package_name="root", + original_file_path="model_one.sql", + root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), - config=NodeConfig.from_dict({ - 'enabled': True, - 'materialized': 'view', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'vars': {}, - 'quoting': {}, - 'column_types': {}, - 
'tags': [], - }), + config=NodeConfig.from_dict( + { + "enabled": True, + "materialized": "view", + "persist_docs": {}, + "post-hook": [], + "pre-hook": [], + "vars": {}, + "quoting": {}, + "column_types": {}, + "tags": [], + } + ), tags=[], - path='model_one.sql', - raw_sql='', - description='', - columns={} + path="model_one.sql", + raw_sql="", + description="", + columns={}, ) @@ -295,8 +310,8 @@ def mock_macro(name, package_name): macro = mock.MagicMock( __class__=ParsedMacro, package_name=package_name, - resource_type='macro', - unique_id=f'macro.{package_name}.{name}', + resource_type="macro", + unique_id=f"macro.{package_name}.{name}", ) # Mock(name=...) does not set the `name` attribute, this does. macro.name = name @@ -305,7 +320,7 @@ def mock_macro(name, package_name): def mock_manifest(config): manifest_macros = {} - for name in ['macro_a', 'macro_b']: + for name in ["macro_a", "macro_b"]: macro = mock_macro(name, config.project_name) manifest_macros[macro.unique_id] = macro return mock.MagicMock(macros=manifest_macros) @@ -314,47 +329,49 @@ def mock_manifest(config): def mock_model(): return mock.MagicMock( __class__=ParsedModelNode, - alias='model_one', - name='model_one', - database='dbt', - schema='analytics', + alias="model_one", + name="model_one", + database="dbt", + schema="analytics", resource_type=NodeType.Model, - unique_id='model.root.model_one', - fqn=['root', 'model_one'], - package_name='root', - original_file_path='model_one.sql', - root_path='/usr/src/app', + unique_id="model.root.model_one", + fqn=["root", "model_one"], + package_name="root", + original_file_path="model_one.sql", + root_path="/usr/src/app", refs=[], sources=[], depends_on=DependsOn(), - config=NodeConfig.from_dict({ - 'enabled': True, - 'materialized': 'view', - 'persist_docs': {}, - 'post-hook': [], - 'pre-hook': [], - 'vars': {}, - 'quoting': {}, - 'column_types': {}, - 'tags': [], - }), + config=NodeConfig.from_dict( + { + "enabled": True, + "materialized": "view", + 
"persist_docs": {}, + "post-hook": [], + "pre-hook": [], + "vars": {}, + "quoting": {}, + "column_types": {}, + "tags": [], + } + ), tags=[], - path='model_one.sql', - raw_sql='', - description='', + path="model_one.sql", + raw_sql="", + description="", columns={}, ) @pytest.fixture def get_adapter(): - with mock.patch.object(providers, 'get_adapter') as patch: + with mock.patch.object(providers, "get_adapter") as patch: yield patch @pytest.fixture def get_include_paths(): - with mock.patch.object(factory, 'get_include_paths') as patch: + with mock.patch.object(factory, "get_include_paths") as patch: patch.return_value = [] yield patch @@ -388,10 +405,10 @@ def test_query_header_context(config_postgres, manifest_fx): def test_macro_runtime_context(config_postgres, manifest_fx, get_adapter, get_include_paths): ctx = providers.generate_runtime_macro_context( - macro=manifest_fx.macros['macro.root.macro_a'], + macro=manifest_fx.macros["macro.root.macro_a"], config=config_postgres, manifest=manifest_fx, - package_name='root', + package_name="root", ) assert_has_keys(REQUIRED_MACRO_KEYS, MAYBE_KEYS, ctx) @@ -416,52 +433,47 @@ def test_model_runtime_context(config_postgres, manifest_fx, get_adapter, get_in def test_docs_runtime_context(config_postgres): - ctx = docs.generate_runtime_docs_context(config_postgres, mock_model(), [], 'root') + ctx = docs.generate_runtime_docs_context(config_postgres, mock_model(), [], "root") assert_has_keys(REQUIRED_DOCS_KEYS, MAYBE_KEYS, ctx) def test_macro_namespace_duplicates(config_postgres, manifest_fx): - mn = macros.MacroNamespaceBuilder( - 'root', 'search', MacroStack(), ['dbt_postgres', 'dbt'] - ) + mn = macros.MacroNamespaceBuilder("root", "search", MacroStack(), ["dbt_postgres", "dbt"]) mn.add_macros(manifest_fx.macros.values(), {}) # same pkg, same name: error with pytest.raises(dbt.exceptions.CompilationException): - mn.add_macro(mock_macro('macro_a', 'root'), {}) + mn.add_macro(mock_macro("macro_a", "root"), {}) # different 
pkg, same name: no error - mn.add_macros(mock_macro('macro_a', 'dbt'), {}) + mn.add_macros(mock_macro("macro_a", "dbt"), {}) def test_macro_namespace(config_postgres, manifest_fx): - mn = macros.MacroNamespaceBuilder( - 'root', 'search', MacroStack(), ['dbt_postgres', 'dbt']) + mn = macros.MacroNamespaceBuilder("root", "search", MacroStack(), ["dbt_postgres", "dbt"]) - dbt_macro = mock_macro('some_macro', 'dbt') + dbt_macro = mock_macro("some_macro", "dbt") # same namespace, same name, different pkg! - pg_macro = mock_macro('some_macro', 'dbt_postgres') + pg_macro = mock_macro("some_macro", "dbt_postgres") # same name, different package - package_macro = mock_macro('some_macro', 'root') + package_macro = mock_macro("some_macro", "root") - all_macros = itertools.chain(manifest_fx.macros.values(), [ - dbt_macro, pg_macro, package_macro]) + all_macros = itertools.chain(manifest_fx.macros.values(), [dbt_macro, pg_macro, package_macro]) namespace = mn.build_namespace(all_macros, {}) dct = dict(namespace) for result in [dct, namespace]: - assert 'dbt' in result - assert 'root' in result - assert 'some_macro' in result - assert 'dbt_postgres' not in result + assert "dbt" in result + assert "root" in result + assert "some_macro" in result + assert "dbt_postgres" not in result # tests __len__ assert len(result) == 5 # tests __iter__ - assert set(result) == {'dbt', 'root', - 'some_macro', 'macro_a', 'macro_b'} - assert len(result['dbt']) == 1 + assert set(result) == {"dbt", "root", "some_macro", "macro_a", "macro_b"} + assert len(result["dbt"]) == 1 # from the regular manifest + some_macro - assert len(result['root']) == 3 - assert result['dbt']['some_macro'].macro is pg_macro - assert result['root']['some_macro'].macro is package_macro - assert result['some_macro'].macro is package_macro + assert len(result["root"]) == 3 + assert result["dbt"]["some_macro"].macro is pg_macro + assert result["root"]["some_macro"].macro is package_macro + assert result["some_macro"].macro is 
package_macro diff --git a/tests/functional/context_methods/test_builtin_functions.py b/tests/functional/context_methods/test_builtin_functions.py new file mode 100644 index 00000000000..b59807093ea --- /dev/null +++ b/tests/functional/context_methods/test_builtin_functions.py @@ -0,0 +1,67 @@ +import pytest + +from dbt.tests.util import run_dbt, run_dbt_and_capture, write_file +from dbt.exceptions import CompilationException + +macros__validate_set_sql = """ +{% macro validate_set() %} + {% set set_result = set([1, 2, 2, 3, 'foo', False]) %} + {{ log("set_result: " ~ set_result) }} + {% set try_set_result = try_set([1, 2, 2, 3, 'foo', False]) %} + {{ log("try_set_result: " ~ try_set_result) }} +{% endmacro %} +""" + +macros__validate_zip_sql = """ +{% macro validate_zip() %} + {% set list_a = [1, 2] %} + {% set list_b = ['foo', 'bar'] %} + {% set zip_result = zip(list_a, list_b) | list %} + {{ log("zip_result: " ~ zip_result) }} + {% set try_zip_result = try_zip(list_a, list_b) | list %} + {{ log("try_zip_result: " ~ try_zip_result) }} +{% endmacro %} +""" + +models__set_exception_sql = """ +{% set try_set_result = try_set(1) %} +""" + +models__zip_exception_sql = """ +{% set try_set_result = try_zip(1) %} +""" + + +class TestContextBuiltins: + @pytest.fixture(scope="class") + def macros(self): + return { + "validate_set.sql": macros__validate_set_sql, + "validate_zip.sql": macros__validate_zip_sql, + } + + def test_builtin_set_function(self, project): + _, log_output = run_dbt_and_capture(["--debug", "run-operation", "validate_set"]) + + expected_set = {False, 1, 2, 3, "foo"} + assert f"set_result: {expected_set}" in log_output + assert f"try_set_result: {expected_set}" in log_output + + def test_builtin_zip_function(self, project): + _, log_output = run_dbt_and_capture(["--debug", "run-operation", "validate_zip"]) + + expected_zip = [(1, "foo"), (2, "bar")] + assert f"zip_result: {expected_zip}" in log_output + assert f"try_zip_result: {expected_zip}" in 
log_output + + +class TestContextBuiltinExceptions: + # Assert compilation errors are raised with try_ equivalents + def test_builtin_function_exception(self, project): + write_file(models__set_exception_sql, project.project_root, "models", "raise.sql") + with pytest.raises(CompilationException): + run_dbt(["compile"]) + + write_file(models__zip_exception_sql, project.project_root, "models", "raise.sql") + with pytest.raises(CompilationException): + run_dbt(["compile"])