diff --git a/splitio/engine/evaluator.py b/splitio/engine/evaluator.py
index d3e05f78..5cbbd205 100644
--- a/splitio/engine/evaluator.py
+++ b/splitio/engine/evaluator.py
@@ -7,6 +7,7 @@
 from splitio.models.grammar.matchers.misc import DependencyMatcher
 from splitio.models.grammar.matchers.keys import UserDefinedSegmentMatcher
 from splitio.models.grammar.matchers import RuleBasedSegmentMatcher
+from splitio.models.grammar.matchers.prerequisites import PrerequisitesMatcher
 from splitio.models.rule_based_segments import SegmentType
 from splitio.optional.loaders import asyncio
 
@@ -56,12 +57,22 @@ def eval_with_context(self, key, bucketing, feature_name, attrs, ctx):
                 label = Label.KILLED
                 _treatment = feature.default_treatment
             else:
-                treatment, label = self._treatment_for_flag(feature, key, bucketing, attrs, ctx)
-                if treatment is None:
-                    label = Label.NO_CONDITION_MATCHED
-                    _treatment = feature.default_treatment
-                else:
-                    _treatment = treatment
+                if feature.prerequisites is not None:
+                    prerequisites_matcher = PrerequisitesMatcher(feature.prerequisites)
+                    if not prerequisites_matcher.match(key, attrs, {
+                            'evaluator': self,
+                            'bucketing_key': bucketing,
+                            'ec': ctx}):
+                        label = Label.PREREQUISITES_NOT_MET
+                        _treatment = feature.default_treatment
+
+                if _treatment == CONTROL:
+                    treatment, label = self._treatment_for_flag(feature, key, bucketing, attrs, ctx)
+                    if treatment is None:
+                        label = Label.NO_CONDITION_MATCHED
+                        _treatment = feature.default_treatment
+                    else:
+                        _treatment = treatment
 
         return {
             'treatment': _treatment,
@@ -133,7 +144,6 @@ def context_for(self, key, feature_names):
             rb_segments
         )
 
-
 class AsyncEvaluationDataFactory:
 
     def __init__(self, split_storage, segment_storage, rbs_segment_storage):
@@ -199,6 +209,7 @@ def get_pending_objects(features, splits, rbsegments, rb_segments, pending_membe
     pending_rbs = set()
     for feature in features.values():
         cf, cs, crbs = get_dependencies(feature)
+        cf.extend(get_prerequisites(feature))
         pending.update(filter(lambda f: f not in splits, cf))
         pending_memberships.update(cs)
         pending_rbs.update(filter(lambda f: f not in rb_segments, crbs))
@@ -223,4 +234,6 @@ def update_objects(fetched, fetched_rbs, splits, rb_segments):
     rb_segments.update(rbsegments)
 
     return features, rbsegments, splits, rb_segments
-    
\ No newline at end of file
+
+def get_prerequisites(feature):
+    return [prerequisite.feature_flag_name for prerequisite in feature.prerequisites]
diff --git a/splitio/models/grammar/matchers/prerequisites.py b/splitio/models/grammar/matchers/prerequisites.py
new file mode 100644
index 00000000..799df5c4
--- /dev/null
+++ b/splitio/models/grammar/matchers/prerequisites.py
@@ -0,0 +1,38 @@
+"""Prerequisites matcher classes."""
+
+class PrerequisitesMatcher(object):
+
+    def __init__(self, prerequisites):
+        """
+        Build a PrerequisitesMatcher.
+
+        :param prerequisites: prerequisites
+        :type prerequisites: list of Prerequisites
+        """
+        self._prerequisites = prerequisites
+
+    def match(self, key, attributes=None, context=None):
+        """
+        Evaluate user input against a matcher and return whether the match is successful.
+
+        :param key: User key.
+        :type key: str.
+        :param attributes: Custom user attributes.
+        :type attributes: dict.
+        :param context: Evaluation context
+        :type context: dict
+
+        :returns: Whether the match is successful.
+        :rtype: bool
+        """
+        if self._prerequisites is None:
+            return True
+
+        evaluator = context.get('evaluator')
+        bucketing_key = context.get('bucketing_key')
+        for prerequisite in self._prerequisites:
+            result = evaluator.eval_with_context(key, bucketing_key, prerequisite.feature_flag_name, attributes, context['ec'])
+            if result['treatment'] not in prerequisite.treatments:
+                return False
+
+        return True
\ No newline at end of file
diff --git a/splitio/models/impressions.py b/splitio/models/impressions.py
index 9bdfb3a9..9224d15b 100644
--- a/splitio/models/impressions.py
+++ b/splitio/models/impressions.py
@@ -60,3 +60,8 @@ class Label(object):  # pylint: disable=too-few-public-methods
     # Treatment: control
     # Label: not ready
    NOT_READY = 'not ready'
+
+    # Condition: Prerequisites not met
+    # Treatment: Default treatment
+    # Label: prerequisites not met
+    PREREQUISITES_NOT_MET = 'prerequisites not met'
diff --git a/splitio/models/splits.py b/splitio/models/splits.py
index 92a277c4..47e69284 100644
--- a/splitio/models/splits.py
+++ b/splitio/models/splits.py
@@ -10,7 +10,7 @@
 
 SplitView = namedtuple(
     'SplitView',
-    ['name', 'traffic_type', 'killed', 'treatments', 'change_number', 'configs', 'default_treatment', 'sets', 'impressions_disabled']
+    ['name', 'traffic_type', 'killed', 'treatments', 'change_number', 'configs', 'default_treatment', 'sets', 'impressions_disabled', 'prerequisites']
 )
 
 _DEFAULT_CONDITIONS_TEMPLATE = {
@@ -40,7 +40,28 @@
     "label": "targeting rule type unsupported by sdk"
 }
 
+class Prerequisites(object):
+    """Prerequisites."""
+    def __init__(self, feature_flag_name, treatments):
+        self._feature_flag_name = feature_flag_name
+        self._treatments = treatments
+
+    @property
+    def feature_flag_name(self):
+        """Return feature flag name."""
+        return self._feature_flag_name
+    @property
+    def treatments(self):
+        """Return treatments."""
+        return self._treatments
+
+    def to_json(self):
+        """Return a dict representation of this prerequisite."""
+        return {
+            "n": self._feature_flag_name,
+            "ts": list(self._treatments)
+        }
 
 
 class Status(Enum):
     """Split status."""
@@ -74,7 +95,8 @@ def __init__(  # pylint: disable=too-many-arguments
         traffic_allocation_seed=None,
         configurations=None,
         sets=None,
-        impressions_disabled=None
+        impressions_disabled=None,
+        prerequisites=None
     ):
         """
         Class constructor.
@@ -99,6 +121,8 @@ def __init__(  # pylint: disable=too-many-arguments
         :type sets: list
         :pram impressions_disabled: track impressions flag
         :type impressions_disabled: boolean
+        :param prerequisites: prerequisites
+        :type prerequisites: list of Prerequisites
         """
         self._name = name
         self._seed = seed
@@ -129,6 +153,7 @@ def __init__(  # pylint: disable=too-many-arguments
         self._configurations = configurations
         self._sets = set(sets) if sets is not None else set()
         self._impressions_disabled = impressions_disabled if impressions_disabled is not None else False
+        self._prerequisites = prerequisites if prerequisites is not None else []
 
     @property
     def name(self):
@@ -194,6 +219,11 @@ def sets(self):
     def impressions_disabled(self):
         """Return impressions_disabled of the split."""
         return self._impressions_disabled
+
+    @property
+    def prerequisites(self):
+        """Return prerequisites of the split."""
+        return self._prerequisites
 
     def get_configurations_for(self, treatment):
         """Return the mapping of treatments to configurations."""
@@ -224,7 +254,8 @@ def to_json(self):
             'conditions': [c.to_json() for c in self.conditions],
             'configurations': self._configurations,
             'sets': list(self._sets),
-            'impressionsDisabled': self._impressions_disabled
+            'impressionsDisabled': self._impressions_disabled,
+            'prerequisites': [prerequisite.to_json() for prerequisite in self._prerequisites]
         }
 
     def to_split_view(self):
@@ -243,7 +274,8 @@
             self._configurations if self._configurations is not None else {},
             self._default_treatment,
             list(self._sets) if self._sets is not None else [],
-            self._impressions_disabled
+            self._impressions_disabled,
+            self._prerequisites
         )
 
     def local_kill(self, default_treatment, change_number):
@@ -300,5 +332,13 @@ def from_raw(raw_split):
         traffic_allocation_seed=raw_split.get('trafficAllocationSeed'),
         configurations=raw_split.get('configurations'),
         sets=set(raw_split.get('sets')) if raw_split.get('sets') is not None else [],
-        impressions_disabled=raw_split.get('impressionsDisabled') if raw_split.get('impressionsDisabled') is not None else False
+        impressions_disabled=raw_split.get('impressionsDisabled') if raw_split.get('impressionsDisabled') is not None else False,
+        prerequisites=from_raw_prerequisites(raw_split.get('prerequisites')) if raw_split.get('prerequisites') is not None else []
     )
+
+def from_raw_prerequisites(raw_prerequisites):
+    to_return = []
+    for prerequisite in raw_prerequisites:
+        to_return.append(Prerequisites(prerequisite['n'], prerequisite['ts']))
+
+    return to_return
\ No newline at end of file
diff --git a/splitio/sync/split.py b/splitio/sync/split.py
index 1d1722f6..e5d1f645 100644
--- a/splitio/sync/split.py
+++ b/splitio/sync/split.py
@@ -433,7 +433,8 @@ def _make_feature_flag(feature_flag_name, conditions, configs=None):
             'defaultTreatment': 'control',
             'algo': 2,
             'conditions': conditions,
-            'configurations': configs
+            'configurations': configs,
+            'prerequisites': []
         })
 
     @staticmethod
@@ -542,6 +543,8 @@ def _sanitize_feature_flag_elements(self, parsed_feature_flags):
             if 'sets' not in feature_flag:
                 feature_flag['sets'] = []
             feature_flag['sets'] = validate_flag_sets(feature_flag['sets'], 'Localhost Validator')
+            if 'prerequisites' not in feature_flag:
+                feature_flag['prerequisites'] = []
             sanitized_feature_flags.append(feature_flag)
 
         return sanitized_feature_flags
@@ -560,6 +563,7 @@ def _sanitize_rb_segment_elements(self, parsed_rb_segments):
             if 'name' not in rb_segment or rb_segment['name'].strip() == '':
                 _LOGGER.warning("A rule based segment in json file does
not have (Name) or property is empty, skipping.") continue + for element in [('trafficTypeName', 'user', None, None, None, None), ('status', splits.Status.ACTIVE.value, None, None, [e.value for e in splits.Status], None), ('changeNumber', 0, 0, None, None, None)]: diff --git a/tests/client/test_input_validator.py b/tests/client/test_input_validator.py index 2f15d038..0659ee43 100644 --- a/tests/client/test_input_validator.py +++ b/tests/client/test_input_validator.py @@ -28,6 +28,7 @@ def test_get_treatment(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) storage_mock.fetch_many.return_value = {'some_feature': split_mock} rbs_storage = mocker.Mock(spec=InMemoryRuleBasedSegmentStorage) @@ -264,6 +265,7 @@ def test_get_treatment_with_config(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] def _configs(treatment): return '{"some": "property"}' if treatment == 'default_treatment' else None @@ -819,6 +821,8 @@ def test_get_treatments(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] + storage_mock = mocker.Mock(spec=SplitStorage) storage_mock.fetch_many.return_value = { 'some_feature': split_mock @@ -965,6 +969,7 @@ def test_get_treatments_with_config(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) storage_mock.fetch_many.return_value = { @@ -1113,6 +1118,7 @@ def test_get_treatments_by_flag_set(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=InMemorySplitStorage) storage_mock.fetch_many.return_value = { 'some_feature': split_mock @@ -1231,6 +1237,7 @@ def test_get_treatments_by_flag_sets(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=InMemorySplitStorage) storage_mock.fetch_many.return_value = { 'some_feature': split_mock @@ -1358,6 +1365,7 @@ def _configs(treatment): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=InMemorySplitStorage) storage_mock.fetch_many.return_value = { 'some_feature': split_mock @@ -1481,6 +1489,7 @@ def _configs(treatment): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=InMemorySplitStorage) storage_mock.fetch_many.return_value = { 'some_feature': split_mock @@ -1632,6 +1641,7 @@ async def test_get_treatment(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) async def fetch_many(*_): return { @@ -1889,6 +1899,7 @@ async def 
test_get_treatment_with_config(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] def _configs(treatment): return '{"some": "property"}' if treatment == 'default_treatment' else None @@ -2423,6 +2434,7 @@ async def test_get_treatments(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) async def get(*_): return split_mock @@ -2586,6 +2598,7 @@ async def test_get_treatments_with_config(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) async def get(*_): @@ -2749,6 +2762,7 @@ async def test_get_treatments_by_flag_set(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) async def get(*_): return split_mock @@ -2893,6 +2907,7 @@ async def test_get_treatments_by_flag_sets(self, mocker): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) async def get(*_): return split_mock @@ -3048,6 +3063,7 @@ def _configs(treatment): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) async def get(*_): return split_mock @@ -3195,6 +3211,7 @@ def _configs(treatment): conditions_mock = mocker.PropertyMock() conditions_mock.return_value = [] type(split_mock).conditions = conditions_mock + type(split_mock).prerequisites = [] storage_mock = mocker.Mock(spec=SplitStorage) async def get(*_): return split_mock diff --git a/tests/engine/test_evaluator.py b/tests/engine/test_evaluator.py index a2937126..3ec7e136 100644 --- a/tests/engine/test_evaluator.py +++ b/tests/engine/test_evaluator.py @@ -5,7 +5,7 @@ import pytest import copy -from splitio.models.splits import Split, Status +from splitio.models.splits import Split, Status, from_raw, Prerequisites from splitio.models import segments from splitio.models.grammar.condition import Condition, ConditionType from splitio.models.impressions import Label @@ -127,6 +127,7 @@ def test_evaluate_treatment_killed_split(self, mocker): mocked_split.killed = True mocked_split.change_number = 123 mocked_split.get_configurations_for.return_value = '{"some_property": 123}' + mocked_split.prerequisites = [] ctx = EvaluationContext(flags={'some': mocked_split}, segment_memberships=set(), rbs_segments={}) result = e.eval_with_context('some_key', 'some_bucketing_key', 'some', {}, ctx) @@ -146,6 +147,8 @@ def test_evaluate_treatment_ok(self, mocker): mocked_split.killed = False mocked_split.change_number = 123 mocked_split.get_configurations_for.return_value = '{"some_property": 123}' + mocked_split.prerequisites = [] + ctx = EvaluationContext(flags={'some': mocked_split}, segment_memberships=set(), rbs_segments={}) result = e.eval_with_context('some_key', 'some_bucketing_key', 'some', {}, ctx) assert result['treatment'] == 'on' @@ -165,6 +168,8 @@ def 
test_evaluate_treatment_ok_no_config(self, mocker): mocked_split.killed = False mocked_split.change_number = 123 mocked_split.get_configurations_for.return_value = None + mocked_split.prerequisites = [] + ctx = EvaluationContext(flags={'some': mocked_split}, segment_memberships=set(), rbs_segments={}) result = e.eval_with_context('some_key', 'some_bucketing_key', 'some', {}, ctx) assert result['treatment'] == 'on' @@ -184,6 +189,7 @@ def test_evaluate_treatments(self, mocker): mocked_split.killed = False mocked_split.change_number = 123 mocked_split.get_configurations_for.return_value = '{"some_property": 123}' + mocked_split.prerequisites = [] mocked_split2 = mocker.Mock(spec=Split) mocked_split2.name = 'feature4' @@ -191,6 +197,7 @@ def test_evaluate_treatments(self, mocker): mocked_split2.killed = False mocked_split2.change_number = 123 mocked_split2.get_configurations_for.return_value = None + mocked_split2.prerequisites = [] ctx = EvaluationContext(flags={'feature2': mocked_split, 'feature4': mocked_split2}, segment_memberships=set(), rbs_segments={}) results = e.eval_many_with_context('some_key', 'some_bucketing_key', ['feature2', 'feature4'], {}, ctx) @@ -215,6 +222,8 @@ def test_get_gtreatment_for_split_no_condition_matches(self, mocker): mocked_split.change_number = '123' mocked_split.conditions = [] mocked_split.get_configurations_for = None + mocked_split.prerequisites = [] + ctx = EvaluationContext(flags={'some': mocked_split}, segment_memberships=set(), rbs_segments={}) assert e._treatment_for_flag(mocked_split, 'some_key', 'some_bucketing', {}, ctx) == ( 'off', @@ -232,6 +241,8 @@ def test_get_gtreatment_for_split_non_rollout(self, mocker): mocked_split = mocker.Mock(spec=Split) mocked_split.killed = False mocked_split.conditions = [mocked_condition_1] + mocked_split.prerequisites = [] + treatment, label = e._treatment_for_flag(mocked_split, 'some_key', 'some_bucketing', {}, EvaluationContext(None, None, None)) assert treatment == 'on' assert label == 'some_label' @@ -240,7 +251,7 @@ def test_evaluate_treatment_with_rule_based_segment(self, mocker): """Test that a non-killed split returns the appropriate treatment.""" e = evaluator.Evaluator(splitters.Splitter()) - mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False) + mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, []) ctx = EvaluationContext(flags={'some': mocked_split}, segment_memberships=set(), rbs_segments={'sample_rule_based_segment': rule_based_segments.from_raw(rbs_raw)}) result = e.eval_with_context('bilal@split.io', 'bilal@split.io', 'some', {'email': 'bilal@split.io'}, ctx) @@ -257,7 +268,7 @@ def test_evaluate_treatment_with_rbs_in_condition(self): with open(rbs_segments, 'r') as flo: data = json.loads(flo.read()) - mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False) + mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, []) rbs = rule_based_segments.from_raw(data["rbs"]["d"][0]) rbs2 = rule_based_segments.from_raw(data["rbs"]["d"][1]) rbs_storage.update([rbs, rbs2], [], 12) @@ -279,7 +290,7 @@ def test_using_segment_in_excluded(self): segment_storage = InMemorySegmentStorage() evaluation_facctory = EvaluationDataFactory(splits_storage, segment_storage, rbs_storage) - mocked_split = Split('some', 12345, False, 
'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False) + mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, []) rbs = rule_based_segments.from_raw(data["rbs"]["d"][0]) rbs_storage.update([rbs], [], 12) splits_storage.update([mocked_split], [], 12) @@ -303,7 +314,7 @@ def test_using_rbs_in_excluded(self): segment_storage = InMemorySegmentStorage() evaluation_facctory = EvaluationDataFactory(splits_storage, segment_storage, rbs_storage) - mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False) + mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, []) rbs = rule_based_segments.from_raw(data["rbs"]["d"][0]) rbs2 = rule_based_segments.from_raw(data["rbs"]["d"][1]) rbs_storage.update([rbs, rbs2], [], 12) @@ -315,7 +326,52 @@ def test_using_rbs_in_excluded(self): assert e.eval_with_context('bilal', 'bilal', 'some', {'email': 'bilal'}, ctx)['treatment'] == "on" ctx = evaluation_facctory.context_for('bilal2@split.io', ['some']) assert e.eval_with_context('bilal2@split.io', 'bilal2@split.io', 'some', {'email': 'bilal2@split.io'}, ctx)['treatment'] == "on" - + + def test_prerequisites(self): + splits_load = os.path.join(os.path.dirname(__file__), '../models/grammar/files', 'splits_prereq.json') + with open(splits_load, 'r') as flo: + data = json.loads(flo.read()) + e = evaluator.Evaluator(splitters.Splitter()) + splits_storage = InMemorySplitStorage() + rbs_storage = InMemoryRuleBasedSegmentStorage() + segment_storage = InMemorySegmentStorage() + evaluation_facctory = EvaluationDataFactory(splits_storage, segment_storage, rbs_storage) + + rbs = rule_based_segments.from_raw(data["rbs"]["d"][0]) + split1 = from_raw(data["ff"]["d"][0]) + split2 = from_raw(data["ff"]["d"][1]) + split3 = from_raw(data["ff"]["d"][2]) + split4 = from_raw(data["ff"]["d"][3]) + rbs_storage.update([rbs], [], 12) + splits_storage.update([split1, split2, split3, split4], [], 12) + segment = segments.from_raw({'name': 'segment-test', 'added': ['pato@split.io'], 'removed': [], 'till': 123}) + segment_storage.put(segment) + + ctx = evaluation_facctory.context_for('bilal@split.io', ['test_prereq']) + assert e.eval_with_context('bilal@split.io', 'bilal@split.io', 'test_prereq', {'email': 'bilal@split.io'}, ctx)['treatment'] == "on" + assert e.eval_with_context('bilal@split.io', 'bilal@split.io', 'test_prereq', {}, ctx)['treatment'] == "def_treatment" + + ctx = evaluation_facctory.context_for('mauro@split.io', ['test_prereq']) + assert e.eval_with_context('mauro@split.io', 'mauro@split.io', 'test_prereq', {'email': 'mauro@split.io'}, ctx)['treatment'] == "def_treatment" + + ctx = evaluation_facctory.context_for('pato@split.io', ['test_prereq']) + assert e.eval_with_context('pato@split.io', 'pato@split.io', 'test_prereq', {'email': 'pato@split.io'}, ctx)['treatment'] == "def_treatment" + + ctx = evaluation_facctory.context_for('nico@split.io', ['test_prereq']) + assert e.eval_with_context('nico@split.io', 'nico@split.io', 'test_prereq', {'email': 'nico@split.io'}, ctx)['treatment'] == "on" + + ctx = evaluation_facctory.context_for('bilal@split.io', ['prereq_chain']) + assert e.eval_with_context('bilal@split.io', 'bilal@split.io', 'prereq_chain', {'email': 'bilal@split.io'}, ctx)['treatment'] == "on_whitelist" + + ctx = evaluation_facctory.context_for('nico@split.io', ['prereq_chain']) 
+ assert e.eval_with_context('nico@split.io', 'nico@split.io', 'test_prereq', {'email': 'nico@split.io'}, ctx)['treatment'] == "on" + + ctx = evaluation_facctory.context_for('pato@split.io', ['prereq_chain']) + assert e.eval_with_context('pato@split.io', 'pato@split.io', 'prereq_chain', {'email': 'pato@split.io'}, ctx)['treatment'] == "on_default" + + ctx = evaluation_facctory.context_for('mauro@split.io', ['prereq_chain']) + assert e.eval_with_context('mauro@split.io', 'mauro@split.io', 'prereq_chain', {'email': 'mauro@split.io'}, ctx)['treatment'] == "on_default" + @pytest.mark.asyncio async def test_evaluate_treatment_with_rbs_in_condition_async(self): e = evaluator.Evaluator(splitters.Splitter()) @@ -388,16 +444,63 @@ async def test_using_rbs_in_excluded_async(self): ctx = await evaluation_facctory.context_for('bilal2@split.io', ['some']) assert e.eval_with_context('bilal2@split.io', 'bilal2@split.io', 'some', {'email': 'bilal2@split.io'}, ctx)['treatment'] == "on" + @pytest.mark.asyncio + async def test_prerequisites(self): + splits_load = os.path.join(os.path.dirname(__file__), '../models/grammar/files', 'splits_prereq.json') + with open(splits_load, 'r') as flo: + data = json.loads(flo.read()) + e = evaluator.Evaluator(splitters.Splitter()) + splits_storage = InMemorySplitStorageAsync() + rbs_storage = InMemoryRuleBasedSegmentStorageAsync() + segment_storage = InMemorySegmentStorageAsync() + evaluation_facctory = AsyncEvaluationDataFactory(splits_storage, segment_storage, rbs_storage) + + rbs = rule_based_segments.from_raw(data["rbs"]["d"][0]) + split1 = from_raw(data["ff"]["d"][0]) + split2 = from_raw(data["ff"]["d"][1]) + split3 = from_raw(data["ff"]["d"][2]) + split4 = from_raw(data["ff"]["d"][3]) + await rbs_storage.update([rbs], [], 12) + await splits_storage.update([split1, split2, split3, split4], [], 12) + segment = segments.from_raw({'name': 'segment-test', 'added': ['pato@split.io'], 'removed': [], 'till': 123}) + await segment_storage.put(segment) + + ctx = await evaluation_facctory.context_for('bilal@split.io', ['test_prereq']) + assert e.eval_with_context('bilal@split.io', 'bilal@split.io', 'test_prereq', {'email': 'bilal@split.io'}, ctx)['treatment'] == "on" + assert e.eval_with_context('bilal@split.io', 'bilal@split.io', 'test_prereq', {}, ctx)['treatment'] == "def_treatment" + + ctx = await evaluation_facctory.context_for('mauro@split.io', ['test_prereq']) + assert e.eval_with_context('mauro@split.io', 'mauro@split.io', 'test_prereq', {'email': 'mauro@split.io'}, ctx)['treatment'] == "def_treatment" + + ctx = await evaluation_facctory.context_for('pato@split.io', ['test_prereq']) + assert e.eval_with_context('pato@split.io', 'pato@split.io', 'test_prereq', {'email': 'pato@split.io'}, ctx)['treatment'] == "def_treatment" + + ctx = await evaluation_facctory.context_for('nico@split.io', ['test_prereq']) + assert e.eval_with_context('nico@split.io', 'nico@split.io', 'test_prereq', {'email': 'nico@split.io'}, ctx)['treatment'] == "on" + + ctx = await evaluation_facctory.context_for('bilal@split.io', ['prereq_chain']) + assert e.eval_with_context('bilal@split.io', 'bilal@split.io', 'prereq_chain', {'email': 'bilal@split.io'}, ctx)['treatment'] == "on_whitelist" + + ctx = await evaluation_facctory.context_for('nico@split.io', ['prereq_chain']) + assert e.eval_with_context('nico@split.io', 'nico@split.io', 'test_prereq', {'email': 'nico@split.io'}, ctx)['treatment'] == "on" + + ctx = await evaluation_facctory.context_for('pato@split.io', ['prereq_chain']) + assert 
e.eval_with_context('pato@split.io', 'pato@split.io', 'prereq_chain', {'email': 'pato@split.io'}, ctx)['treatment'] == "on_default" + + ctx = await evaluation_facctory.context_for('mauro@split.io', ['prereq_chain']) + assert e.eval_with_context('mauro@split.io', 'mauro@split.io', 'prereq_chain', {'email': 'mauro@split.io'}, ctx)['treatment'] == "on_default" + class EvaluationDataFactoryTests(object): """Test evaluation factory class.""" def test_get_context(self): """Test context.""" - mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False) + mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, [Prerequisites('split2', ['on'])]) + split2 = Split('split2', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, []) flag_storage = InMemorySplitStorage([]) segment_storage = InMemorySegmentStorage() rbs_segment_storage = InMemoryRuleBasedSegmentStorage() - flag_storage.update([mocked_split], [], -1) + flag_storage.update([mocked_split, split2], [], -1) rbs = copy.deepcopy(rbs_raw) rbs['conditions'].append( {"matcherGroup": { @@ -421,6 +524,7 @@ def test_get_context(self): ec = eval_factory.context_for('bilal@split.io', ['some']) assert ec.rbs_segments == {'sample_rule_based_segment': rbs} assert ec.segment_memberships == {"employees": False} + assert ec.flags.get("split2").name == "split2" segment_storage.update("employees", {"mauro@split.io"}, {}, 1234) ec = eval_factory.context_for('mauro@split.io', ['some']) @@ -433,11 +537,12 @@ class EvaluationDataFactoryAsyncTests(object): @pytest.mark.asyncio async def test_get_context(self): """Test context.""" - mocked_split = Split('some', 123, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False) + mocked_split = Split('some', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, [Prerequisites('split2', ['on'])]) + split2 = Split('split2', 12345, False, 'off', 'user', Status.ACTIVE, 12, split_conditions, 1.2, 100, 1234, {}, None, False, []) flag_storage = InMemorySplitStorageAsync([]) segment_storage = InMemorySegmentStorageAsync() rbs_segment_storage = InMemoryRuleBasedSegmentStorageAsync() - await flag_storage.update([mocked_split], [], -1) + await flag_storage.update([mocked_split, split2], [], -1) rbs = copy.deepcopy(rbs_raw) rbs['conditions'].append( {"matcherGroup": { @@ -461,6 +566,7 @@ async def test_get_context(self): ec = await eval_factory.context_for('bilal@split.io', ['some']) assert ec.rbs_segments == {'sample_rule_based_segment': rbs} assert ec.segment_memberships == {"employees": False} + assert ec.flags.get("split2").name == "split2" await segment_storage.update("employees", {"mauro@split.io"}, {}, 1234) ec = await eval_factory.context_for('mauro@split.io', ['some']) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index bec5cd6f..845e8c72 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -3,7 +3,7 @@ rbsegments_json = [{"changeNumber": 12, "name": "some_segment", "status": "ACTIVE","trafficTypeName": "user","excluded":{"keys":[],"segments":[]},"conditions": []}] split11 = {"ff": {"t": 1675443569027, "s": -1, "d": [ - {"trafficTypeName": "user", "name": "SPLIT_2","trafficAllocation": 100,"trafficAllocationSeed": 1057590779, "seed": -113875324, "status": "ACTIVE","killed": False, "defaultTreatment": "off", 
"changeNumber": 1675443569027,"algo": 2, "configurations": {},"conditions": [{"conditionType": "ROLLOUT","matcherGroup": {"combiner": "AND","matchers": [{"keySelector": { "trafficType": "user", "attribute": None },"matcherType": "ALL_KEYS","negate": False,"userDefinedSegmentMatcherData": None,"whitelistMatcherData": None,"unaryNumericMatcherData": None,"betweenMatcherData": None,"booleanMatcherData": None,"dependencyMatcherData": None,"stringMatcherData": None}]},"partitions": [{ "treatment": "on", "size": 100 },{ "treatment": "off", "size": 0 }],"label": "default rule"}], "sets": ["set_1"], "impressionsDisabled": False}, + {"trafficTypeName": "user", "name": "SPLIT_2","trafficAllocation": 100,"trafficAllocationSeed": 1057590779, "seed": -113875324, "status": "ACTIVE","killed": False, "defaultTreatment": "off", "changeNumber": 1675443569027,"algo": 2, "configurations": {},"conditions": [{"conditionType": "ROLLOUT","matcherGroup": {"combiner": "AND","matchers": [{"keySelector": { "trafficType": "user", "attribute": None },"matcherType": "ALL_KEYS","negate": False,"userDefinedSegmentMatcherData": None,"whitelistMatcherData": None,"unaryNumericMatcherData": None,"betweenMatcherData": None,"booleanMatcherData": None,"dependencyMatcherData": None,"stringMatcherData": None}]},"partitions": [{ "treatment": "on", "size": 100 },{ "treatment": "off", "size": 0 }],"label": "default rule"}], "sets": ["set_1"], "impressionsDisabled": False, 'prerequisites': []}, {"trafficTypeName": "user", "name": "SPLIT_1", "trafficAllocation": 100, "trafficAllocationSeed": -1780071202,"seed": -1442762199, "status": "ACTIVE","killed": False, "defaultTreatment": "off", "changeNumber": 1675443537882,"algo": 2, "configurations": {},"conditions": [{"conditionType": "ROLLOUT", "matcherGroup": {"combiner": "AND","matchers": [{"keySelector": { "trafficType": "user", "attribute": None },"matcherType": "ALL_KEYS","negate": False,"userDefinedSegmentMatcherData": None,"whitelistMatcherData": None,"unaryNumericMatcherData": None,"betweenMatcherData": None,"booleanMatcherData": None,"dependencyMatcherData": None,"stringMatcherData": None}]},"partitions": [{ "treatment": "on", "size": 0 },{ "treatment": "off", "size": 100 }],"label": "default rule"}], "sets": ["set_1", "set_2"]}, {"trafficTypeName": "user", "name": "SPLIT_3","trafficAllocation": 100,"trafficAllocationSeed": 1057590779, "seed": -113875324, "status": "ACTIVE","killed": False, "defaultTreatment": "off", "changeNumber": 1675443569027,"algo": 2, "configurations": {},"conditions": [{"conditionType": "ROLLOUT","matcherGroup": {"combiner": "AND","matchers": [{"keySelector": { "trafficType": "user", "attribute": None },"matcherType": "ALL_KEYS","negate": False,"userDefinedSegmentMatcherData": None,"whitelistMatcherData": None,"unaryNumericMatcherData": None,"betweenMatcherData": None,"booleanMatcherData": None,"dependencyMatcherData": None,"stringMatcherData": None}]},"partitions": [{ "treatment": "on", "size": 100 },{ "treatment": "off", "size": 0 }],"label": "default rule"}], "sets": ["set_1"], "impressionsDisabled": True} ]}, "rbs": {"t": -1, "s": -1, "d": rbsegments_json}} diff --git a/tests/integration/files/splitChanges.json b/tests/integration/files/splitChanges.json index d9ab1c24..84f7c2cd 100644 --- a/tests/integration/files/splitChanges.json +++ b/tests/integration/files/splitChanges.json @@ -23,7 +23,8 @@ "userDefinedSegmentMatcherData": null, "whitelistMatcherData": { "whitelist": [ - "whitelisted_user" + "whitelisted_user", + "user1234" ] } } @@ -394,7 +395,50 
@@ "configurations": {}, "sets": [], "impressionsDisabled": false - } + }, + { + "orgId": null, + "environment": null, + "trafficTypeId": null, + "trafficTypeName": null, + "name": "prereq_feature", + "seed": 1699838640, + "status": "ACTIVE", + "killed": false, + "changeNumber": 123, + "defaultTreatment": "off_default", + "conditions": [ + { + "conditionType": "ROLLOUT", + "matcherGroup": { + "combiner": "AND", + "matchers": [ + { + "matcherType": "ALL_KEYS", + "negate": false, + "userDefinedSegmentMatcherData": null, + "whitelistMatcherData": null + } + ] + }, + "partitions": [ + { + "treatment": "on", + "size": 100 + }, + { + "treatment": "off", + "size": 0 + } + ] + } + ], + "sets": [], + "prerequisites": [ + {"n": "regex_test", "ts": ["on"]}, + {"n": "whitelist_feature", "ts": ["off"]} + ] + } ], "s": -1, "t": 1457726098069 diff --git a/tests/integration/test_client_e2e.py b/tests/integration/test_client_e2e.py index f16352e3..f50869cf 100644 --- a/tests/integration/test_client_e2e.py +++ b/tests/integration/test_client_e2e.py @@ -171,6 +171,16 @@ def _get_treatment(factory, skip_rbs=False): if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): _validate_last_impressions(client, ('rbs_feature_flag', 'mauro@split.io', 'off')) + # test prerequisites matcher + assert client.get_treatment('abc4', 'prereq_feature') == 'on' + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + _validate_last_impressions(client, ('prereq_feature', 'abc4', 'on')) + + # test prerequisites matcher + assert client.get_treatment('user1234', 'prereq_feature') == 'off_default' + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + _validate_last_impressions(client, ('prereq_feature', 'user1234', 'off_default')) + def _get_treatment_with_config(factory): """Test client.get_treatment_with_config().""" try: @@ -460,8 +470,8 @@ def _manager_methods(factory, skip_rbs=False): assert len(manager.splits()) == 7 return - assert len(manager.split_names()) == 8 - assert len(manager.splits()) == 8 + assert len(manager.split_names()) == 9 + assert len(manager.splits()) == 9 class InMemoryDebugIntegrationTests(object): """Inmemory storage-based integration tests.""" @@ -4458,6 +4468,16 @@ async def _get_treatment_async(factory, skip_rbs=False): if skip_rbs: return + + # test prerequisites matcher + assert await client.get_treatment('abc4', 'prereq_feature') == 'on' + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + await _validate_last_impressions_async(client, ('prereq_feature', 'abc4', 'on')) + + # test prerequisites matcher + assert await client.get_treatment('user1234', 'prereq_feature') == 'off_default' + if not isinstance(factory._recorder._impressions_manager._strategy, StrategyNoneMode): + await _validate_last_impressions_async(client, ('prereq_feature', 'user1234', 'off_default')) # test rule based segment matcher assert await client.get_treatment('bilal@split.io', 'rbs_feature_flag', {'email': 'bilal@split.io'}) == 'on' @@ -4758,5 +4778,5 @@ async def _manager_methods_async(factory, skip_rbs=False): assert len(await manager.splits()) == 7 return - assert len(await manager.split_names()) == 8 - assert len(await manager.splits()) == 8 + assert len(await manager.split_names()) == 9 + assert len(await manager.splits()) == 9 diff --git a/tests/models/grammar/files/splits_prereq.json b/tests/models/grammar/files/splits_prereq.json new file mode 100644 index 00000000..5efa7fed --- 
/dev/null +++ b/tests/models/grammar/files/splits_prereq.json @@ -0,0 +1,293 @@ +{"ff": { + "d": [ + { + "trafficTypeName": "user", + "name": "test_prereq", + "prerequisites": [ + { "n": "feature_segment", "ts": ["off", "def_test"] }, + { "n": "rbs_flag", "ts": ["on"] } + ], + "trafficAllocation": 100, + "trafficAllocationSeed": 1582960494, + "seed": 1842944006, + "status": "ACTIVE", + "killed": false, + "defaultTreatment": "def_treatment", + "changeNumber": 1582741588594, + "algo": 2, + "configurations": {}, + "conditions": [ + { + "conditionType": "ROLLOUT", + "matcherGroup": { + "combiner": "AND", + "matchers": [ + { + "keySelector": { + "trafficType": "user", + "attribute": null + }, + "matcherType": "ALL_KEYS", + "negate": false, + "userDefinedSegmentMatcherData": null, + "whitelistMatcherData": null, + "unaryNumericMatcherData": null, + "betweenMatcherData": null, + "booleanMatcherData": null, + "dependencyMatcherData": null, + "stringMatcherData": null + } + ] + }, + "partitions": [ + { + "treatment": "on", + "size": 100 + }, + { + "treatment": "off", + "size": 0 + } + ], + "label": "default rule" + } + ] + }, + { + "name":"feature_segment", + "trafficTypeId":"u", + "trafficTypeName":"User", + "trafficAllocation": 100, + "trafficAllocationSeed": 1582960494, + "seed":-1177551240, + "status":"ACTIVE", + "killed":false, + "defaultTreatment":"def_test", + "changeNumber": 1582741588594, + "algo": 2, + "configurations": {}, + "conditions":[ + { + "matcherGroup":{ + "combiner":"AND", + "matchers":[ + { + "matcherType":"IN_SEGMENT", + "negate":false, + "userDefinedSegmentMatcherData":{ + "segmentName":"segment-test" + }, + "whitelistMatcherData":null + } + ] + }, + "partitions":[ + { + "treatment":"on", + "size":100 + }, + { + "treatment":"off", + "size":0 + } + ], + "label": "default label" + } + ] + }, + { + "changeNumber": 10, + "trafficTypeName": "user", + "name": "rbs_flag", + "trafficAllocation": 100, + "trafficAllocationSeed": 1828377380, + "seed": -286617921, + "status": "ACTIVE", + "killed": false, + "defaultTreatment": "off", + "algo": 2, + "conditions": [ + { + "conditionType": "ROLLOUT", + "matcherGroup": { + "combiner": "AND", + "matchers": [ + { + "keySelector": { + "trafficType": "user" + }, + "matcherType": "IN_RULE_BASED_SEGMENT", + "negate": false, + "userDefinedSegmentMatcherData": { + "segmentName": "sample_rule_based_segment" + } + } + ] + }, + "partitions": [ + { + "treatment": "on", + "size": 100 + }, + { + "treatment": "off", + "size": 0 + } + ], + "label": "in rule based segment sample_rule_based_segment" + }, + { + "conditionType": "ROLLOUT", + "matcherGroup": { + "combiner": "AND", + "matchers": [ + { + "keySelector": { + "trafficType": "user" + }, + "matcherType": "ALL_KEYS", + "negate": false + } + ] + }, + "partitions": [ + { + "treatment": "on", + "size": 0 + }, + { + "treatment": "off", + "size": 100 + } + ], + "label": "default rule" + } + ], + "configurations": {}, + "sets": [], + "impressionsDisabled": false + }, + { + "trafficTypeName": "user", + "name": "prereq_chain", + "prerequisites": [ + { "n": "test_prereq", "ts": ["on"] } + ], + "trafficAllocation": 100, + "trafficAllocationSeed": -2092979940, + "seed": 105482719, + "status": "ACTIVE", + "killed": false, + "defaultTreatment": "on_default", + "changeNumber": 1585948850109, + "algo": 2, + "configurations": {}, + "conditions": [ + { + "conditionType": "WHITELIST", + "matcherGroup": { + "combiner": "AND", + "matchers": [ + { + "keySelector": null, + "matcherType": "WHITELIST", + "negate": false, + 
"userDefinedSegmentMatcherData": null, + "whitelistMatcherData": { + "whitelist": [ + "bilal@split.io" + ] + }, + "unaryNumericMatcherData": null, + "betweenMatcherData": null, + "booleanMatcherData": null, + "dependencyMatcherData": null, + "stringMatcherData": null + } + ] + }, + "partitions": [ + { + "treatment": "on_whitelist", + "size": 100 + } + ], + "label": "whitelisted" + }, + { + "conditionType": "ROLLOUT", + "matcherGroup": { + "combiner": "AND", + "matchers": [ + { + "keySelector": { + "trafficType": "user", + "attribute": null + }, + "matcherType": "ALL_KEYS", + "negate": false, + "userDefinedSegmentMatcherData": null, + "whitelistMatcherData": null, + "unaryNumericMatcherData": null, + "betweenMatcherData": null, + "booleanMatcherData": null, + "dependencyMatcherData": null, + "stringMatcherData": null + } + ] + }, + "partitions": [ + { + "treatment": "on", + "size": 100 + }, + { + "treatment": "off", + "size": 0 + }, + { + "treatment": "V1", + "size": 0 + } + ], + "label": "default rule" + } + ] + } + ], + "s": -1, + "t": 1585948850109 +}, "rbs":{"d": [ + { + "changeNumber": 5, + "name": "sample_rule_based_segment", + "status": "ACTIVE", + "trafficTypeName": "user", + "excluded":{ + "keys":["mauro@split.io","gaston@split.io"], + "segments":[] + }, + "conditions": [ + { + "matcherGroup": { + "combiner": "AND", + "matchers": [ + { + "keySelector": { + "trafficType": "user", + "attribute": "email" + }, + "matcherType": "ENDS_WITH", + "negate": false, + "whitelistMatcherData": { + "whitelist": [ + "@split.io" + ] + } + } + ] + } + } + ] + }], "s": -1, "t": 1585948850109} +} diff --git a/tests/models/grammar/test_matchers.py b/tests/models/grammar/test_matchers.py index 680a8cc7..71922431 100644 --- a/tests/models/grammar/test_matchers.py +++ b/tests/models/grammar/test_matchers.py @@ -11,6 +11,7 @@ from datetime import datetime from splitio.models.grammar import matchers +from splitio.models.grammar.matchers.prerequisites import PrerequisitesMatcher from splitio.models import splits from splitio.models import rule_based_segments from splitio.models.grammar import condition @@ -1136,4 +1137,33 @@ def test_matcher_behaviour(self, mocker): )} assert matcher._match(None, context=ec) is False assert matcher._match('bilal@split.io', context=ec) is False - assert matcher._match('bilal@split.io', {'email': 'bilal@split.io'}, context=ec) is True \ No newline at end of file + assert matcher._match('bilal@split.io', {'email': 'bilal@split.io'}, context=ec) is True + +class PrerequisitesMatcherTests(MatcherTestsBase): + """tests for prerequisites matcher.""" + + def test_init(self, mocker): + """Test init.""" + split_load = os.path.join(os.path.dirname(__file__), 'files', 'splits_prereq.json') + with open(split_load, 'r') as flo: + data = json.loads(flo.read()) + + prereq = splits.from_raw_prerequisites(data['ff']['d'][0]['prerequisites']) + parsed = PrerequisitesMatcher(prereq) + assert parsed._prerequisites == prereq + + def test_matcher_behaviour(self, mocker): + """Test if the matcher works properly.""" + split_load = os.path.join(os.path.dirname(__file__), 'files', 'splits_prereq.json') + with open(split_load, 'r') as flo: + data = json.loads(flo.read()) + prereq = splits.from_raw_prerequisites(data['ff']['d'][3]['prerequisites']) + parsed = PrerequisitesMatcher(prereq) + evaluator = mocker.Mock(spec=Evaluator) + + + evaluator.eval_with_context.return_value = {'treatment': 'on'} + assert parsed.match('SPLIT_2', {}, {'evaluator': evaluator, 'ec': [{'flags': ['prereq_chain'], 
'segment_memberships': {}}]}) is True + + evaluator.eval_with_context.return_value = {'treatment': 'off'} + assert parsed.match('SPLIT_2', {}, {'evaluator': evaluator, 'ec': [{'flags': ['prereq_chain'], 'segment_memberships': {}}]}) is False \ No newline at end of file diff --git a/tests/models/test_splits.py b/tests/models/test_splits.py index 442a18d0..472ecde9 100644 --- a/tests/models/test_splits.py +++ b/tests/models/test_splits.py @@ -11,6 +11,10 @@ class SplitTests(object): 'changeNumber': 123, 'trafficTypeName': 'user', 'name': 'some_name', + 'prerequisites': [ + { 'n': 'flag1', 'ts': ['on','v1'] }, + { 'n': 'flag2', 'ts': ['off'] } + ], 'trafficAllocation': 100, 'trafficAllocationSeed': 123456, 'seed': 321654, @@ -83,14 +87,26 @@ def test_from_raw(self): assert parsed._configurations == {'on': '{"color": "blue", "size": 13}'} assert parsed.sets == {'set1', 'set2'} assert parsed.impressions_disabled == False - + assert len(parsed.prerequisites) == 2 + flag1 = False + flag2 = False + for prerequisite in parsed.prerequisites: + if prerequisite.feature_flag_name == 'flag1': + flag1 = True + assert prerequisite.treatments == ['on','v1'] + if prerequisite.feature_flag_name == 'flag2': + flag2 = True + assert prerequisite.treatments == ['off'] + assert flag1 + assert flag2 + def test_get_segment_names(self, mocker): """Test fetching segment names.""" cond1 = mocker.Mock(spec=Condition) cond2 = mocker.Mock(spec=Condition) cond1.get_segment_names.return_value = ['segment1', 'segment2'] cond2.get_segment_names.return_value = ['segment3', 'segment4'] - split1 = splits.Split( 'some_split', 123, False, 'off', 'user', 'ACTIVE', 123, [cond1, cond2]) + split1 = splits.Split( 'some_split', 123, False, 'off', 'user', 'ACTIVE', 123, [cond1, cond2], None) assert split1.get_segment_names() == ['segment%d' % i for i in range(1, 5)] def test_to_json(self): diff --git a/tests/sync/test_splits_synchronizer.py b/tests/sync/test_splits_synchronizer.py index c0ea38fb..fd9ac585 100644 --- a/tests/sync/test_splits_synchronizer.py +++ b/tests/sync/test_splits_synchronizer.py @@ -1185,6 +1185,10 @@ def test_elements_sanitization(self, mocker): split[0]['algo'] = 1 assert (split_synchronizer._sanitize_feature_flag_elements(split)[0]['algo'] == 2) + split = splits_json["splitChange1_1"]['ff']['d'].copy() + del split[0]['prerequisites'] + assert (split_synchronizer._sanitize_feature_flag_elements(split)[0]['prerequisites'] == []) + # test 'status' is set to ACTIVE when None rbs = copy.deepcopy(json_body["rbs"]["d"]) rbs[0]['status'] = None
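
A minimal usage sketch of the pieces introduced by this patch (illustrative only, not part of the diff): it assumes the patch is applied, so that from_raw_prerequisites exists in splitio/models/splits.py and PrerequisitesMatcher in splitio/models/grammar/matchers/prerequisites.py, and it stubs the evaluator that match() normally receives through its context.

    # Illustrative sketch only; requires this patch to be applied.
    from splitio.models.splits import from_raw_prerequisites
    from splitio.models.grammar.matchers.prerequisites import PrerequisitesMatcher

    class StubEvaluator:
        """Hypothetical stand-in for splitio.engine.evaluator.Evaluator."""
        def eval_with_context(self, key, bucketing, feature_name, attrs, ctx):
            # Pretend every prerequisite flag evaluates to 'on' for this key.
            return {'treatment': 'on'}

    # Raw payload shape used throughout the patch: "n" is the flag name, "ts" the allowed treatments.
    prerequisites = from_raw_prerequisites([{"n": "prereq_feature", "ts": ["on", "v1"]}])
    matcher = PrerequisitesMatcher(prerequisites)

    # match() re-evaluates each prerequisite flag and succeeds only if every returned
    # treatment is in that prerequisite's allowed list; otherwise the evaluator falls
    # back to the flag's default treatment with the PREREQUISITES_NOT_MET label.
    context = {'evaluator': StubEvaluator(), 'bucketing_key': None, 'ec': None}
    assert matcher.match('some_user_key', {}, context) is True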