From 96a1cc30737e13108d9a5211a6b998db1096e8fe Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 4 Oct 2018 19:17:42 -0700 Subject: [PATCH 001/356] add option to reduce front-end metadata for untracked flags --- ldclient/client.py | 10 +++-- ldclient/flags_state.py | 12 ++++-- testing/test_flags_state.py | 21 +++++----- testing/test_ldclient_evaluation.py | 59 +++++++++++++++++++++++++++-- 4 files changed, 81 insertions(+), 21 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 683a5c3b..039fad52 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -296,7 +296,10 @@ def all_flags_state(self, user, **kwargs): :param kwargs: optional parameters affecting how the state is computed: set `client_side_only=True` to limit it to only flags that are marked for use with the client-side SDK (by default, all flags are included); set `with_reasons=True` to - include evaluation reasons in the state (see `variation_detail`) + include evaluation reasons in the state (see `variation_detail`); set + `details_only_for_tracked_flags=True` to omit any metadata that is normally only + used for event generation, such as flag versions and evaluation reasons, unless + the flag has event tracking or debugging turned on :return: a FeatureFlagsState object (will never be None; its 'valid' property will be False if the client is offline, has not been initialized, or the user is None or has no key) :rtype: FeatureFlagsState @@ -319,6 +322,7 @@ def all_flags_state(self, user, **kwargs): state = FeatureFlagsState(True) client_only = kwargs.get('client_side_only', False) with_reasons = kwargs.get('with_reasons', False) + details_only_if_tracked = kwargs.get('details_only_for_tracked_flags', False) try: flags_map = self._store.all(FEATURES, lambda x: x) if flags_map is None: @@ -333,12 +337,12 @@ def all_flags_state(self, user, **kwargs): try: detail = evaluate(flag, user, self._store, False).detail state.add_flag(flag, detail.value, detail.variation_index, - detail.reason if with_reasons else None) + detail.reason if with_reasons else None, details_only_if_tracked) except Exception as e: log.error("Error evaluating flag \"%s\" in all_flags_state: %s" % (key, e)) log.debug(traceback.format_exc()) reason = {'kind': 'ERROR', 'errorKind': 'EXCEPTION'} - state.add_flag(flag, None, None, reason if with_reasons else None) + state.add_flag(flag, None, None, reason if with_reasons else None, details_only_if_tracked) return state diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index c76b4908..cbfde1ec 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -12,15 +12,19 @@ def __init__(self, valid): self.__flag_metadata = {} self.__valid = valid - def add_flag(self, flag, value, variation, reason): + def add_flag(self, flag, value, variation, reason, details_only_if_tracked): """Used internally to build the state map.""" key = flag['key'] self.__flag_values[key] = value - meta = { 'version': flag.get('version'), 'trackEvents': flag.get('trackEvents') } + meta = {} + if (not details_only_if_tracked) or flag.get('trackEvents') or flag.get('debugEventsUntilDate'): + meta['version'] = flag.get('version') + if reason is not None: + meta['reason'] = reason if variation is not None: meta['variation'] = variation - if reason is not None: - meta['reason'] = reason + if flag.get('trackEvents'): + meta['trackEvents'] = True if flag.get('debugEventsUntilDate') is not None: meta['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') self.__flag_metadata[key] = meta diff --git 
a/testing/test_flags_state.py b/testing/test_flags_state.py index 2fe5b123..45ea6404 100644 --- a/testing/test_flags_state.py +++ b/testing/test_flags_state.py @@ -6,7 +6,7 @@ def test_can_get_flag_value(): state = FeatureFlagsState(True) flag = { 'key': 'key' } - state.add_flag(flag, 'value', 1, None) + state.add_flag(flag, 'value', 1, None, False) assert state.get_flag_value('key') == 'value' def test_returns_none_for_unknown_flag(): @@ -17,16 +17,16 @@ def test_can_convert_to_values_map(): state = FeatureFlagsState(True) flag1 = { 'key': 'key1' } flag2 = { 'key': 'key2' } - state.add_flag(flag1, 'value1', 0, None) - state.add_flag(flag2, 'value2', 1, None) + state.add_flag(flag1, 'value1', 0, None, False) + state.add_flag(flag2, 'value2', 1, None, False) assert state.to_values_map() == { 'key1': 'value1', 'key2': 'value2' } def test_can_convert_to_json_dict(): state = FeatureFlagsState(True) flag1 = { 'key': 'key1', 'version': 100, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } flag2 = { 'key': 'key2', 'version': 200, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } - state.add_flag(flag1, 'value1', 0, None) - state.add_flag(flag2, 'value2', 1, None) + state.add_flag(flag1, 'value1', 0, None, False) + state.add_flag(flag2, 'value2', 1, None, False) result = state.to_json_dict() assert result == { @@ -35,8 +35,7 @@ def test_can_convert_to_json_dict(): '$flagsState': { 'key1': { 'variation': 0, - 'version': 100, - 'trackEvents': False + 'version': 100 }, 'key2': { 'variation': 1, @@ -52,8 +51,8 @@ def test_can_convert_to_json_string(): state = FeatureFlagsState(True) flag1 = { 'key': 'key1', 'version': 100, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } flag2 = { 'key': 'key2', 'version': 200, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } - state.add_flag(flag1, 'value1', 0, None) - state.add_flag(flag2, 'value2', 1, None) + state.add_flag(flag1, 'value1', 0, None, False) + state.add_flag(flag2, 'value2', 1, None, False) obj = state.to_json_dict() str = state.to_json_string() @@ -63,8 +62,8 @@ def test_can_serialize_with_jsonpickle(): state = FeatureFlagsState(True) flag1 = { 'key': 'key1', 'version': 100, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } flag2 = { 'key': 'key2', 'version': 200, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } - state.add_flag(flag1, 'value1', 0, None) - state.add_flag(flag2, 'value2', 1, None) + state.add_flag(flag1, 'value1', 0, None, False) + state.add_flag(flag2, 'value2', 1, None, False) obj = state.to_json_dict() str = jsonpickle.encode(state, unpicklable=False) diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 9183034b..81719564 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -149,8 +149,7 @@ def test_all_flags_state_returns_state(): '$flagsState': { 'key1': { 'variation': 0, - 'version': 100, - 'trackEvents': False + 'version': 100 }, 'key2': { 'variation': 1, @@ -176,7 +175,6 @@ def test_all_flags_state_returns_state_with_reasons(): 'key1': { 'variation': 0, 'version': 100, - 'trackEvents': False, 'reason': {'kind': 'OFF'} }, 'key2': { @@ -229,6 +227,61 @@ def test_all_flags_state_can_be_filtered_for_client_side_flags(): values = state.to_values_map() assert values == { 'client-side-1': 'value1', 'client-side-2': 'value2' } +def 
test_all_flags_state_can_omit_details_for_untracked_flags(): + flag1 = { + 'key': 'key1', + 'version': 100, + 'on': False, + 'offVariation': 0, + 'variations': [ 'value1' ], + 'trackEvents': False + } + flag2 = { + 'key': 'key2', + 'version': 200, + 'on': False, + 'offVariation': 1, + 'variations': [ 'x', 'value2' ], + 'trackEvents': True + } + flag3 = { + 'key': 'key3', + 'version': 300, + 'on': False, + 'offVariation': 1, + 'variations': [ 'x', 'value3' ], + 'debugEventsUntilDate': 1000 + } + store = InMemoryFeatureStore() + store.init({ FEATURES: { 'key1': flag1, 'key2': flag2, 'key3': flag3 } }) + client = make_client(store) + state = client.all_flags_state(user, with_reasons=True, details_only_for_tracked_flags=True) + assert state.valid == True + result = state.to_json_dict() + assert result == { + 'key1': 'value1', + 'key2': 'value2', + 'key3': 'value3', + '$flagsState': { + 'key1': { + 'variation': 0 + }, + 'key2': { + 'variation': 1, + 'version': 200, + 'trackEvents': True, + 'reason': {'kind': 'OFF'} + }, + 'key3': { + 'variation': 1, + 'version': 300, + 'debugEventsUntilDate': 1000, + 'reason': {'kind': 'OFF'} + } + }, + '$valid': True + } + def test_all_flags_state_returns_empty_state_if_user_is_none(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) From 89056fc7587ba16f9573e707c82be42af14a7b20 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 8 Oct 2018 16:33:39 -0700 Subject: [PATCH 002/356] fix logic for whether a flag is tracked in all_flags_state --- ldclient/flags_state.py | 8 +++++++- testing/test_ldclient_evaluation.py | 6 ++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index cbfde1ec..c5a8ab41 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -1,4 +1,5 @@ import json +import time class FeatureFlagsState(object): """ @@ -17,7 +18,12 @@ def add_flag(self, flag, value, variation, reason, details_only_if_tracked): key = flag['key'] self.__flag_values[key] = value meta = {} - if (not details_only_if_tracked) or flag.get('trackEvents') or flag.get('debugEventsUntilDate'): + with_details = (not details_only_if_tracked) or flag.get('trackEvents') + if not with_details: + if flag.get('debugEventsUntilDate'): + now = int(time.time() * 1000) + with_details = (flag.get('debugEventsUntilDate') > now) + if with_details: meta['version'] = flag.get('version') if reason is not None: meta['reason'] = reason diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 81719564..46c48756 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -1,5 +1,6 @@ import pytest import json +import time from ldclient.client import LDClient, Config from ldclient.feature_store import InMemoryFeatureStore from ldclient.flag import EvaluationDetail @@ -228,6 +229,7 @@ def test_all_flags_state_can_be_filtered_for_client_side_flags(): assert values == { 'client-side-1': 'value1', 'client-side-2': 'value2' } def test_all_flags_state_can_omit_details_for_untracked_flags(): + future_time = (time.time() * 1000) + 100000 flag1 = { 'key': 'key1', 'version': 100, @@ -250,7 +252,7 @@ def test_all_flags_state_can_omit_details_for_untracked_flags(): 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value3' ], - 'debugEventsUntilDate': 1000 + 'debugEventsUntilDate': future_time } store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2, 'key3': flag3 } }) @@ -275,7 +277,7 @@ def 
test_all_flags_state_can_omit_details_for_untracked_flags(): 'key3': { 'variation': 1, 'version': 300, - 'debugEventsUntilDate': 1000, + 'debugEventsUntilDate': future_time, 'reason': {'kind': 'OFF'} } }, From 1fc23e4e5a1e4d0a1f4256df5faf1c36bf85c4eb Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sun, 14 Oct 2018 00:25:44 -0700 Subject: [PATCH 003/356] use expiringdict from PyPi --- NOTICE.txt | 2 - ldclient/expiringdict.py | 155 -------------------------------- ldclient/redis_feature_store.py | 2 +- requirements.txt | 1 + 4 files changed, 2 insertions(+), 158 deletions(-) delete mode 100644 NOTICE.txt delete mode 100644 ldclient/expiringdict.py diff --git a/NOTICE.txt b/NOTICE.txt deleted file mode 100644 index 24f9d0e4..00000000 --- a/NOTICE.txt +++ /dev/null @@ -1,2 +0,0 @@ -This product includes software (ExpiringDict) developed by -Mailgun (https://github.com/mailgun/expiringdict). \ No newline at end of file diff --git a/ldclient/expiringdict.py b/ldclient/expiringdict.py deleted file mode 100644 index 4b244c21..00000000 --- a/ldclient/expiringdict.py +++ /dev/null @@ -1,155 +0,0 @@ -''' -Dictionary with auto-expiring values for caching purposes. - -Expiration happens on any access, object is locked during cleanup from expired -values. Can not store more than max_len elements - the oldest will be deleted. - ->>> ExpiringDict(max_len=100, max_age_seconds=10) - -The values stored in the following way: -{ - key1: (value1, created_time1), - key2: (value2, created_time2) -} - -NOTE: iteration over dict and also keys() do not remove expired values! - -Copied from https://github.com/mailgun/expiringdict/commit/d17d071721dd12af6829819885a74497492d7fb7 under the APLv2 - -TODO - Use PyPI version once https://github.com/mailgun/expiringdict/issues/13 has been fixed so that -https://github.com/mailgun/expiringdict/commit/62c50ce7083a1557a1140dae19145f3a0a7a1a14 is patched -''' - -import time -from threading import RLock - -from collections import OrderedDict - - -class ExpiringDict(OrderedDict): - - def __init__(self, max_len, max_age_seconds): - assert max_age_seconds >= 0 - assert max_len >= 1 - - OrderedDict.__init__(self) - self.max_len = max_len - self.max_age = max_age_seconds - self.lock = RLock() - - def __contains__(self, key): - """ Return True if the dict has a key, else return False. """ - try: - with self.lock: - item = OrderedDict.__getitem__(self, key) - if time.time() - item[1] < self.max_age: - return True - else: - del self[key] - except KeyError: - pass - return False - - def __getitem__(self, key, with_age=False): - """ Return the item of the dict. - - Raises a KeyError if key is not in the map. - """ - with self.lock: - item = OrderedDict.__getitem__(self, key) - item_age = time.time() - item[1] - if item_age < self.max_age: - if with_age: - return item[0], item_age - else: - return item[0] - else: - del self[key] - raise KeyError(key) - - def __setitem__(self, key, value): - """ Set d[key] to value. """ - with self.lock: - if len(self) == self.max_len: - self.popitem(last=False) - OrderedDict.__setitem__(self, key, (value, time.time())) - - def pop(self, key, default=None): - """ Get item from the dict and remove it. - - Return default if expired or does not exist. Never raise KeyError. - """ - with self.lock: - try: - item = OrderedDict.__getitem__(self, key) - del self[key] - return item[0] - except KeyError: - return default - - def ttl(self, key): - """ Return TTL of the `key` (in seconds). - - Returns None for non-existent or expired keys. 
- """ - key_value, key_age = self.get(key, with_age=True) - if key_age: - key_ttl = self.max_age - key_age - if key_ttl > 0: - return key_ttl - return None - - def get(self, key, default=None, with_age=False): - " Return the value for key if key is in the dictionary, else default. " - try: - return self.__getitem__(key, with_age) - except KeyError: - if with_age: - return default, None - else: - return default - - def items(self): - """ Return a copy of the dictionary's list of (key, value) pairs. """ - r = [] - for key in self: - try: - r.append((key, self[key])) - except KeyError: - pass - return r - - def values(self): - """ Return a copy of the dictionary's list of values. - See the note for dict.items(). """ - r = [] - for key in self: - try: - r.append(self[key]) - except KeyError: - pass - return r - - def fromkeys(self): - " Create a new dictionary with keys from seq and values set to value. " - raise NotImplementedError() - - def iteritems(self): - """ Return an iterator over the dictionary's (key, value) pairs. """ - raise NotImplementedError() - - def itervalues(self): - """ Return an iterator over the dictionary's values. """ - raise NotImplementedError() - - def viewitems(self): - " Return a new view of the dictionary's items ((key, value) pairs). " - raise NotImplementedError() - - def viewkeys(self): - """ Return a new view of the dictionary's keys. """ - raise NotImplementedError() - - def viewvalues(self): - """ Return a new view of the dictionary's values. """ - raise NotImplementedError() diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py index b016a1eb..71b7261b 100644 --- a/ldclient/redis_feature_store.py +++ b/ldclient/redis_feature_store.py @@ -1,10 +1,10 @@ import json from pprint import pprint +from expiringdict import ExpiringDict import redis from ldclient import log -from ldclient.expiringdict import ExpiringDict from ldclient.interfaces import FeatureStore from ldclient.memoized_value import MemoizedValue from ldclient.versioned_data_kind import FEATURES diff --git a/requirements.txt b/requirements.txt index 90a5ef51..8787ac53 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,6 @@ backoff>=1.4.3 certifi>=2018.4.16 +expiringdict>=1.1.4 future>=0.16.0 six>=1.10.0 pyRFC3339>=1.0 From ae8b25eb33ad3dbca21231f22ece4a96694f731c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 1 Nov 2018 15:04:39 -0700 Subject: [PATCH 004/356] implement file data source, not including auto-update --- ldclient/file_data_source.py | 95 ++++++++++++++ test-requirements.txt | 3 +- testing/test_file_data_source.py | 205 +++++++++++++++++++++++++++++++ 3 files changed, 302 insertions(+), 1 deletion(-) create mode 100644 ldclient/file_data_source.py create mode 100644 testing/test_file_data_source.py diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py new file mode 100644 index 00000000..d82930d9 --- /dev/null +++ b/ldclient/file_data_source.py @@ -0,0 +1,95 @@ +import json +import six +import traceback + +have_yaml = False +try: + import yaml + have_yaml = True +except ImportError: + pass + +from ldclient.interfaces import UpdateProcessor +from ldclient.util import log +from ldclient.versioned_data_kind import FEATURES, SEGMENTS + + +class FileDataSource(UpdateProcessor): + @classmethod + def factory(cls, **kwargs): + return lambda config, store, ready : FileDataSource(store, kwargs, ready) + + def __init__(self, store, options, ready): + self._store = store + self._ready = ready + self._inited = False + self._paths = 
options.get('paths', []) + if isinstance(self._paths, six.string_types): + self._paths = [ self._paths ] + + def start(self): + self._load_all() + + # We will signal readiness immediately regardless of whether the file load succeeded or failed - + # the difference can be detected by checking initialized() + self._ready.set() + + def stop(self): + pass + + def initialized(self): + return self._inited + + def _load_all(self): + all_data = { FEATURES: {}, SEGMENTS: {} } + print "Loading: %s" % self._paths + for path in self._paths: + try: + self._load_file(path, all_data) + except Exception as e: + log.error('Unable to load flag data from "%s": %s' % (path, repr(e))) + traceback.print_exc() + return + print "Initing: %s" % all_data + self._store.init(all_data) + self._inited = True + + def _load_file(self, path, all_data): + content = None + with open(path, 'r') as f: + content = f.read() + parsed = self._parse_content(content) + for key, flag in six.iteritems(parsed.get('flags', {})): + self._add_item(all_data, FEATURES, flag) + for key, value in six.iteritems(parsed.get('flagValues', {})): + self._add_item(all_data, FEATURES, self._make_flag_with_value(key, value)) + for key, segment in six.iteritems(parsed.get('segments', {})): + self._add_item(all_data, SEGMENTS, segment) + + def _parse_content(self, content): + if have_yaml: + if content.strip().startswith("{"): + print("json: %s" % content) + return json.loads(content) + else: + return yaml.load(content) + print("json: %s" % content) + return json.loads(content) + + def _add_item(self, all_data, kind, item): + items = all_data[kind] + key = item.get('key') + if items.get(key) is None: + items[key] = item + else: + raise Exception('In %s, key "%s" was used more than once' % (kind.namespace, key)) + + def _make_flag_with_value(self, key, value): + return { + 'key': key, + 'on': True, + 'fallthrough': { + 'variation': 0 + }, + 'variations': [ value ] + } diff --git a/test-requirements.txt b/test-requirements.txt index ee547312..1aa5903e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,4 +4,5 @@ redis>=2.10.5 coverage>=4.4 pytest-capturelog>=0.7 pytest-cov>=2.4.0 -codeclimate-test-reporter>=0.2.1 \ No newline at end of file +codeclimate-test-reporter>=0.2.1 +pyyaml>=3.0 diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py new file mode 100644 index 00000000..de4d9393 --- /dev/null +++ b/testing/test_file_data_source.py @@ -0,0 +1,205 @@ +import json +import os +import pytest +import tempfile +import threading +import time + +from ldclient.client import LDClient +from ldclient.config import Config +from ldclient.feature_store import InMemoryFeatureStore +from ldclient.file_data_source import FileDataSource +from ldclient.versioned_data_kind import FEATURES, SEGMENTS + + +all_flag_keys = [ 'flag1', 'flag2' ] +all_properties_json = ''' + { + "flags": { + "flag1": { + "key": "flag1", + "on": true, + "fallthrough": { + "variation": 2 + }, + "variations": [ "fall", "off", "on" ] + } + }, + "flagValues": { + "flag2": "value2" + }, + "segments": { + "seg1": { + "key": "seg1", + "include": ["user1"] + } + } + } +''' + +all_properties_yaml = ''' +--- +flags: + flag1: + key: flag1 + "on": true +flagValues: + flag2: value2 +segments: + seg1: + key: seg1 + include: ["user1"] +''' + +flag_only_json = ''' + { + "flags": { + "flag1": { + "key": "flag1", + "on": true, + "fallthrough": { + "variation": 2 + }, + "variations": [ "fall", "off", "on" ] + } + } + } +''' + +segment_only_json = ''' + { + "segments": { 
+ "seg1": { + "key": "seg1", + "include": ["user1"] + } + } + } +''' + +fds = None +store = None +ready = None + + +def setup_function(): + global fds, store, ready + store = InMemoryFeatureStore() + ready = threading.Event() + +def teardown_function(): + if fds is not None: + fds.stop() + +def make_temp_file(content): + f, path = tempfile.mkstemp() + os.write(f, content) + os.close(f) + return path + +def replace_file(path, content): + with open(path, 'w') as f: + f.write(content) + +def test_does_not_load_data_prior_to_start(): + path = make_temp_file('{"flagValues":{"key":"value"}}') + try: + fds = FileDataSource.factory(paths = path)(Config(), store, ready) + assert ready.is_set() is False + assert fds.initialized() is False + assert store.initialized is False + finally: + os.remove(path) + +def test_loads_flags_on_start_from_json(): + path = make_temp_file(all_properties_json) + try: + fds = FileDataSource.factory(paths = path)(Config(), store, ready) + fds.start() + assert store.initialized is True + assert sorted(list(store.all(FEATURES, lambda x: x).keys())) == all_flag_keys + finally: + os.remove(path) + +def test_loads_flags_on_start_from_yaml(): + path = make_temp_file(all_properties_yaml) + try: + fds = FileDataSource.factory(paths = path)(Config(), store, ready) + fds.start() + assert store.initialized is True + assert sorted(list(store.all(FEATURES, lambda x: x).keys())) == all_flag_keys + finally: + os.remove(path) + +def test_sets_ready_event_and_initialized_on_successful_load(): + path = make_temp_file(all_properties_json) + try: + fds = FileDataSource.factory(paths = path)(Config(), store, ready) + fds.start() + assert fds.initialized() is True + assert ready.is_set() is True + finally: + os.remove(path) + +def test_sets_ready_event_and_does_not_set_initialized_on_unsuccessful_load(): + bad_file_path = 'no-such-file' + fds = FileDataSource.factory(paths = bad_file_path)(Config(), store, ready) + fds.start() + assert fds.initialized() is False + assert ready.is_set() is True + +def test_can_load_multiple_files(): + path1 = make_temp_file(flag_only_json) + path2 = make_temp_file(segment_only_json) + try: + fds = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) + fds.start() + assert len(store.all(FEATURES, lambda x: x)) == 1 + assert len(store.all(SEGMENTS, lambda x: x)) == 1 + finally: + os.remove(path1) + os.remove(path2) + +def test_does_not_allow_duplicate_keys(): + path1 = make_temp_file(flag_only_json) + path2 = make_temp_file(flag_only_json) + try: + fds = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) + fds.start() + assert len(store.all(FEATURES, lambda x: x)) == 0 + finally: + os.remove(path1) + os.remove(path2) + +def test_does_not_reload_modified_file_if_auto_update_is_off(): + path = make_temp_file(flag_only_json) + try: + fds = FileDataSource.factory(paths = path)(Config(), store, ready) + fds.start() + assert len(store.all(SEGMENTS, lambda x: x)) == 0 + time.sleep(0.5) + replace_file(path, segment_only_json) + time.sleep(0.5) + assert len(store.all(SEGMENTS, lambda x: x)) == 0 + finally: + os.remove(path) + +def test_evaluates_full_flag_with_client_as_expected(): + path = make_temp_file(all_properties_json) + try: + fds = FileDataSource.factory(paths = path) + client = LDClient(config=Config(update_processor_class = fds, send_events = False)) + value = client.variation('flag1', { 'key': 'user' }, '') + assert value == 'on' + finally: + os.remove(path) + +def 
test_evaluates_simplified_flag_with_client_as_expected(): + path = make_temp_file(all_properties_json) + try: + fds = FileDataSource.factory(paths = path) + client = LDClient(config=Config(update_processor_class = fds, send_events = False)) + value = client.variation('flag2', { 'key': 'user' }, '') + assert value == 'value2' + finally: + os.remove(path) From 850837d72794b6e5e175590304fe844b808213d8 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 1 Nov 2018 15:53:56 -0700 Subject: [PATCH 005/356] rm debugging --- ldclient/file_data_source.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index d82930d9..5ebb062d 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -42,7 +42,6 @@ def initialized(self): def _load_all(self): all_data = { FEATURES: {}, SEGMENTS: {} } - print "Loading: %s" % self._paths for path in self._paths: try: self._load_file(path, all_data) From aa7684a5181143a0d7c0874c3d01f1b837f4c3b2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 1 Nov 2018 16:01:26 -0700 Subject: [PATCH 006/356] rm debugging --- ldclient/file_data_source.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index 5ebb062d..a8351ba6 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -49,7 +49,6 @@ def _load_all(self): log.error('Unable to load flag data from "%s": %s' % (path, repr(e))) traceback.print_exc() return - print "Initing: %s" % all_data self._store.init(all_data) self._inited = True @@ -68,11 +67,9 @@ def _load_file(self, path, all_data): def _parse_content(self, content): if have_yaml: if content.strip().startswith("{"): - print("json: %s" % content) return json.loads(content) else: return yaml.load(content) - print("json: %s" % content) return json.loads(content) def _add_item(self, all_data, kind, item): From 39c90424302e934502db4cff01e9f9de96cd2e65 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 1 Nov 2018 16:30:34 -0700 Subject: [PATCH 007/356] Python 3 compatibility fix --- testing/test_file_data_source.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index de4d9393..9b4a2c7b 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -1,6 +1,7 @@ import json import os import pytest +import six import tempfile import threading import time @@ -93,7 +94,7 @@ def teardown_function(): def make_temp_file(content): f, path = tempfile.mkstemp() - os.write(f, content) + os.write(f, six.b(content)) os.close(f) return path From a43bf0c56789f26f80199e260ca04c4b9cb6b918 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 2 Nov 2018 13:14:55 -0700 Subject: [PATCH 008/356] add file watching, update documentation and tests --- ldclient/file_data_source.py | 194 ++++++++++++++++++++++++++++++- test-requirements.txt | 1 + testing/test_file_data_source.py | 76 +++++++----- 3 files changed, 239 insertions(+), 32 deletions(-) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index a8351ba6..09afa263 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -1,4 +1,5 @@ import json +import os import six import traceback @@ -9,7 +10,17 @@ except ImportError: pass +have_watchdog = False +try: + import watchdog + import watchdog.events + import watchdog.observers + have_watchdog = True +except ImportError: + pass + from ldclient.interfaces import 
UpdateProcessor +from ldclient.repeating_timer import RepeatingTimer from ldclient.util import log from ldclient.versioned_data_kind import FEATURES, SEGMENTS @@ -17,6 +28,101 @@ class FileDataSource(UpdateProcessor): @classmethod def factory(cls, **kwargs): + """Provides a way to use local files as a source of feature flag state. This would typically be + used in a test environment, to operate using a predetermined feature flag state without an + actual LaunchDarkly connection. + + To use this component, call `FileDataSource.factory`, and store its return value in the + `update_processor_class` property of your LaunchDarkly client configuration. In the options + to `factory`, set `paths` to the file path(s) of your data file(s): + :: + + factory = FileDataSource.factory(paths: [ myFilePath ]) + config = Config(update_processor_class = factory) + + This will cause the client not to connect to LaunchDarkly to get feature flags. The + client may still make network connections to send analytics events, unless you have disabled + this with Config.send_events or Config.offline. + + Flag data files can be either JSON or YAML (in order to use YAML, you must install the 'pyyaml' + package). They contain an object with three possible properties: + + * "flags": Feature flag definitions. + * "flagValues": Simplified feature flags that contain only a value. + * "segments": User segment definitions. + + The format of the data in "flags" and "segments" is defined by the LaunchDarkly application + and is subject to change. Rather than trying to construct these objects yourself, it is simpler + to request existing flags directly from the LaunchDarkly server in JSON format, and use this + output as the starting point for your file. In Linux you would do this: + :: + + curl -H "Authorization: {your sdk key}" https://app.launchdarkly.com/sdk/latest-all + + The output will look something like this (but with many more properties): + :: + + { + "flags": { + "flag-key-1": { + "key": "flag-key-1", + "on": true, + "variations": [ "a", "b" ] + } + }, + "segments": { + "segment-key-1": { + "key": "segment-key-1", + "includes": [ "user-key-1" ] + } + } + } + + Data in this format allows the SDK to exactly duplicate all the kinds of flag behavior supported + by LaunchDarkly. However, in many cases you will not need this complexity, but will just want to + set specific flag keys to specific values. For that, you can use a much simpler format: + :: + + { + "flagValues": { + "my-string-flag-key": "value-1", + "my-boolean-flag-key": true, + "my-integer-flag-key": 3 + } + } + + Or, in YAML: + :: + + flagValues: + my-string-flag-key: "value-1" + my-boolean-flag-key: true + my-integer-flag-key: 1 + + It is also possible to specify both "flags" and "flagValues", if you want some flags + to have simple values and others to have complex behavior. However, it is an error to use the + same flag key or segment key more than once, either in a single file or across multiple files. + + If the data source encounters any error in any file-- malformed content, a missing file, or a + duplicate key-- it will not load flags from any of the files. + + :param kwargs: + See below + + :Keyword arguments: + * **paths** (array): The paths of the source files for loading flag data. These may be absolute paths + or relative to the current working directory. Files will be parsed as JSON unless the 'pyyaml' + package is installed, in which case YAML is also allowed. 
+ * **auto_update** (boolean): True if the data source should watch for changes to the source file(s) + and reload flags whenever there is a change. The default implementation of this feature is based on + polling the filesystem, which may not perform well; if you install the 'watchdog' package (not + included by default, to avoid adding unwanted dependencies to the SDK), its native file watching + mechanism will be used instead. Note that auto-updating will only work if all of the files you + specified have valid directory paths at startup time. + * **poll_interval** (float): The minimum interval, in seconds, between checks for file modifications - + used only if auto_update is true, and if the native file-watching mechanism from 'watchdog' is not + being used. The default value is 1 second. + """ return lambda config, store, ready : FileDataSource(store, kwargs, ready) def __init__(self, store, options, ready): @@ -26,16 +132,25 @@ def __init__(self, store, options, ready): self._paths = options.get('paths', []) if isinstance(self._paths, six.string_types): self._paths = [ self._paths ] - + self._auto_update = options.get('auto_update', False) + self._poll_interval = options.get('poll_interval', 1) + self._force_polling = options.get('force_polling', False) # used only in tests + def start(self): self._load_all() + if self._auto_update: + self._auto_updater = self._start_auto_updater() + else: + self._auto_updater = None + # We will signal readiness immediately regardless of whether the file load succeeded or failed - # the difference can be detected by checking initialized() self._ready.set() def stop(self): - pass + if self._auto_updater: + self._auto_updater.stop() def initialized(self): return self._inited @@ -66,10 +181,7 @@ def _load_file(self, path, all_data): def _parse_content(self, content): if have_yaml: - if content.strip().startswith("{"): - return json.loads(content) - else: - return yaml.load(content) + return yaml.load(content) # pyyaml correctly parses JSON too return json.loads(content) def _add_item(self, all_data, kind, item): @@ -89,3 +201,73 @@ def _make_flag_with_value(self, key, value): }, 'variations': [ value ] } + + def _start_auto_updater(self): + resolved_paths = [] + for path in self._paths: + try: + resolved_paths.append(os.path.realpath(path)) + except: + log.warn('Cannot watch for changes to data file "%s" because it is an invalid path' % path) + if have_watchdog and not self._force_polling: + return FileDataSource.WatchdogAutoUpdater(resolved_paths, self._load_all) + else: + return FileDataSource.PollingAutoUpdater(resolved_paths, self._load_all, self._poll_interval) + + # Watch for changes to data files using the watchdog package. This uses native OS filesystem notifications + # if available for the current platform. + class WatchdogAutoUpdater(object): + def __init__(self, resolved_paths, reloader): + watched_files = set(resolved_paths) + + class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): + def on_any_event(self, event): + if event.src_path in watched_files: + reloader() + + dir_paths = set() + for path in resolved_paths: + dir_paths.add(os.path.dirname(path)) + + self._observer = watchdog.observers.Observer() + handler = LDWatchdogHandler() + for path in dir_paths: + self._observer.schedule(handler, path) + self._observer.start() + + def stop(self): + self._observer.stop() + self._observer.join() + + # Watch for changes to data files by polling their modification times. 
This is used if auto-update is + # on but the watchdog package is not installed. + class PollingAutoUpdater(object): + def __init__(self, resolved_paths, reloader, interval): + self._paths = resolved_paths + self._reloader = reloader + self._file_times = self._check_file_times() + self._timer = RepeatingTimer(interval, self._poll) + self._timer.start() + + def stop(self): + self._timer.stop() + + def _poll(self): + new_times = self._check_file_times() + changed = False + for file_path, file_time in six.iteritems(self._file_times): + if new_times.get(file_path) is not None and new_times.get(file_path) != file_time: + changed = True + break + self._file_times = new_times + if changed: + self._reloader() + + def _check_file_times(self): + ret = {} + for path in self._paths: + try: + ret[path] = os.path.getmtime(path) + except: + ret[path] = None + return ret diff --git a/test-requirements.txt b/test-requirements.txt index 1aa5903e..413ef355 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -6,3 +6,4 @@ pytest-capturelog>=0.7 pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 pyyaml>=3.0 +watchdog>=0.9 diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index 9b4a2c7b..4fa16bff 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -78,19 +78,19 @@ } ''' -fds = None +data_source = None store = None ready = None def setup_function(): - global fds, store, ready + global data_source, store, ready store = InMemoryFeatureStore() ready = threading.Event() def teardown_function(): - if fds is not None: - fds.stop() + if data_source is not None: + data_source.stop() def make_temp_file(content): f, path = tempfile.mkstemp() @@ -105,9 +105,9 @@ def replace_file(path, content): def test_does_not_load_data_prior_to_start(): path = make_temp_file('{"flagValues":{"key":"value"}}') try: - fds = FileDataSource.factory(paths = path)(Config(), store, ready) + data_source = FileDataSource.factory(paths = path)(Config(), store, ready) assert ready.is_set() is False - assert fds.initialized() is False + assert data_source.initialized() is False assert store.initialized is False finally: os.remove(path) @@ -115,8 +115,8 @@ def test_does_not_load_data_prior_to_start(): def test_loads_flags_on_start_from_json(): path = make_temp_file(all_properties_json) try: - fds = FileDataSource.factory(paths = path)(Config(), store, ready) - fds.start() + data_source = FileDataSource.factory(paths = path)(Config(), store, ready) + data_source.start() assert store.initialized is True assert sorted(list(store.all(FEATURES, lambda x: x).keys())) == all_flag_keys finally: @@ -125,8 +125,8 @@ def test_loads_flags_on_start_from_json(): def test_loads_flags_on_start_from_yaml(): path = make_temp_file(all_properties_yaml) try: - fds = FileDataSource.factory(paths = path)(Config(), store, ready) - fds.start() + data_source = FileDataSource.factory(paths = path)(Config(), store, ready) + data_source.start() assert store.initialized is True assert sorted(list(store.all(FEATURES, lambda x: x).keys())) == all_flag_keys finally: @@ -135,26 +135,26 @@ def test_loads_flags_on_start_from_yaml(): def test_sets_ready_event_and_initialized_on_successful_load(): path = make_temp_file(all_properties_json) try: - fds = FileDataSource.factory(paths = path)(Config(), store, ready) - fds.start() - assert fds.initialized() is True + data_source = FileDataSource.factory(paths = path)(Config(), store, ready) + data_source.start() + assert data_source.initialized() is True assert 
ready.is_set() is True finally: os.remove(path) def test_sets_ready_event_and_does_not_set_initialized_on_unsuccessful_load(): bad_file_path = 'no-such-file' - fds = FileDataSource.factory(paths = bad_file_path)(Config(), store, ready) - fds.start() - assert fds.initialized() is False + data_source = FileDataSource.factory(paths = bad_file_path)(Config(), store, ready) + data_source.start() + assert data_source.initialized() is False assert ready.is_set() is True def test_can_load_multiple_files(): path1 = make_temp_file(flag_only_json) path2 = make_temp_file(segment_only_json) try: - fds = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) - fds.start() + data_source = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) + data_source.start() assert len(store.all(FEATURES, lambda x: x)) == 1 assert len(store.all(SEGMENTS, lambda x: x)) == 1 finally: @@ -165,8 +165,8 @@ def test_does_not_allow_duplicate_keys(): path1 = make_temp_file(flag_only_json) path2 = make_temp_file(flag_only_json) try: - fds = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) - fds.start() + data_source = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) + data_source.start() assert len(store.all(FEATURES, lambda x: x)) == 0 finally: os.remove(path1) @@ -175,8 +175,8 @@ def test_does_not_allow_duplicate_keys(): def test_does_not_reload_modified_file_if_auto_update_is_off(): path = make_temp_file(flag_only_json) try: - fds = FileDataSource.factory(paths = path)(Config(), store, ready) - fds.start() + data_source = FileDataSource.factory(paths = path)(Config(), store, ready) + data_source.start() assert len(store.all(SEGMENTS, lambda x: x)) == 0 time.sleep(0.5) replace_file(path, segment_only_json) @@ -185,22 +185,46 @@ def test_does_not_reload_modified_file_if_auto_update_is_off(): finally: os.remove(path) +def do_auto_update_test(options): + path = make_temp_file(flag_only_json) + options['paths'] = path + try: + data_source = FileDataSource.factory(**options)(Config(), store, ready) + data_source.start() + assert len(store.all(SEGMENTS, lambda x: x)) == 0 + time.sleep(0.5) + replace_file(path, segment_only_json) + time.sleep(0.5) + assert len(store.all(SEGMENTS, lambda x: x)) == 1 + finally: + os.remove(path) + +def test_reloads_modified_file_if_auto_update_is_on(): + do_auto_update_test({ 'auto_update': True }) + +def test_reloads_modified_file_in_polling_mode(): + do_auto_update_test({ 'auto_update': True, 'force_polling': True, 'poll_interval': 0.1 }) + def test_evaluates_full_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: - fds = FileDataSource.factory(paths = path) - client = LDClient(config=Config(update_processor_class = fds, send_events = False)) + data_source = FileDataSource.factory(paths = path) + client = LDClient(config=Config(update_processor_class = data_source, send_events = False)) value = client.variation('flag1', { 'key': 'user' }, '') assert value == 'on' finally: os.remove(path) + if client is not None: + client.close() def test_evaluates_simplified_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: - fds = FileDataSource.factory(paths = path) - client = LDClient(config=Config(update_processor_class = fds, send_events = False)) + data_source = FileDataSource.factory(paths = path) + client = LDClient(config=Config(update_processor_class = data_source, send_events = False)) value = client.variation('flag2', { 'key': 'user' }, '') assert value == 
'value2' finally: os.remove(path) + if client is not None: + client.close() From 2cea73061eaba3d4d7ac812e9fbf9fffb7de5712 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 2 Nov 2018 13:19:29 -0700 Subject: [PATCH 009/356] readme --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 57aac968..edef13e6 100644 --- a/README.md +++ b/README.md @@ -78,6 +78,10 @@ Supported Python versions ---------- The SDK is tested with the most recent patch releases of Python 2.7, 3.3, 3.4, 3.5, and 3.6. Python 2.6 is no longer supported. +Using flag data from a file +--------------------------- +For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See [`file_data_source.py`](https://github.com/launchdarkly/python-client/blob/master/ldclient/file_data_source.py) for more details. + Learn more ----------- From dcf1afe6f7f1fd1535450a36fd26af18afd5c6af Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 2 Nov 2018 13:20:42 -0700 Subject: [PATCH 010/356] debugging --- ldclient/file_data_source.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index 09afa263..79d9655f 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -218,10 +218,12 @@ def _start_auto_updater(self): # if available for the current platform. class WatchdogAutoUpdater(object): def __init__(self, resolved_paths, reloader): + print("*** all paths: %s" % resolved_paths) watched_files = set(resolved_paths) class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): def on_any_event(self, event): + print("*** got event: %s" % event.src_path) if event.src_path in watched_files: reloader() @@ -232,6 +234,7 @@ def on_any_event(self, event): self._observer = watchdog.observers.Observer() handler = LDWatchdogHandler() for path in dir_paths: + print("*** watching: %s" % path) self._observer.schedule(handler, path) self._observer.start() From 4e98fdd3f3c0e0ecfbf608643ad17268c92925fa Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 2 Nov 2018 13:35:03 -0700 Subject: [PATCH 011/356] debugging --- testing/test_file_data_source.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index 4fa16bff..7e565c17 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -194,8 +194,14 @@ def do_auto_update_test(options): assert len(store.all(SEGMENTS, lambda x: x)) == 0 time.sleep(0.5) replace_file(path, segment_only_json) - time.sleep(0.5) - assert len(store.all(SEGMENTS, lambda x: x)) == 1 + print("*** modified file %s" % path) + deadline = time.time() + 10 + while time.time() < deadline: + time.sleep(0.1) + if len(store.all(SEGMENTS, lambda x: x)) == 1: + return + print("*** checked") + assert False, "Flags were not reloaded after 10 seconds" finally: os.remove(path) From 8f3c2217805da177d412a6a5543982ad3e118ca6 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 2 Nov 2018 13:41:52 -0700 Subject: [PATCH 012/356] debugging --- ldclient/file_data_source.py | 2 ++ testing/test_file_data_source.py | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index 79d9655f..c1be6974 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -239,6 +239,7 @@ def on_any_event(self, event): self._observer.start() def stop(self): + print("*** 
stopping observer") self._observer.stop() self._observer.join() @@ -253,6 +254,7 @@ def __init__(self, resolved_paths, reloader, interval): self._timer.start() def stop(self): + print("*** stopping polling") self._timer.stop() def _poll(self): diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index 7e565c17..e62fff62 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -199,16 +199,19 @@ def do_auto_update_test(options): while time.time() < deadline: time.sleep(0.1) if len(store.all(SEGMENTS, lambda x: x)) == 1: + print("*** success on %s" % path) return - print("*** checked") + print("*** checked %s" % path) assert False, "Flags were not reloaded after 10 seconds" finally: os.remove(path) def test_reloads_modified_file_if_auto_update_is_on(): + print("*** with watchdog") do_auto_update_test({ 'auto_update': True }) def test_reloads_modified_file_in_polling_mode(): + print("*** with polling") do_auto_update_test({ 'auto_update': True, 'force_polling': True, 'poll_interval': 0.1 }) def test_evaluates_full_flag_with_client_as_expected(): From 84276ddc908a1de2fae9922aaaf538d5eac560a1 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 2 Nov 2018 13:48:26 -0700 Subject: [PATCH 013/356] fix cleanup logic --- ldclient/file_data_source.py | 3 +- testing/test_file_data_source.py | 53 +++++++++++++++++--------------- 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index c1be6974..0b51cfdd 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -133,6 +133,7 @@ def __init__(self, store, options, ready): if isinstance(self._paths, six.string_types): self._paths = [ self._paths ] self._auto_update = options.get('auto_update', False) + self._auto_updater = None self._poll_interval = options.get('poll_interval', 1) self._force_polling = options.get('force_polling', False) # used only in tests @@ -141,8 +142,6 @@ def start(self): if self._auto_update: self._auto_updater = self._start_auto_updater() - else: - self._auto_updater = None # We will signal readiness immediately regardless of whether the file load succeeded or failed - # the difference can be detected by checking initialized() diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index e62fff62..8a8f5d5a 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -92,6 +92,11 @@ def teardown_function(): if data_source is not None: data_source.stop() +def make_data_source(**kwargs): + global data_source + data_source = FileDataSource.factory(**kwargs)(Config(), store, ready) + return data_source + def make_temp_file(content): f, path = tempfile.mkstemp() os.write(f, six.b(content)) @@ -105,9 +110,9 @@ def replace_file(path, content): def test_does_not_load_data_prior_to_start(): path = make_temp_file('{"flagValues":{"key":"value"}}') try: - data_source = FileDataSource.factory(paths = path)(Config(), store, ready) + source = make_data_source(paths = path) assert ready.is_set() is False - assert data_source.initialized() is False + assert source.initialized() is False assert store.initialized is False finally: os.remove(path) @@ -115,8 +120,8 @@ def test_does_not_load_data_prior_to_start(): def test_loads_flags_on_start_from_json(): path = make_temp_file(all_properties_json) try: - data_source = FileDataSource.factory(paths = path)(Config(), store, ready) - data_source.start() + source = make_data_source(paths = path) + 
source.start() assert store.initialized is True assert sorted(list(store.all(FEATURES, lambda x: x).keys())) == all_flag_keys finally: @@ -125,8 +130,8 @@ def test_loads_flags_on_start_from_json(): def test_loads_flags_on_start_from_yaml(): path = make_temp_file(all_properties_yaml) try: - data_source = FileDataSource.factory(paths = path)(Config(), store, ready) - data_source.start() + source = make_data_source(paths = path) + source.start() assert store.initialized is True assert sorted(list(store.all(FEATURES, lambda x: x).keys())) == all_flag_keys finally: @@ -135,26 +140,26 @@ def test_loads_flags_on_start_from_yaml(): def test_sets_ready_event_and_initialized_on_successful_load(): path = make_temp_file(all_properties_json) try: - data_source = FileDataSource.factory(paths = path)(Config(), store, ready) - data_source.start() - assert data_source.initialized() is True + source = make_data_source(paths = path) + source.start() + assert source.initialized() is True assert ready.is_set() is True finally: os.remove(path) def test_sets_ready_event_and_does_not_set_initialized_on_unsuccessful_load(): bad_file_path = 'no-such-file' - data_source = FileDataSource.factory(paths = bad_file_path)(Config(), store, ready) - data_source.start() - assert data_source.initialized() is False + source = make_data_source(paths = bad_file_path) + source.start() + assert source.initialized() is False assert ready.is_set() is True def test_can_load_multiple_files(): path1 = make_temp_file(flag_only_json) path2 = make_temp_file(segment_only_json) try: - data_source = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) - data_source.start() + source = make_data_source(paths = [ path1, path2 ]) + source.start() assert len(store.all(FEATURES, lambda x: x)) == 1 assert len(store.all(SEGMENTS, lambda x: x)) == 1 finally: @@ -165,8 +170,8 @@ def test_does_not_allow_duplicate_keys(): path1 = make_temp_file(flag_only_json) path2 = make_temp_file(flag_only_json) try: - data_source = FileDataSource.factory(paths = [ path1, path2 ])(Config(), store, ready) - data_source.start() + source = make_data_source(paths = [ path1, path2 ]) + source.start() assert len(store.all(FEATURES, lambda x: x)) == 0 finally: os.remove(path1) @@ -175,8 +180,8 @@ def test_does_not_allow_duplicate_keys(): def test_does_not_reload_modified_file_if_auto_update_is_off(): path = make_temp_file(flag_only_json) try: - data_source = FileDataSource.factory(paths = path)(Config(), store, ready) - data_source.start() + source = make_data_source(paths = path) + source.start() assert len(store.all(SEGMENTS, lambda x: x)) == 0 time.sleep(0.5) replace_file(path, segment_only_json) @@ -189,8 +194,8 @@ def do_auto_update_test(options): path = make_temp_file(flag_only_json) options['paths'] = path try: - data_source = FileDataSource.factory(**options)(Config(), store, ready) - data_source.start() + source = make_data_source(**options) + source.start() assert len(store.all(SEGMENTS, lambda x: x)) == 0 time.sleep(0.5) replace_file(path, segment_only_json) @@ -217,8 +222,8 @@ def test_reloads_modified_file_in_polling_mode(): def test_evaluates_full_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: - data_source = FileDataSource.factory(paths = path) - client = LDClient(config=Config(update_processor_class = data_source, send_events = False)) + factory = FileDataSource.factory(paths = path) + client = LDClient(config=Config(update_processor_class = factory, send_events = False)) value = client.variation('flag1', 
{ 'key': 'user' }, '') assert value == 'on' finally: @@ -229,8 +234,8 @@ def test_evaluates_full_flag_with_client_as_expected(): def test_evaluates_simplified_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: - data_source = FileDataSource.factory(paths = path) - client = LDClient(config=Config(update_processor_class = data_source, send_events = False)) + factory = FileDataSource.factory(paths = path) + client = LDClient(config=Config(update_processor_class = factory, send_events = False)) value = client.variation('flag2', { 'key': 'user' }, '') assert value == 'value2' finally: From 2a822e6e82a1e8dffcdfd59d183d43219dff391c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 2 Nov 2018 13:50:31 -0700 Subject: [PATCH 014/356] rm debugging --- ldclient/file_data_source.py | 5 ----- testing/test_file_data_source.py | 5 ----- 2 files changed, 10 deletions(-) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index 0b51cfdd..c4013a52 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -217,12 +217,10 @@ def _start_auto_updater(self): # if available for the current platform. class WatchdogAutoUpdater(object): def __init__(self, resolved_paths, reloader): - print("*** all paths: %s" % resolved_paths) watched_files = set(resolved_paths) class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): def on_any_event(self, event): - print("*** got event: %s" % event.src_path) if event.src_path in watched_files: reloader() @@ -233,12 +231,10 @@ def on_any_event(self, event): self._observer = watchdog.observers.Observer() handler = LDWatchdogHandler() for path in dir_paths: - print("*** watching: %s" % path) self._observer.schedule(handler, path) self._observer.start() def stop(self): - print("*** stopping observer") self._observer.stop() self._observer.join() @@ -253,7 +249,6 @@ def __init__(self, resolved_paths, reloader, interval): self._timer.start() def stop(self): - print("*** stopping polling") self._timer.stop() def _poll(self): diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index 8a8f5d5a..68d1e5b7 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -199,24 +199,19 @@ def do_auto_update_test(options): assert len(store.all(SEGMENTS, lambda x: x)) == 0 time.sleep(0.5) replace_file(path, segment_only_json) - print("*** modified file %s" % path) deadline = time.time() + 10 while time.time() < deadline: time.sleep(0.1) if len(store.all(SEGMENTS, lambda x: x)) == 1: - print("*** success on %s" % path) return - print("*** checked %s" % path) assert False, "Flags were not reloaded after 10 seconds" finally: os.remove(path) def test_reloads_modified_file_if_auto_update_is_on(): - print("*** with watchdog") do_auto_update_test({ 'auto_update': True }) def test_reloads_modified_file_in_polling_mode(): - print("*** with polling") do_auto_update_test({ 'auto_update': True, 'force_polling': True, 'poll_interval': 0.1 }) def test_evaluates_full_flag_with_client_as_expected(): From ac5e8de65036434ee0be93dee64c7179a9200b50 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 13 Nov 2018 20:39:44 -0800 Subject: [PATCH 015/356] typo in comment --- ldclient/file_data_source.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index c4013a52..ebff765b 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -37,7 +37,7 @@ def factory(cls, **kwargs): to `factory`, 
set `paths` to the file path(s) of your data file(s): :: - factory = FileDataSource.factory(paths: [ myFilePath ]) + factory = FileDataSource.factory(paths = [ myFilePath ]) config = Config(update_processor_class = factory) This will cause the client not to connect to LaunchDarkly to get feature flags. The From 040ced945495c896db7e6eb0a5f259710f2e7113 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 29 Dec 2018 13:27:13 -0800 Subject: [PATCH 016/356] add feature store wrapper class and make Redis feature store use it --- ldclient/feature_store.py | 47 +++++ ldclient/feature_store_helpers.py | 103 +++++++++ ldclient/integrations.py | 31 +++ ldclient/interfaces.py | 145 +++++++++++-- ldclient/redis_feature_store.py | 124 +++++------ testing/test_feature_store.py | 13 +- testing/test_feature_store_helpers.py | 287 ++++++++++++++++++++++++++ 7 files changed, 649 insertions(+), 101 deletions(-) create mode 100644 ldclient/feature_store_helpers.py create mode 100644 ldclient/integrations.py create mode 100644 testing/test_feature_store_helpers.py diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index 155743ea..e4d2f667 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -4,6 +4,53 @@ from ldclient.rwlock import ReadWriteLock +class CacheConfig: + """Encapsulates caching parameters for feature store implementations that support local caching. + """ + + DEFAULT_EXPIRATION = 15 + DEFAULT_CAPACITY = 1000 + + def __init__(self, + expiration = DEFAULT_EXPIRATION, + capacity = DEFAULT_CAPACITY): + """Constructs an instance of CacheConfig. + :param float expiration: The cache TTL, in seconds. Items will be evicted from the cache after + this amount of time from the time when they were originally cached. If the time is less than or + equal to zero, caching is disabled. + :param int capacity: The maximum number of items that can be in the cache at a time. + """ + self._expiration = expiration + self._capacity = capacity + + @staticmethod + def default(): + """Returns an instance of CacheConfig with default properties. By default, caching is enabled. + This is the same as calling the constructor with no parameters. + :rtype: CacheConfig + """ + return CacheConfig() + + @staticmethod + def disabled(): + """Returns an instance of CacheConfig specifying that caching should be disabled. + :rtype: CacheConfig + """ + return CacheConfig(expiration = 0) + + @property + def enabled(self): + return self._expiration > 0 + + @property + def expiration(self): + return self._expiration + + @property + def capacity(self): + return self._capacity + + class InMemoryFeatureStore(FeatureStore): """ In-memory implementation of a store that holds feature flags and related data received from the streaming API. diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py new file mode 100644 index 00000000..d8359274 --- /dev/null +++ b/ldclient/feature_store_helpers.py @@ -0,0 +1,103 @@ +from expiringdict import ExpiringDict + +from ldclient.interfaces import FeatureStore + + +class CachingStoreWrapper(FeatureStore): + """CachingStoreWrapper is a partial implementation of :class:ldclient.interfaces.FeatureStore that + delegates the basic functionality to an implementation of :class:ldclient.interfaces.FeatureStoreCore - + while adding optional caching behavior and other logic that would otherwise be repeated in every + feature store implementation. This makes it easier to create new database integrations by implementing + only the database-specific logic. 
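+
+    For illustration only, a database integration might be assembled as follows, where
+    `MyDatabaseCore` is a hypothetical class standing in for any implementation of the
+    FeatureStoreCore methods (`init_internal`, `get_internal`, `get_all_internal`,
+    `upsert_internal`, and `initialized_internal`):
+    ::
+
+        from ldclient.feature_store import CacheConfig
+        from ldclient.feature_store_helpers import CachingStoreWrapper
+
+        core = MyDatabaseCore()  # all database-specific logic lives in the core
+        store = CachingStoreWrapper(core, CacheConfig(expiration=30))  # cache entries for 30 seconds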
+ """ + __INITED_CACHE_KEY__ = "$inited" + + def __init__(self, core, cache_config): + self._core = core + if cache_config.enabled: + self._cache = ExpiringDict(max_len=cache_config.capacity, max_age_seconds=cache_config.expiration) + else: + self._cache = None + self._inited = False + + def init(self, all_data): + self._core.init_internal(all_data) + if self._cache is not None: + self._cache.clear() + for kind, items in all_data.items(): + self._cache[self._all_cache_key(kind)] = self._items_if_not_deleted(items) + for key, item in items.items(): + self._cache[self._item_cache_key(kind, key)] = [item] # note array wrapper + self._inited = True + + def get(self, kind, key, callback=lambda x: x): + if self._cache is not None: + cache_key = self._item_cache_key(kind, key) + cached_item = self._cache.get(cache_key) + # note, cached items are wrapped in an array so we can cache None values + if cached_item is not None: + return callback(self._item_if_not_deleted(cached_item[0])) + item = self._core.get_internal(kind, key) + if self._cache is not None: + self._cache[cache_key] = [item] + return callback(self._item_if_not_deleted(item)) + + def all(self, kind, callback): + if self._cache is not None: + cache_key = self._all_cache_key(kind) + cached_items = self._cache.get(cache_key) + if cached_items is not None: + return callback(cached_items) + items = self._items_if_not_deleted(self._core.get_all_internal(kind)) + if self._cache is not None: + self._cache[cache_key] = items + return callback(items) + + def delete(self, kind, key, version): + deleted_item = { "key": key, "version": version, "deleted": True } + self.upsert(kind, deleted_item) + + def upsert(self, kind, item): + new_state = self._core.upsert_internal(kind, item) + if self._cache is not None: + self._cache[self._item_cache_key(kind, item.get('key'))] = [new_state] + self._cache.pop(self._all_cache_key(kind), None) + + @property + def initialized(self): + if self._inited: + return True + if self._cache is None: + result = self._core.initialized_internal() + else: + result = self._cache.get(CachingStoreWrapper.__INITED_CACHE_KEY__) + if result is None: + result = self._core.initialized_internal() + self._cache[CachingStoreWrapper.__INITED_CACHE_KEY__] = result + if result: + self._inited = True + return result + + @staticmethod + def _item_cache_key(kind, key): + return "{0}:{1}".format(kind.namespace, key) + + @staticmethod + def _all_cache_key(kind): + return kind.namespace + + @staticmethod + def _item_if_not_deleted(item): + if item is not None and item.get('deleted', False): + return None + return item + + @staticmethod + def _items_if_not_deleted(items): + results = {} + if items is not None: + for key, item in items.items(): + if not item.get('deleted', False): + results[key] = item + return results + \ No newline at end of file diff --git a/ldclient/integrations.py b/ldclient/integrations.py new file mode 100644 index 00000000..a82783be --- /dev/null +++ b/ldclient/integrations.py @@ -0,0 +1,31 @@ +from ldclient.feature_store import CacheConfig +from ldclient.feature_store_helpers import CachingStoreWrapper +from ldclient.redis_feature_store import _RedisFeatureStoreCore + + +class Redis(object): + """Provides factory methods for integrations between the LaunchDarkly SDK and Redis, + """ + DEFAULT_URL = 'redis://localhost:6379/0' + DEFAULT_PREFIX = 'launchdarkly' + DEFAULT_MAX_CONNECTIONS = 16 + + @staticmethod + def new_feature_store(url=Redis.DEFAULT_URL, + prefix=Redis.DEFAULT_PREFIX, + 
max_connections=Redis.DEFAULT_MAX_CONNECTIONS, + caching=CacheConfig.default()): + """Creates a Redis-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + + :param string url: The URL of the Redis host; defaults to `DEFAULT_URL` + :param string prefix: A namespace prefix to be prepended to all Redis keys; defaults to + `DEFAULT_PREFIX` + :param int max_connections: The maximum number of Redis connections to keep in the + connection pool; defaults to `DEFAULT_MAX_CONNECTIONS` + :param CacheConfig caching: Specifies whether local caching should be enabled and if so, + sets the cache properties; defaults to `CacheConfig.default()` + """ + core = _RedisFeatureStoreCore(url, prefix, max_connections) + wrapper = CachingStoreWrapper(core, caching) + wrapper.core = core # exposed for testing + return wrapper diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 39898408..2710fa25 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -3,64 +3,86 @@ class FeatureStore(object): """ - Stores and retrieves the state of feature flags and related data + A versioned store for feature flags and related objects received from LaunchDarkly. + Implementations should permit concurrent access and updates. + + An "object", for `FeatureStore`, is simply a dict of arbitrary data which must have at least + three properties: "key" (its unique key), "version" (the version number provided by + LaunchDarkly), and "deleted" (True if this is a placeholder for a deleted object). + + Delete and upsert requests are versioned-- if the version number in the request is less than + the currently stored version of the object, the request should be ignored. + + These semantics support the primary use case for the store, which synchronizes a collection + of objects based on update messages that may be received out-of-order. """ __metaclass__ = ABCMeta @abstractmethod def get(self, kind, key, callback): """ - Gets a feature and calls the callback with the feature data to return the result - :param kind: Denotes which collection to access - one of the constants in versioned_data_kind + Retrieves the object to which the specified key is mapped, or None if the key is not found + or the associated object has a "deleted" property of True. The retrieved object, if any (a + dict) can be transformed by the specified callback. + + :param kind: The kind of object to get :type kind: VersionedDataKind - :param key: The key of the object + :param key: The key whose associated object is to be returned :type key: str - :param callback: The function that accepts the retrieved data and returns a transformed value - :type callback: Function that processes the retrieved object once received. - :return: The result of executing callback. + :param callback: A function that accepts the retrieved data and returns a transformed value + :type callback: function + :return: The result of executing callback """ @abstractmethod - def all(self, callback): + def all(self, kind, callback): """ - Returns all feature flags and their data - :param kind: Denotes which collection to access - one of the constants in versioned_data_kind + Retrieves a dictionary of all associated objects of a given kind. The retrieved dict of keys + to objects can be transformed by the specified callback. 
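+
+        For example, to retrieve all feature flags unmodified, callers elsewhere in this SDK
+        pass an identity function:
+        ::
+
+            flags = store.all(FEATURES, lambda x: x)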
+
+        :param kind: The kind of objects to get
         :type kind: VersionedDataKind
-        :param callback: The function that accepts the retrieved data and returns a transformed value
-        :type callback: Function that processes the retrieved objects once received.
-        :rtype: The result of executing callback.
+        :param callback: A function that accepts the retrieved data and returns a transformed value
+        :type callback: function
+        :rtype: The result of executing callback
         """
 
     @abstractmethod
     def init(self, all_data):
         """
-        Initializes the store with a set of objects. Meant to be called by the UpdateProcessor
+        Initializes (or re-initializes) the store with the specified set of objects. Any existing entries
+        will be removed. Implementations can assume that this set of objects is up to date-- there is no
+        need to perform individual version comparisons between the existing objects and the supplied data.
 
-        :param all_data: The features and their data as provided by LD
+        :param all_data: All objects to be stored
         :type all_data: dict[VersionedDataKind, dict[str, dict]]
         """
 
     @abstractmethod
     def delete(self, kind, key, version):
         """
-        Marks an object as deleted
+        Deletes the object associated with the specified key, if it exists and its version is less than
+        the specified version. The object should be replaced in the data store by a
+        placeholder with the specified version and a "deleted" property of True.
 
-        :param kind: Denotes which collection to access - one of the constants in versioned_data_kind
+        :param kind: The kind of object to delete
         :type kind: VersionedDataKind
-        :param key: The object key
+        :param key: The key of the object to be deleted
         :type key: str
-        :param version: The version of the object to mark as deleted
+        :param version: The version for the delete operation
         :type version: int
         """
 
     @abstractmethod
     def upsert(self, kind, item):
         """
-        Inserts an object if its version is newer or missing
+        Updates or inserts the object associated with the specified key. If an item with the same key
+        already exists, it should update it only if the new item's version property is greater than
+        the old one.
 
-        :param kind: Denotes which collection to access - one of the constants in versioned_data_kind
+        :param kind: The kind of object to update
         :type kind: VersionedDataKind
-        :param item: The object to be inserted or updated - must have key and version properties
+        :param item: The object to update or insert
         :type item: dict
         """
@@ -73,6 +95,85 @@ def initialized(self):
         """
 
 
+class FeatureStoreCore(object):
+    """
+    `FeatureStoreCore` is an interface for a simplified subset of the functionality of :class:`FeatureStore`,
+    to be used in conjunction with :class:`feature_store_helpers.CachingStoreWrapper`. This allows
+    developers of custom `FeatureStore` implementations to avoid repeating logic that would
+    commonly be needed in any such implementation, such as caching. Instead, they can implement
+    only `FeatureStoreCore` and then create a `CachingStoreWrapper`.
+    """
+    __metaclass__ = ABCMeta
+
+    @abstractmethod
+    def get_internal(self, kind, key):
+        """
+        Returns the object to which the specified key is mapped, or None if no such item exists.
+        The method should not attempt to filter out any items based on their deleted property,
+        nor to cache any items.
+
+        :param kind: The kind of object to get
+        :type kind: VersionedDataKind
+        :param key: The key of the object
+        :type key: str
+        :return: The object to which the specified key is mapped, or None
+        :rtype: dict
+        """
+
+    @abstractmethod
+    def get_all_internal(self, kind):
+        """
+        Returns a dictionary of all associated objects of a given kind. The method should not attempt
+        to filter out any items based on their deleted property, nor to cache any items.
+
+        :param kind: The kind of objects to get
+        :type kind: VersionedDataKind
+        :return: A dictionary of keys to items
+        :rtype: dict[str, dict]
+        """
+
+    @abstractmethod
+    def init_internal(self, all_data):
+        """
+        Initializes (or re-initializes) the store with the specified set of objects. Any existing entries
+        will be removed. Implementations can assume that this set of objects is up to date-- there is no
+        need to perform individual version comparisons between the existing objects and the supplied
+        data.
+
+        :param all_data: A dictionary of data kinds to item collections
+        :type all_data: dict[VersionedDataKind, dict[str, dict]]
+        """
+
+    @abstractmethod
+    def upsert_internal(self, kind, item):
+        """
+        Updates or inserts the object associated with the specified key. If an item with the same key
+        already exists, it should update it only if the new item's version property is greater than
+        the old one. It should return the final state of the item, i.e. if the update succeeded then
+        it returns the item that was passed in, and if the update failed due to the version check
+        then it returns the item that is currently in the data store (this ensures that
+        `CachingStoreWrapper` will update the cache correctly).
+
+        :param kind: The kind of object to update
+        :type kind: VersionedDataKind
+        :param item: The object to update or insert
+        :type item: dict
+        :return: The state of the object after the update
+        :rtype: dict
+        """
+
+    @abstractmethod
+    def initialized_internal(self):
+        """
+        Returns true if this store has been initialized. In a shared data store, it should be able to
+        detect this even if init_internal was called in a different process, i.e. the test should be
+        based on looking at what is in the data store. The method does not need to worry about caching
+        this value; `CachingStoreWrapper` will only call it when necessary.
+
+        :rtype: bool
+        """
+
+
 class BackgroundOperation(object):
     """
     Performs a task in the background
diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py
index 71b7261b..b9bdf731 100644
--- a/ldclient/redis_feature_store.py
+++ b/ldclient/redis_feature_store.py
@@ -1,21 +1,20 @@
 import json
-from pprint import pprint
-from expiringdict import ExpiringDict
 import redis
 
 from ldclient import log
-from ldclient.interfaces import FeatureStore
-from ldclient.memoized_value import MemoizedValue
+from ldclient.feature_store import CacheConfig
+from ldclient.feature_store_helpers import CachingStoreWrapper
+from ldclient.interfaces import FeatureStore, FeatureStoreCore
 from ldclient.versioned_data_kind import FEATURES
 
 
-class ForgetfulDict(dict):
-    def __setitem__(self, key, value):
-        pass
-
-
 class RedisFeatureStore(FeatureStore):
+    """A Redis-backed implementation of :class:`ldclient.feature_store.FeatureStore`.
+
+    This implementation class is deprecated and may be changed or removed in the future. Please use
+    :func:`ldclient.integrations.Redis.new_feature_store()`.
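+
+    For example, a sketch of the recommended replacement, using defaults equivalent to this
+    class's own:
+    ::
+
+        from ldclient.integrations import Redis
+        store = Redis.new_feature_store(caching = CacheConfig.default())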
+ """ def __init__(self, url='redis://localhost:6379/0', prefix='launchdarkly', @@ -23,23 +22,42 @@ def __init__(self, expiration=15, capacity=1000): + self.core = _RedisFeatureStoreCore(url, prefix, max_connections) # exposed for testing + self._wrapper = CachingStoreWrapper(self.core, CacheConfig(expiration=expiration, capacity=capacity)) + + def get(self, kind, key, callback = lambda x: x): + return self._wrapper.get(kind, key, callback) + + def all(self, kind, callback): + return self._wrapper.all(kind, callback) + + def init(self, all_data): + return self._wrapper.init(all_data) + + def upsert(self, kind, item): + return self._wrapper.upsert(kind, item) + + def delete(self, kind, key, version): + return self._wrapper.delete(kind, key, version) + + @property + def initialized(self): + return self._wrapper.initialized + + +class _RedisFeatureStoreCore(FeatureStoreCore): + def __init__(self, url, prefix, max_connections): self._prefix = prefix - self._cache = ForgetfulDict() if expiration == 0 else ExpiringDict(max_len=capacity, - max_age_seconds=expiration) self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) - self._inited = MemoizedValue(lambda: self._query_init()) + self.test_update_hook = None # exposed for testing log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + prefix) def _items_key(self, kind): return "{0}:{1}".format(self._prefix, kind.namespace) - def _cache_key(self, kind, key): - return "{0}:{1}".format(kind.namespace, key) - - def init(self, all_data): + def init_internal(self, all_data): pipe = redis.Redis(connection_pool=self._pool).pipeline() - self._cache.clear() all_count = 0 for kind, items in all_data.items(): @@ -48,53 +66,30 @@ def init(self, all_data): for key, item in items.items(): item_json = json.dumps(item) pipe.hset(base_key, key, item_json) - self._cache[self._cache_key(kind, key)] = item all_count = all_count + len(items) - try: - pipe.execute() - except: - self._cache.clear() - raise + pipe.execute() log.info("Initialized RedisFeatureStore with %d items", all_count) - self._inited.set(True) - def all(self, kind, callback): + def get_all_internal(self, kind): r = redis.Redis(connection_pool=self._pool) try: all_items = r.hgetall(self._items_key(kind)) except BaseException as e: log.error("RedisFeatureStore: Could not retrieve '%s' from Redis with error: %s. Returning None.", kind.namespace, e) - return callback(None) + return None if all_items is None or all_items is "": log.warn("RedisFeatureStore: call to get all '%s' returned no results. Returning None.", kind.namespace) - return callback(None) + return None results = {} for key, item_json in all_items.items(): key = key.decode('utf-8') # necessary in Python 3 - item = json.loads(item_json.decode('utf-8')) - if item.get('deleted', False) is False: - results[key] = item - return callback(results) - - def get(self, kind, key, callback=lambda x: x): - item = self._get_even_if_deleted(kind, key, check_cache=True) - if item is not None and item.get('deleted', False) is True: - log.debug("RedisFeatureStore: get returned deleted item %s in '%s'. 
Returning None.", key, kind.namespace) - return callback(None) - return callback(item) - - def _get_even_if_deleted(self, kind, key, check_cache = True): - cacheKey = self._cache_key(kind, key) - if check_cache: - item = self._cache.get(cacheKey) - if item is not None: - # reset ttl - self._cache[cacheKey] = item - return item + results[key] = json.loads(item_json.decode('utf-8')) + return results + def get_internal(self, kind, key): try: r = redis.Redis(connection_pool=self._pool) item_json = r.hget(self._items_key(kind), key) @@ -107,26 +102,9 @@ def _get_even_if_deleted(self, kind, key, check_cache = True): log.debug("RedisFeatureStore: key %s not found in '%s'. Returning None.", key, kind.namespace) return None - item = json.loads(item_json.decode('utf-8')) - self._cache[cacheKey] = item - return item - - def delete(self, kind, key, version): - deleted_item = { "key": key, "version": version, "deleted": True } - self._update_with_versioning(kind, deleted_item) - - def upsert(self, kind, item): - self._update_with_versioning(kind, item) - - @property - def initialized(self): - return self._inited.get() - - def _query_init(self): - r = redis.Redis(connection_pool=self._pool) - return r.exists(self._items_key(FEATURES)) + return json.loads(item_json.decode('utf-8')) - def _update_with_versioning(self, kind, item): + def upsert_internal(self, kind, item): r = redis.Redis(connection_pool=self._pool) base_key = self._items_key(kind) key = item['key'] @@ -135,14 +113,15 @@ def _update_with_versioning(self, kind, item): while True: pipeline = r.pipeline() pipeline.watch(base_key) - old = self._get_even_if_deleted(kind, key, check_cache=False) - self._before_update_transaction(base_key, key) + old = self.get_internal(kind, key) + if self.test_update_hook is not None: + self.test_update_hook(base_key, key) if old and old['version'] >= item['version']: log.debug('RedisFeatureStore: Attempted to %s key: %s version %d with a version that is the same or older: %d in "%s"', 'delete' if item.get('deleted') else 'update', key, old['version'], item['version'], kind.namespace) pipeline.unwatch() - break + return old else: pipeline.multi() pipeline.hset(base_key, key, item_json) @@ -153,8 +132,11 @@ def _update_with_versioning(self, kind, item): except redis.exceptions.WatchError: log.debug("RedisFeatureStore: concurrent modification detected, retrying") continue - self._cache[self._cache_key(kind, key)] = item - break + return item + + def initialized_internal(self): + r = redis.Redis(connection_pool=self._pool) + return r.exists(self._items_key(FEATURES)) def _before_update_transaction(self, base_key, key): # exposed for testing diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 245341ec..b8696529 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -1,5 +1,4 @@ import json -from mock import patch import pytest import redis @@ -133,8 +132,7 @@ def test_upsert_older_version_after_delete(self, store): class TestRedisFeatureStoreExtraTests: - @patch.object(RedisFeatureStore, '_before_update_transaction') - def test_upsert_race_condition_against_external_client_with_higher_version(self, mock_method): + def test_upsert_race_condition_against_external_client_with_higher_version(self): other_client = redis.StrictRedis(host='localhost', port=6379, db=0) store = RedisFeatureStore() store.init({ FEATURES: {} }) @@ -144,7 +142,7 @@ def hook(base_key, key): if other_version['version'] <= 4: other_client.hset(base_key, key, json.dumps(other_version)) 
other_version['version'] = other_version['version'] + 1 - mock_method.side_effect = hook + store.core.test_update_hook = hook feature = { u'key': 'flagkey', u'version': 1 } @@ -152,8 +150,7 @@ def hook(base_key, key): result = store.get(FEATURES, 'flagkey', lambda x: x) assert result['version'] == 2 - @patch.object(RedisFeatureStore, '_before_update_transaction') - def test_upsert_race_condition_against_external_client_with_lower_version(self, mock_method): + def test_upsert_race_condition_against_external_client_with_lower_version(self): other_client = redis.StrictRedis(host='localhost', port=6379, db=0) store = RedisFeatureStore() store.init({ FEATURES: {} }) @@ -163,7 +160,7 @@ def hook(base_key, key): if other_version['version'] <= 4: other_client.hset(base_key, key, json.dumps(other_version)) other_version['version'] = other_version['version'] + 1 - mock_method.side_effect = hook + store.core.test_update_hook = hook feature = { u'key': 'flagkey', u'version': 5 } @@ -186,7 +183,7 @@ def test_exception_is_handled_in_all(self, caplog): # This just verifies the fix for a bug that caused an error during exception handling in Python 3 store = RedisFeatureStore(url='redis://bad') all = store.all(FEATURES, lambda x: x) - assert all is None + assert all == {} loglines = get_log_lines(caplog) assert len(loglines) == 2 message = loglines[1].message diff --git a/testing/test_feature_store_helpers.py b/testing/test_feature_store_helpers.py new file mode 100644 index 00000000..01bb245a --- /dev/null +++ b/testing/test_feature_store_helpers.py @@ -0,0 +1,287 @@ +import pytest +from time import sleep + +from ldclient.feature_store import CacheConfig +from ldclient.feature_store_helpers import CachingStoreWrapper +from ldclient.versioned_data_kind import VersionedDataKind + +THINGS = VersionedDataKind(namespace = "things", request_api_path = "", stream_api_path = "") +WRONG_THINGS = VersionedDataKind(namespace = "wrong", request_api_path = "", stream_api_path = "") + +def make_wrapper(core, cached): + return CachingStoreWrapper(core, CacheConfig(expiration=30) if cached else CacheConfig.disabled()) + +class MockCore: + def __init__(self): + self.data = {} + self.inited = False + self.inited_query_count = 0 + + def init_internal(self, all_data): + self.data = {} + for kind, items in all_data.items(): + self.data[kind] = items.copy() + + def get_internal(self, kind, key): + items = self.data.get(kind) + return None if items is None else items.get(key) + + def get_all_internal(self, kind): + return self.data.get(kind) + + def upsert_internal(self, kind, item): + key = item.get('key') + items = self.data.get(kind) + if items is None: + items = {} + self.data[kind] = items + old_item = items.get(key) + if old_item is None or old_item.get('version') < item.get('version'): + items[key] = item + return item + return old_item + + def initialized_internal(self): + self.inited_query_count = self.inited_query_count + 1 + return self.inited + + def force_set(self, kind, item): + items = self.data.get(kind) + if items is None: + items = {} + self.data[kind] = items + items[item.get('key')] = item + + def force_remove(self, kind, key): + items = self.data.get(kind) + if items is not None: + items.pop(key, None) + +class TestCachingStoreWrapper: + @pytest.mark.parametrize("cached", [False, True]) + def test_get_item(self, cached): + core = MockCore() + wrapper = make_wrapper(core, cached) + key = "flag" + itemv1 = { "key": key, "version": 1 } + itemv2 = { "key": key, "version": 2 } + + core.force_set(THINGS, itemv1) 
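+        # force_set writes to the core directly, bypassing the wrapper and its cache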
+        assert wrapper.get(THINGS, key) == itemv1
+
+        core.force_set(THINGS, itemv2)
+        assert wrapper.get(THINGS, key) == (itemv1 if cached else itemv2)  # if cached, we will not see the new underlying value yet
+
+    @pytest.mark.parametrize("cached", [False, True])
+    def test_get_deleted_item(self, cached):
+        core = MockCore()
+        wrapper = make_wrapper(core, cached)
+        key = "flag"
+        itemv1 = { "key": key, "version": 1, "deleted": True }
+        itemv2 = { "key": key, "version": 2 }
+
+        core.force_set(THINGS, itemv1)
+        assert wrapper.get(THINGS, key) is None  # item is filtered out because deleted is true
+
+        core.force_set(THINGS, itemv2)
+        assert wrapper.get(THINGS, key) == (None if cached else itemv2)  # if cached, we will not see the new underlying value yet
+
+    @pytest.mark.parametrize("cached", [False, True])
+    def test_get_missing_item(self, cached):
+        core = MockCore()
+        wrapper = make_wrapper(core, cached)
+        key = "flag"
+        item = { "key": key, "version": 1 }
+
+        assert wrapper.get(THINGS, key) is None
+
+        core.force_set(THINGS, item)
+        assert wrapper.get(THINGS, key) == (None if cached else item)  # the cache can retain a None result
+
+    @pytest.mark.parametrize("cached", [False, True])
+    def test_get_with_lambda(self, cached):
+        core = MockCore()
+        wrapper = make_wrapper(core, cached)
+        key = "flag"
+        item = { "key": key, "version": 1 }
+        modified_item = { "key": key, "version": 99 }
+
+        core.force_set(THINGS, item)
+        assert wrapper.get(THINGS, key, lambda x: modified_item) == modified_item
+
+    def test_cached_get_uses_values_from_init(self):
+        core = MockCore()
+        wrapper = make_wrapper(core, True)
+        item1 = { "key": "flag1", "version": 1 }
+        item2 = { "key": "flag2", "version": 1 }
+
+        wrapper.init({ THINGS: { item1["key"]: item1, item2["key"]: item2 } })
+        core.force_remove(THINGS, item1["key"])
+        assert wrapper.get(THINGS, item1["key"]) == item1
+
+    @pytest.mark.parametrize("cached", [False, True])
+    def test_get_all(self, cached):
+        core = MockCore()
+        wrapper = make_wrapper(core, cached)
+        item1 = { "key": "flag1", "version": 1 }
+        item2 = { "key": "flag2", "version": 1 }
+
+        core.force_set(THINGS, item1)
+        core.force_set(THINGS, item2)
+        assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1, item2["key"]: item2 }
+
+        core.force_remove(THINGS, item2["key"])
+        if cached:
+            assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1, item2["key"]: item2 }
+        else:
+            assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1 }
+
+    @pytest.mark.parametrize("cached", [False, True])
+    def test_get_all_removes_deleted_items(self, cached):
+        core = MockCore()
+        wrapper = make_wrapper(core, cached)
+        item1 = { "key": "flag1", "version": 1 }
+        item2 = { "key": "flag2", "version": 1, "deleted": True }
+
+        core.force_set(THINGS, item1)
+        core.force_set(THINGS, item2)
+        assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1 }
+
+    @pytest.mark.parametrize("cached", [False, True])
+    def test_get_all_changes_None_to_empty_dict(self, cached):
+        core = MockCore()
+        wrapper = make_wrapper(core, cached)
+
+        assert wrapper.all(WRONG_THINGS, lambda x: x) == {}
+
+    @pytest.mark.parametrize("cached", [False, True])
+    def test_get_all_with_lambda(self, cached):
+        core = MockCore()
+        wrapper = make_wrapper(core, cached)
+        extra = { "extra": True }
+        item1 = { "key": "flag1", "version": 1 }
+        item2 = { "key": "flag2", "version": 1 }
+        core.force_set(THINGS, item1)
+        core.force_set(THINGS, item2)
+        assert wrapper.all(THINGS, lambda x: dict(x, **extra)) == {
+            item1["key"]: item1,
item2["key"]: item2, "extra": True + } + + def test_cached_get_all_uses_values_from_init(self): + core = MockCore() + wrapper = make_wrapper(core, True) + item1 = { "key": "flag1", "version": 1 } + item2 = { "key": "flag2", "version": 1 } + both = { item1["key"]: item1, item2["key"]: item2 } + + wrapper.init({ THINGS: both }) + core.force_remove(THINGS, item1["key"]) + assert wrapper.all(THINGS, lambda x: x) == both + + @pytest.mark.parametrize("cached", [False, True]) + def test_upsert_successful(self, cached): + core = MockCore() + wrapper = make_wrapper(core, cached) + key = "flag" + itemv1 = { "key": key, "version": 1 } + itemv2 = { "key": key, "version": 2 } + + wrapper.upsert(THINGS, itemv1) + assert core.data[THINGS][key] == itemv1 + + wrapper.upsert(THINGS, itemv2) + assert core.data[THINGS][key] == itemv2 + + # if we have a cache, verify that the new item is now cached by writing a different value + # to the underlying data - Get should still return the cached item + if cached: + itemv3 = { "key": key, "version": 3 } + core.force_set(THINGS, itemv3) + + assert wrapper.get(THINGS, key) == itemv2 + + def test_cached_upsert_unsuccessful(self): + # This is for an upsert where the data in the store has a higher version. In an uncached + # store, this is just a no-op as far as the wrapper is concerned so there's nothing to + # test here. In a cached store, we need to verify that the cache has been refreshed + # using the data that was found in the store. + core = MockCore() + wrapper = make_wrapper(core, True) + key = "flag" + itemv1 = { "key": key, "version": 1 } + itemv2 = { "key": key, "version": 2 } + + wrapper.upsert(THINGS, itemv2) + assert core.data[THINGS][key] == itemv2 + + wrapper.upsert(THINGS, itemv1) + assert core.data[THINGS][key] == itemv2 # value in store remains the same + + itemv3 = { "key": key, "version": 3 } + core.force_set(THINGS, itemv3) # bypasses cache so we can verify that itemv2 is in the cache + assert wrapper.get(THINGS, key) == itemv2 + + @pytest.mark.parametrize("cached", [False, True]) + def test_delete(self, cached): + core = MockCore() + wrapper = make_wrapper(core, cached) + key = "flag" + itemv1 = { "key": key, "version": 1 } + itemv2 = { "key": key, "version": 2, "deleted": True } + itemv3 = { "key": key, "version": 3 } + + core.force_set(THINGS, itemv1) + assert wrapper.get(THINGS, key) == itemv1 + + wrapper.delete(THINGS, key, 2) + assert core.data[THINGS][key] == itemv2 + + core.force_set(THINGS, itemv3) # make a change that bypasses the cache + assert wrapper.get(THINGS, key) == (None if cached else itemv3) + + def test_uncached_initialized_queries_state_only_until_inited(self): + core = MockCore() + wrapper = make_wrapper(core, False) + + assert wrapper.initialized is False + assert core.inited_query_count == 1 + + core.inited = True + assert wrapper.initialized is True + assert core.inited_query_count == 2 + + core.inited = False + assert wrapper.initialized is True + assert core.inited_query_count == 2 + + def test_uncached_initialized_does_not_query_state_if_init_was_called(self): + core = MockCore() + wrapper = make_wrapper(core, False) + + assert wrapper.initialized is False + assert core.inited_query_count == 1 + + wrapper.init({}) + + assert wrapper.initialized is True + assert core.inited_query_count == 1 + + def test_cached_initialized_can_cache_false_result(self): + core = MockCore() + wrapper = CachingStoreWrapper(core, CacheConfig(expiration=0.2)) # use a shorter cache TTL for this test + + assert wrapper.initialized is False + 
assert core.inited_query_count == 1 + + core.inited = True + assert wrapper.initialized is False + assert core.inited_query_count == 1 + + sleep(0.5) + + assert wrapper.initialized is True + assert core.inited_query_count == 2 + + # From this point on it should remain true and the method should not be called + assert wrapper.initialized is True + assert core.inited_query_count == 2 From 59a67a844b1650eb7a7600a1d44ca120a8f03a72 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 29 Dec 2018 13:39:42 -0800 Subject: [PATCH 017/356] test the new Redis factory method --- ldclient/integrations.py | 6 +++--- testing/test_feature_store.py | 23 +++++++++++++++++------ 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/ldclient/integrations.py b/ldclient/integrations.py index a82783be..86b5248d 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -11,9 +11,9 @@ class Redis(object): DEFAULT_MAX_CONNECTIONS = 16 @staticmethod - def new_feature_store(url=Redis.DEFAULT_URL, - prefix=Redis.DEFAULT_PREFIX, - max_connections=Redis.DEFAULT_MAX_CONNECTIONS, + def new_feature_store(url='redis://localhost:6379/0', + prefix='launchdarkly', + max_connections=16, caching=CacheConfig.default()): """Creates a Redis-backed implementation of `:class:ldclient.feature_store.FeatureStore`. diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index b8696529..5716fa0e 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -2,7 +2,8 @@ import pytest import redis -from ldclient.feature_store import InMemoryFeatureStore +from ldclient.feature_store import CacheConfig, InMemoryFeatureStore +from ldclient.integrations import Redis from ldclient.redis_feature_store import RedisFeatureStore from ldclient.versioned_data_kind import FEATURES @@ -19,17 +20,27 @@ class TestFeatureStore: redis_host = 'localhost' redis_port = 6379 + def clear_redis_data(self): + r = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0) + r.delete("launchdarkly:features") + def in_memory(self): return InMemoryFeatureStore() def redis_with_local_cache(self): - r = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0) - r.delete("launchdarkly:features") - return RedisFeatureStore() + self.clear_redis_data() + return Redis.new_feature_store() def redis_no_local_cache(self): - r = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0) - r.delete("launchdarkly:features") + self.clear_redis_data() + return Redis.new_feature_store(caching=CacheConfig.disabled()) + + def deprecated_redis_with_local_cache(self): + self.clear_redis_data() + return RedisFeatureStore() + + def deprecated_redis_no_local_cache(self): + self.clear_redis_data() return RedisFeatureStore(expiration=0) params = [in_memory, redis_with_local_cache, redis_no_local_cache] From 1e38ac10afceb7a4b34ada8351e4c9552070f563 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 29 Dec 2018 15:22:39 -0800 Subject: [PATCH 018/356] add DynamoDB support --- .circleci/config.yml | 6 + dynamodb-requirements.txt | 1 + ldclient/dynamodb_feature_store.py | 191 +++++++++++++++++++++++++++++ ldclient/integrations.py | 25 +++- ldclient/redis_feature_store.py | 11 +- test-requirements.txt | 1 + testing/test_feature_store.py | 134 ++++++++++++++++---- 7 files changed, 345 insertions(+), 24 deletions(-) create mode 100644 dynamodb-requirements.txt create mode 100644 ldclient/dynamodb_feature_store.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 05cb973c..92699a3c 100644 --- 
a/.circleci/config.yml +++ b/.circleci/config.yml @@ -40,28 +40,34 @@ jobs: docker: - image: circleci/python:2.7-jessie - image: redis + - image: amazon/dynamodb-local test-3.3: <<: *test-template docker: - image: circleci/python:3.3-jessie - image: redis + - image: amazon/dynamodb-local test-3.4: <<: *test-template docker: - image: circleci/python:3.4-jessie - image: redis + - image: amazon/dynamodb-local test-3.5: <<: *test-template docker: - image: circleci/python:3.5-jessie - image: redis + - image: amazon/dynamodb-local test-3.6: <<: *test-template docker: - image: circleci/python:3.6-jessie - image: redis + - image: amazon/dynamodb-local test-3.7: <<: *test-template docker: - image: circleci/python:3.7-stretch - image: redis + - image: amazon/dynamodb-local diff --git a/dynamodb-requirements.txt b/dynamodb-requirements.txt new file mode 100644 index 00000000..b72b66b6 --- /dev/null +++ b/dynamodb-requirements.txt @@ -0,0 +1 @@ +boto3>=1.9.71 diff --git a/ldclient/dynamodb_feature_store.py b/ldclient/dynamodb_feature_store.py new file mode 100644 index 00000000..f3879d71 --- /dev/null +++ b/ldclient/dynamodb_feature_store.py @@ -0,0 +1,191 @@ +import json + +have_dynamodb = False +try: + import boto3 + have_dynamodb = True +except ImportError: + pass + +from ldclient import log +from ldclient.feature_store import CacheConfig +from ldclient.feature_store_helpers import CachingStoreWrapper +from ldclient.interfaces import FeatureStore, FeatureStoreCore + +# +# Internal implementation of the DynamoDB feature store. +# +# Implementation notes: +# +# * Feature flags, segments, and any other kind of entity the LaunchDarkly client may wish +# to store, are all put in the same table. The only two required attributes are "key" (which +# is present in all storeable entities) and "namespace" (a parameter from the client that is +# used to disambiguate between flags and segments). +# +# * Because of DynamoDB's restrictions on attribute values (e.g. empty strings are not +# allowed), the standard DynamoDB marshaling mechanism with one attribute per object property +# is not used. Instead, the entire object is serialized to JSON and stored in a single +# attribute, "item". The "version" property is also stored as a separate attribute since it +# is used for updates. +# +# * Since DynamoDB doesn't have transactions, the init() method - which replaces the entire data +# store - is not atomic, so there can be a race condition if another process is adding new data +# via upsert(). To minimize this, we don't delete all the data at the start; instead, we update +# the items we've received, and then delete all other items. That could potentially result in +# deleting new data from another process, but that would be the case anyway if the init() +# happened to execute later than the upsert(); we are relying on the fact that normally the +# process that did the init() will also receive the new data shortly and do its own upsert(). +# +# * DynamoDB has a maximum item size of 400KB. Since each feature flag or user segment is +# stored as a single item, this mechanism will not work for extremely large flags or segments. 
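+#
+# * As a concrete illustration of the marshaling described above (the flag key and values are
+#   hypothetical), a flag { "key": "my-flag", "version": 2 } stored under the "features"
+#   namespace would be written roughly as:
+#       { "namespace": { "S": "features" }, "key": { "S": "my-flag" },
+#         "version": { "N": "2" }, "item": { "S": "{\"key\": \"my-flag\", \"version\": 2}" } }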
+# + +class _DynamoDBFeatureStoreCore(FeatureStoreCore): + PARTITION_KEY = 'namespace' + SORT_KEY = 'key' + VERSION_ATTRIBUTE = 'version' + ITEM_JSON_ATTRIBUTE = 'item' + + def __init__(self, table_name, prefix, dynamodb_opts): + if not have_dynamodb: + raise NotImplementedError("Cannot use DynamoDB feature store because AWS SDK (boto3 package) is not installed") + self._table_name = table_name + self._prefix = None if prefix == "" else prefix + self._client = boto3.client('dynamodb', **dynamodb_opts) + + def init_internal(self, all_data): + # Start by reading the existing keys; we will later delete any of these that weren't in all_data. + unused_old_keys = self._read_existing_keys(all_data.keys()) + requests = [] + num_items = 0 + inited_key = self._inited_key() + + # Insert or update every provided item + for kind, items in all_data.items(): + for key, item in items.items(): + encoded_item = self._marshal_item(kind, item) + requests.append({ 'PutRequest': { 'Item': encoded_item } }) + combined_key = (self._namespace_for_kind(kind), key) + unused_old_keys.discard(combined_key) + num_items = num_items + 1 + + # Now delete any previously existing items whose keys were not in the current data + for combined_key in unused_old_keys: + if combined_key[0] != inited_key: + requests.append({ 'DeleteRequest': { 'Key': self._make_keys(combined_key[0], combined_key[1]) } }) + + # Now set the special key that we check in initialized_internal() + requests.append({ 'PutRequest': { 'Item': self._make_keys(inited_key, inited_key) } }) + + _DynamoDBHelpers.batch_write_requests(self._client, self._table_name, requests) + log.info('Initialized table %s with %d items', self._table_name, num_items) + + def get_internal(self, kind, key): + resp = self._get_item_by_keys(self._namespace_for_kind(kind), key) + return self._unmarshal_item(resp.get('Item')) + + def get_all_internal(self, kind): + items_out = {} + paginator = self._client.get_paginator('query') + for resp in paginator.paginate(**self._make_query_for_kind(kind)): + for item in resp['Items']: + item_out = self._unmarshal_item(item) + items_out[item_out['key']] = item_out + return items_out + + def upsert_internal(self, kind, item): + encoded_item = self._marshal_item(kind, item) + try: + req = { + 'TableName': self._table_name, + 'Item': encoded_item, + 'ConditionExpression': 'attribute_not_exists(#namespace) or attribute_not_exists(#key) or :version > #version', + 'ExpressionAttributeNames': { + '#namespace': self.PARTITION_KEY, + '#key': self.SORT_KEY, + '#version': self.VERSION_ATTRIBUTE + }, + 'ExpressionAttributeValues': { + ':version': { 'N': str(item['version']) } + } + } + self._client.put_item(**req) + except self._client.exceptions.ConditionalCheckFailedException: + # The item was not updated because there's a newer item in the database. We must now + # read the item that's in the database and return it, so CachingStoreWrapper can cache it. 
+            return self.get_internal(kind, item['key'])
+        return item
+
+    def initialized_internal(self):
+        resp = self._get_item_by_keys(self._inited_key(), self._inited_key())
+        return resp.get('Item') is not None and len(resp['Item']) > 0
+
+    def _prefixed_namespace(self, base):
+        return base if self._prefix is None else (self._prefix + ':' + base)
+
+    def _namespace_for_kind(self, kind):
+        return self._prefixed_namespace(kind.namespace)
+
+    def _inited_key(self):
+        return self._prefixed_namespace('$inited')
+
+    def _make_keys(self, namespace, key):
+        return {
+            self.PARTITION_KEY: { 'S': namespace },
+            self.SORT_KEY: { 'S': key }
+        }
+
+    def _make_query_for_kind(self, kind):
+        return {
+            'TableName': self._table_name,
+            'ConsistentRead': True,
+            'KeyConditions': {
+                self.PARTITION_KEY: {
+                    'AttributeValueList': [
+                        { 'S': self._namespace_for_kind(kind) }
+                    ],
+                    'ComparisonOperator': 'EQ'
+                }
+            }
+        }
+
+    def _get_item_by_keys(self, namespace, key):
+        return self._client.get_item(TableName=self._table_name, Key=self._make_keys(namespace, key))
+
+    def _read_existing_keys(self, kinds):
+        keys = set()
+        for kind in kinds:
+            req = self._make_query_for_kind(kind)
+            req['ProjectionExpression'] = '#namespace, #key'
+            req['ExpressionAttributeNames'] = {
+                '#namespace': self.PARTITION_KEY,
+                '#key': self.SORT_KEY
+            }
+            paginator = self._client.get_paginator('query')
+            for resp in paginator.paginate(**req):
+                for item in resp['Items']:
+                    namespace = item[self.PARTITION_KEY]['S']
+                    key = item[self.SORT_KEY]['S']
+                    keys.add((namespace, key))
+        return keys
+
+    def _marshal_item(self, kind, item):
+        json_str = json.dumps(item)
+        ret = self._make_keys(self._namespace_for_kind(kind), item['key'])
+        ret[self.VERSION_ATTRIBUTE] = { 'N': str(item['version']) }
+        ret[self.ITEM_JSON_ATTRIBUTE] = { 'S': json_str }
+        return ret
+
+    def _unmarshal_item(self, item):
+        if item is None:
+            return None
+        json_attr = item.get(self.ITEM_JSON_ATTRIBUTE)
+        return None if json_attr is None else json.loads(json_attr['S'])
+
+
+class _DynamoDBHelpers(object):
+    @staticmethod
+    def batch_write_requests(client, table_name, requests):
+        batch_size = 25
+        for batch in (requests[i:i+batch_size] for i in range(0, len(requests), batch_size)):
+            client.batch_write_item(RequestItems={ table_name: batch })
diff --git a/ldclient/integrations.py b/ldclient/integrations.py
index 86b5248d..80063389 100644
--- a/ldclient/integrations.py
+++ b/ldclient/integrations.py
@@ -1,10 +1,33 @@
 from ldclient.feature_store import CacheConfig
 from ldclient.feature_store_helpers import CachingStoreWrapper
+from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore
 from ldclient.redis_feature_store import _RedisFeatureStoreCore
 
 
+class DynamoDB(object):
+    """Provides factory methods for integrations between the LaunchDarkly SDK and DynamoDB.
+    """
+
+    @staticmethod
+    def new_feature_store(table_name,
+                          prefix=None,
+                          dynamodb_opts={},
+                          caching=CacheConfig.default()):
+        """Creates a DynamoDB-backed implementation of `:class:ldclient.feature_store.FeatureStore`.
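+
+        A minimal usage sketch (the table name is illustrative; the table must already exist):
+        ::
+
+            store = DynamoDB.new_feature_store('my-ld-table', dynamodb_opts={ 'region_name': 'us-east-1' })
+            config = Config(feature_store=store)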
+
+        :param string table_name: The name of an existing DynamoDB table
+        :param string prefix: An optional namespace prefix to be prepended to all DynamoDB keys
+        :param dict dynamodb_opts: Optional parameters for configuring the DynamoDB client, as defined in
+            the boto3 API
+        :param CacheConfig caching: Specifies whether local caching should be enabled and if so,
+            sets the cache properties; defaults to `CacheConfig.default()`
+        """
+        core = _DynamoDBFeatureStoreCore(table_name, prefix, dynamodb_opts)
+        return CachingStoreWrapper(core, caching)
+
+
 class Redis(object):
-    """Provides factory methods for integrations between the LaunchDarkly SDK and Redis,
+    """Provides factory methods for integrations between the LaunchDarkly SDK and Redis.
     """
     DEFAULT_URL = 'redis://localhost:6379/0'
     DEFAULT_PREFIX = 'launchdarkly'
diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py
index b9bdf731..02df0e57 100644
--- a/ldclient/redis_feature_store.py
+++ b/ldclient/redis_feature_store.py
@@ -1,6 +1,11 @@
 import json
-import redis
+have_redis = False
+try:
+    import redis
+    have_redis = True
+except ImportError:
+    pass
 
 from ldclient import log
 from ldclient.feature_store import CacheConfig
@@ -21,7 +26,8 @@ def __init__(self,
                  max_connections=16,
                  expiration=15,
                  capacity=1000):
-
+        if not have_redis:
+            raise NotImplementedError("Cannot use Redis feature store because redis package is not installed")
         self.core = _RedisFeatureStoreCore(url, prefix, max_connections)  # exposed for testing
         self._wrapper = CachingStoreWrapper(self.core, CacheConfig(expiration=expiration, capacity=capacity))
@@ -47,6 +53,7 @@ def initialized(self):
 
 class _RedisFeatureStoreCore(FeatureStoreCore):
     def __init__(self, url, prefix, max_connections):
+
         self._prefix = prefix
         self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections)
         self.test_update_hook = None  # exposed for testing
diff --git a/test-requirements.txt b/test-requirements.txt
index 413ef355..88cbbc2e 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,7 @@
 mock>=2.0.0
 pytest>=2.8
 redis>=2.10.5
+boto3>=1.9.71
 coverage>=4.4
 pytest-capturelog>=0.7
 pytest-cov>=2.4.0
diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py
index 5716fa0e..003434b1 100644
--- a/testing/test_feature_store.py
+++ b/testing/test_feature_store.py
@@ -1,9 +1,12 @@
+import boto3
 import json
 import pytest
 import redis
+import time
 
+from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers
 from ldclient.feature_store import CacheConfig, InMemoryFeatureStore
-from ldclient.integrations import Redis
+from ldclient.integrations import DynamoDB, Redis
 from ldclient.redis_feature_store import RedisFeatureStore
 from ldclient.versioned_data_kind import FEATURES
@@ -16,38 +19,124 @@ def get_log_lines(caplog):
     return loglines
 
 
-class TestFeatureStore:
+class InMemoryTester(object):
+    def init_store(self):
+        return InMemoryFeatureStore()
+
+
+class RedisTester(object):
     redis_host = 'localhost'
     redis_port = 6379
 
-    def clear_redis_data(self):
+    def __init__(self, cache_config):
+        self._cache_config = cache_config
+
+    def init_store(self):
+        self._clear_data()
+        return Redis.new_feature_store(caching=self._cache_config)
+
+    def _clear_data(self):
         r = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0)
         r.delete("launchdarkly:features")
 
-    def in_memory(self):
-        return InMemoryFeatureStore()
 
-    def redis_with_local_cache(self):
-        self.clear_redis_data()
-        return Redis.new_feature_store()
-
- def redis_no_local_cache(self): - self.clear_redis_data() - return Redis.new_feature_store(caching=CacheConfig.disabled()) - - def deprecated_redis_with_local_cache(self): - self.clear_redis_data() - return RedisFeatureStore() +class RedisWithDeprecatedConstructorTester(RedisTester): + def init_store(self): + self._clear_data() + return RedisFeatureStore(expiration=(30 if self._cache_config.enabled else 0)) + + +class DynamoDBTester(object): + table_name = 'LD_DYNAMODB_TEST_TABLE' + table_created = False + options = { 'endpoint_url': 'http://localhost:8000', 'region_name': 'us-east-1' } + + def __init__(self, cache_config): + self._cache_config = cache_config + + def init_store(self): + self._create_table() + self._clear_data() + return DynamoDB.new_feature_store(self.table_name, dynamodb_opts=self.options) + + def _create_table(self): + if self.table_created: + return + client = boto3.client('dynamodb', **self.options) + try: + client.describe_table(TableName=self.table_name) + self.table_created = True + return + except client.exceptions.ResourceNotFoundException: + pass + req = { + 'TableName': self.table_name, + 'KeySchema': [ + { + 'AttributeName': _DynamoDBFeatureStoreCore.PARTITION_KEY, + 'KeyType': 'HASH', + }, + { + 'AttributeName': _DynamoDBFeatureStoreCore.SORT_KEY, + 'KeyType': 'RANGE' + } + ], + 'AttributeDefinitions': [ + { + 'AttributeName': _DynamoDBFeatureStoreCore.PARTITION_KEY, + 'AttributeType': 'S' + }, + { + 'AttributeName': _DynamoDBFeatureStoreCore.SORT_KEY, + 'AttributeType': 'S' + } + ], + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + } + client.create_table(**req) + while True: + try: + client.describe_table(TableName=self.table_name) + self.table_created = True + return + except client.exceptions.ResourceNotFoundException: + time.sleep(0.5) + + def _clear_data(self): + client = boto3.client('dynamodb', **self.options) + delete_requests = [] + req = { + 'TableName': self.table_name, + 'ConsistentRead': True, + 'ProjectionExpression': '#namespace, #key', + 'ExpressionAttributeNames': { + '#namespace': _DynamoDBFeatureStoreCore.PARTITION_KEY, + '#key': _DynamoDBFeatureStoreCore.SORT_KEY + } + } + for resp in client.get_paginator('scan').paginate(**req): + for item in resp['Items']: + delete_requests.append({ 'DeleteRequest': { 'Key': item } }) + _DynamoDBHelpers.batch_write_requests(client, self.table_name, delete_requests) - def deprecated_redis_no_local_cache(self): - self.clear_redis_data() - return RedisFeatureStore(expiration=0) - params = [in_memory, redis_with_local_cache, redis_no_local_cache] +class TestFeatureStore: + params = [ + InMemoryTester(), + RedisTester(CacheConfig.default()), + RedisTester(CacheConfig.disabled()), + RedisWithDeprecatedConstructorTester(CacheConfig.default()), + RedisWithDeprecatedConstructorTester(CacheConfig.disabled()), + DynamoDBTester(CacheConfig.default()), + DynamoDBTester(CacheConfig.disabled()) + ] @pytest.fixture(params=params) def store(self, request): - return request.param(self) + return request.param.init_store() @staticmethod def make_feature(key, ver): @@ -79,6 +168,9 @@ def base_initialized_store(self, store): }) return store + def test_not_initialized_before_init(self, store): + assert store.initialized is False + def test_initialized(self, store): store = self.base_initialized_store(store) assert store.initialized is True From 431dddf55ea9bdc16d1e15d680e519287ed14723 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 29 Dec 2018 15:25:52 -0800 Subject: [PATCH 019/356] 
add test credentials

---
 testing/test_feature_store.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py
index 003434b1..229a0f40 100644
--- a/testing/test_feature_store.py
+++ b/testing/test_feature_store.py
@@ -49,7 +49,12 @@ def init_store(self):
 class DynamoDBTester(object):
     table_name = 'LD_DYNAMODB_TEST_TABLE'
     table_created = False
-    options = { 'endpoint_url': 'http://localhost:8000', 'region_name': 'us-east-1' }
+    options = {
+        'aws_access_key_id': 'key',  # not used by local DynamoDB, but still required
+        'aws_secret_access_key': 'secret',
+        'endpoint_url': 'http://localhost:8000',
+        'region_name': 'us-east-1'
+    }
 
     def __init__(self, cache_config):
         self._cache_config = cache_config

From 3aa5644edf5c5f65f201733c20bb21e924fd10ef Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Mon, 31 Dec 2018 11:34:53 -0800
Subject: [PATCH 020/356] link in comment

---
 ldclient/integrations.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ldclient/integrations.py b/ldclient/integrations.py
index 80063389..6102d354 100644
--- a/ldclient/integrations.py
+++ b/ldclient/integrations.py
@@ -18,7 +18,7 @@ def new_feature_store(table_name,
         :param string table_name: The name of an existing DynamoDB table
         :param string prefix: An optional namespace prefix to be prepended to all DynamoDB keys
         :param dict dynamodb_opts: Optional parameters for configuring the DynamoDB client, as defined in
-            the boto3 API
+            the boto3 API; see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session.client
         :param CacheConfig caching: Specifies whether local caching should be enabled and if so,
             sets the cache properties; defaults to `CacheConfig.default()`
         """

From bd00276f874d40d1a5d1f2c66e033cd99452f00c Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Mon, 31 Dec 2018 11:36:13 -0800
Subject: [PATCH 021/356] comment

---
 ldclient/redis_feature_store.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py
index b9bdf731..e08af6dc 100644
--- a/ldclient/redis_feature_store.py
+++ b/ldclient/redis_feature_store.py
@@ -9,6 +9,11 @@
 from ldclient.versioned_data_kind import FEATURES
 
 
+# Note that this class is now just a facade around CachingStoreWrapper, which is in turn delegating
+# to _RedisFeatureStoreCore where the actual database logic is. This class was retained for historical
+# reasons, to support existing code that calls the RedisFeatureStore constructor. In the future, we
+# will migrate away from exposing these concrete classes and use only the factory methods.
+
 class RedisFeatureStore(FeatureStore):
     """A Redis-backed implementation of :class:`ldclient.feature_store.FeatureStore`.
From 534ec5deadb46e318a18e7bc80431f2bc531a639 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 31 Dec 2018 12:48:27 -0800 Subject: [PATCH 022/356] don't catch exceptions in Redis feature store, let the client catch them --- ldclient/client.py | 15 +++++-- ldclient/feature_store_helpers.py | 6 +-- ldclient/interfaces.py | 4 +- ldclient/redis_feature_store.py | 19 ++------- testing/test_feature_store.py | 30 -------------- testing/test_feature_store_helpers.py | 59 +++++++++++++++++++++++---- testing/test_ldclient_evaluation.py | 56 +++++++++++++++++++++++++ 7 files changed, 128 insertions(+), 61 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 039fad52..eea7d970 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -243,7 +243,14 @@ def send_event(value, variation=None, flag=None, reason=None): if user is not None and user.get('key', "") == "": log.warn("User key is blank. Flag evaluation will proceed, but the user will not be stored in LaunchDarkly.") - flag = self._store.get(FEATURES, key, lambda x: x) + try: + flag = self._store.get(FEATURES, key, lambda x: x) + except Exception as e: + log.error("Unexpected error while retrieving feature flag \"%s\": %s" % (key, repr(e))) + log.debug(traceback.format_exc()) + reason = error_reason('EXCEPTION') + send_event(default, None, None, reason) + return EvaluationDetail(default, None, reason) if not flag: reason = error_reason('FLAG_NOT_FOUND') send_event(default, None, None, reason) @@ -264,7 +271,7 @@ def send_event(value, variation=None, flag=None, reason=None): send_event(detail.value, detail.variation_index, flag, detail.reason) return detail except Exception as e: - log.error("Unexpected error while evaluating feature flag \"%s\": %s" % (key, e)) + log.error("Unexpected error while evaluating feature flag \"%s\": %s" % (key, repr(e))) log.debug(traceback.format_exc()) reason = error_reason('EXCEPTION') send_event(default, None, flag, reason) @@ -328,7 +335,7 @@ def all_flags_state(self, user, **kwargs): if flags_map is None: raise ValueError("feature store error") except Exception as e: - log.error("Unable to read flags for all_flag_state: %s" % e) + log.error("Unable to read flags for all_flag_state: %s" % repr(e)) return FeatureFlagsState(False) for key, flag in flags_map.items(): @@ -339,7 +346,7 @@ def all_flags_state(self, user, **kwargs): state.add_flag(flag, detail.value, detail.variation_index, detail.reason if with_reasons else None, details_only_if_tracked) except Exception as e: - log.error("Error evaluating flag \"%s\" in all_flags_state: %s" % (key, e)) + log.error("Error evaluating flag \"%s\" in all_flags_state: %s" % (key, repr(e))) log.debug(traceback.format_exc()) reason = {'kind': 'ERROR', 'errorKind': 'EXCEPTION'} state.add_flag(flag, None, None, reason if with_reasons else None, details_only_if_tracked) diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index d8359274..2ba83713 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -42,7 +42,7 @@ def get(self, kind, key, callback=lambda x: x): self._cache[cache_key] = [item] return callback(self._item_if_not_deleted(item)) - def all(self, kind, callback): + def all(self, kind, callback=lambda x: x): if self._cache is not None: cache_key = self._all_cache_key(kind) cached_items = self._cache.get(cache_key) @@ -68,11 +68,11 @@ def initialized(self): if self._inited: return True if self._cache is None: - result = self._core.initialized_internal() + result = 
bool(self._core.initialized_internal()) else: result = self._cache.get(CachingStoreWrapper.__INITED_CACHE_KEY__) if result is None: - result = self._core.initialized_internal() + result = bool(self._core.initialized_internal()) self._cache[CachingStoreWrapper.__INITED_CACHE_KEY__] = result if result: self._inited = True diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 2710fa25..9556bdfc 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -19,7 +19,7 @@ class FeatureStore(object): __metaclass__ = ABCMeta @abstractmethod - def get(self, kind, key, callback): + def get(self, kind, key, callback=lambda x: x): """ Retrieves the object to which the specified key is mapped, or None if the key is not found or the associated object has a "deleted" property of True. The retrieved object, if any (a @@ -35,7 +35,7 @@ def get(self, kind, key, callback): """ @abstractmethod - def all(self, kind, callback): + def all(self, kind, callback=lambda x: x): """ Retrieves a dictionary of all associated objects of a given kind. The retrieved dict of keys to objects can be transformed by the specified callback. diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py index e08af6dc..c3eabc42 100644 --- a/ldclient/redis_feature_store.py +++ b/ldclient/redis_feature_store.py @@ -77,16 +77,10 @@ def init_internal(self, all_data): def get_all_internal(self, kind): r = redis.Redis(connection_pool=self._pool) - try: - all_items = r.hgetall(self._items_key(kind)) - except BaseException as e: - log.error("RedisFeatureStore: Could not retrieve '%s' from Redis with error: %s. Returning None.", - kind.namespace, e) - return None + all_items = r.hgetall(self._items_key(kind)) if all_items is None or all_items is "": - log.warn("RedisFeatureStore: call to get all '%s' returned no results. Returning None.", kind.namespace) - return None + all_items = {} results = {} for key, item_json in all_items.items(): @@ -95,13 +89,8 @@ def get_all_internal(self, kind): return results def get_internal(self, kind, key): - try: - r = redis.Redis(connection_pool=self._pool) - item_json = r.hget(self._items_key(kind), key) - except BaseException as e: - log.error("RedisFeatureStore: Could not retrieve key %s from '%s' with error: %s", - key, kind.namespace, e) - return None + r = redis.Redis(connection_pool=self._pool) + item_json = r.hget(self._items_key(kind), key) if item_json is None or item_json is "": log.debug("RedisFeatureStore: key %s not found in '%s'. 
Returning None.", key, kind.namespace) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 5716fa0e..ffff39a8 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -8,14 +8,6 @@ from ldclient.versioned_data_kind import FEATURES -def get_log_lines(caplog): - loglines = caplog.records - if callable(loglines): - # records() is a function in older versions of the caplog plugin - loglines = loglines() - return loglines - - class TestFeatureStore: redis_host = 'localhost' redis_port = 6379 @@ -178,25 +170,3 @@ def hook(base_key, key): store.upsert(FEATURES, feature) result = store.get(FEATURES, 'flagkey', lambda x: x) assert result['version'] == 5 - - def test_exception_is_handled_in_get(self, caplog): - # This just verifies the fix for a bug that caused an error during exception handling in Python 3 - store = RedisFeatureStore(url='redis://bad') - feature = store.get(FEATURES, 'flagkey') - assert feature is None - loglines = get_log_lines(caplog) - assert len(loglines) == 2 - message = loglines[1].message - assert message.startswith("RedisFeatureStore: Could not retrieve key flagkey from 'features' with error:") - assert "connecting to bad:6379" in message - - def test_exception_is_handled_in_all(self, caplog): - # This just verifies the fix for a bug that caused an error during exception handling in Python 3 - store = RedisFeatureStore(url='redis://bad') - all = store.all(FEATURES, lambda x: x) - assert all == {} - loglines = get_log_lines(caplog) - assert len(loglines) == 2 - message = loglines[1].message - assert message.startswith("RedisFeatureStore: Could not retrieve 'features' from Redis") - assert "connecting to bad:6379" in message diff --git a/testing/test_feature_store_helpers.py b/testing/test_feature_store_helpers.py index 01bb245a..77ccb6f8 100644 --- a/testing/test_feature_store_helpers.py +++ b/testing/test_feature_store_helpers.py @@ -16,20 +16,25 @@ def __init__(self): self.data = {} self.inited = False self.inited_query_count = 0 + self.error = None def init_internal(self, all_data): + self._maybe_throw() self.data = {} for kind, items in all_data.items(): self.data[kind] = items.copy() def get_internal(self, kind, key): + self._maybe_throw() items = self.data.get(kind) return None if items is None else items.get(key) def get_all_internal(self, kind): + self._maybe_throw() return self.data.get(kind) def upsert_internal(self, kind, item): + self._maybe_throw() key = item.get('key') items = self.data.get(kind) if items is None: @@ -42,9 +47,14 @@ def upsert_internal(self, kind, item): return old_item def initialized_internal(self): + self._maybe_throw() self.inited_query_count = self.inited_query_count + 1 return self.inited - + + def _maybe_throw(self): + if self.error is not None: + raise self.error + def force_set(self, kind, item): items = self.data.get(kind) if items is None: @@ -57,6 +67,9 @@ def force_remove(self, kind, key): if items is not None: items.pop(key, None) +class CustomError(Exception): + pass + class TestCachingStoreWrapper: @pytest.mark.parametrize("cached", [False, True]) def test_get_item(self, cached): @@ -119,6 +132,14 @@ def test_cached_get_uses_values_from_init(self): core.force_remove(THINGS, item1["key"]) assert wrapper.get(THINGS, item1["key"]) == item1 + @pytest.mark.parametrize("cached", [False, True]) + def test_get_can_throw_exception(self, cached): + core = MockCore() + wrapper = make_wrapper(core, cached) + core.error = CustomError() + with pytest.raises(CustomError, message="expected 
exception"): + wrapper.get(THINGS, "key", lambda x: x) + @pytest.mark.parametrize("cached", [False, True]) def test_get_all(self, cached): core = MockCore() @@ -128,13 +149,13 @@ def test_get_all(self, cached): core.force_set(THINGS, item1) core.force_set(THINGS, item2) - assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1, item2["key"]: item2 } + assert wrapper.all(THINGS) == { item1["key"]: item1, item2["key"]: item2 } core.force_remove(THINGS, item2["key"]) if cached: - assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1, item2["key"]: item2 } + assert wrapper.all(THINGS) == { item1["key"]: item1, item2["key"]: item2 } else: - assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1 } + assert wrapper.all(THINGS) == { item1["key"]: item1 } @pytest.mark.parametrize("cached", [False, True]) def test_get_all_removes_deleted_items(self, cached): @@ -145,14 +166,14 @@ def test_get_all_removes_deleted_items(self, cached): core.force_set(THINGS, item1) core.force_set(THINGS, item2) - assert wrapper.all(THINGS, lambda x: x) == { item1["key"]: item1 } + assert wrapper.all(THINGS) == { item1["key"]: item1 } @pytest.mark.parametrize("cached", [False, True]) def test_get_all_changes_None_to_empty_dict(self, cached): core = MockCore() wrapper = make_wrapper(core, cached) - assert wrapper.all(WRONG_THINGS, lambda x:x) == {} + assert wrapper.all(WRONG_THINGS) == {} @pytest.mark.parametrize("cached", [False, True]) def test_get_all_iwith_lambda(self, cached): @@ -176,7 +197,15 @@ def test_cached_get_all_uses_values_from_init(self): wrapper.init({ THINGS: both }) core.force_remove(THINGS, item1["key"]) - assert wrapper.all(THINGS, lambda x: x) == both + assert wrapper.all(THINGS) == both + + @pytest.mark.parametrize("cached", [False, True]) + def test_get_all_can_throw_exception(self, cached): + core = MockCore() + wrapper = make_wrapper(core, cached) + core.error = CustomError() + with pytest.raises(CustomError, message="expected exception"): + wrapper.all(THINGS) @pytest.mark.parametrize("cached", [False, True]) def test_upsert_successful(self, cached): @@ -221,6 +250,14 @@ def test_cached_upsert_unsuccessful(self): core.force_set(THINGS, itemv3) # bypasses cache so we can verify that itemv2 is in the cache assert wrapper.get(THINGS, key) == itemv2 + @pytest.mark.parametrize("cached", [False, True]) + def test_upsert_can_throw_exception(self, cached): + core = MockCore() + wrapper = make_wrapper(core, cached) + core.error = CustomError() + with pytest.raises(CustomError, message="expected exception"): + wrapper.upsert(THINGS, { "key": "x", "version": 1 }) + @pytest.mark.parametrize("cached", [False, True]) def test_delete(self, cached): core = MockCore() @@ -239,6 +276,14 @@ def test_delete(self, cached): core.force_set(THINGS, itemv3) # make a change that bypasses the cache assert wrapper.get(THINGS, key) == (None if cached else itemv3) + @pytest.mark.parametrize("cached", [False, True]) + def test_delete_can_throw_exception(self, cached): + core = MockCore() + wrapper = make_wrapper(core, cached) + core.error = CustomError() + with pytest.raises(CustomError, message="expected exception"): + wrapper.delete(THINGS, "x", 1) + def test_uncached_initialized_queries_state_only_until_inited(self): core = MockCore() wrapper = make_wrapper(core, False) diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 46c48756..e48f0329 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -4,6 +4,7 @@ from 
ldclient.client import LDClient, Config from ldclient.feature_store import InMemoryFeatureStore from ldclient.flag import EvaluationDetail +from ldclient.interfaces import FeatureStore from ldclient.versioned_data_kind import FEATURES from testing.stub_util import MockEventProcessor, MockUpdateProcessor from testing.test_ldclient import make_off_flag_with_value @@ -28,6 +29,17 @@ 'debugEventsUntilDate': 1000 } +class ErroringFeatureStore(FeatureStore): + def get(self, kind, key, callback=lambda x: x): + raise NotImplementedError() + + def all(self, kind, callback=lambda x: x): + raise NotImplementedError() + + @property + def initialized(self): + return True + def make_client(store): return LDClient(config=Config(sdk_key='SDK_KEY', base_uri='http://test', @@ -35,6 +47,14 @@ def make_client(store): update_processor_class=MockUpdateProcessor, feature_store=store)) +def get_log_lines(caplog): + loglines = caplog.records + if callable(loglines): + # records() is a function in older versions of the caplog plugin + loglines = loglines() + return loglines + + def test_variation_for_existing_feature(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() @@ -116,6 +136,25 @@ def test_variation_detail_for_flag_that_evaluates_to_none(): assert expected == actual assert actual.is_default_value() == True +def test_variation_when_feature_store_throws_error(caplog): + store = ErroringFeatureStore() + client = make_client(store) + assert client.variation('feature.key', { "key": "user" }, default='default') == 'default' + loglines = get_log_lines(caplog) + assert len(loglines) == 1 + assert loglines[0].message == 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' + +def test_variation_detail_when_feature_store_throws_error(caplog): + store = ErroringFeatureStore() + client = make_client(store) + expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'EXCEPTION'}) + actual = client.variation_detail('feature.key', { }, default='default') + assert expected == actual + assert actual.is_default_value() == True + loglines = get_log_lines(caplog) + assert len(loglines) == 1 + assert loglines[0].message == 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' + def test_all_flags_returns_values(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) @@ -137,6 +176,14 @@ def test_all_flags_returns_none_if_user_has_no_key(): result = client.all_flags({ }) assert result is None +def test_all_flags_returns_none_if_feature_store_throws_error(caplog): + store = ErroringFeatureStore() + client = make_client(store) + assert client.all_flags({ "key": "user" }) is None + loglines = get_log_lines(caplog) + assert len(loglines) == 1 + assert loglines[0].message == 'Unable to read flags for all_flag_state: NotImplementedError()' + def test_all_flags_state_returns_state(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) @@ -297,3 +344,12 @@ def test_all_flags_state_returns_empty_state_if_user_has_no_key(): client = make_client(store) state = client.all_flags_state({ }) assert state.valid == False + +def test_all_flags_returns_empty_state_if_feature_store_throws_error(caplog): + store = ErroringFeatureStore() + client = make_client(store) + state = client.all_flags_state({ "key": "user" }) + assert state.valid == False + loglines = get_log_lines(caplog) + assert len(loglines) == 1 + assert loglines[0].message == 'Unable to read 
flags for all_flag_state: NotImplementedError()' From 5f16c8d31337ab03f4b925c5552074f6562d1b55 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 31 Dec 2018 12:48:35 -0800 Subject: [PATCH 023/356] gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0d1700ee..d988c61f 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,7 @@ nosetests.xml coverage.xml *,cover .hypothesis/ +.pytest_cache # Translations *.mo From ac0f2eae2fc64b9402b708e1cf418eb1d2ce320a Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 31 Dec 2018 13:02:54 -0800 Subject: [PATCH 024/356] misc test fixes --- testing/test_ldclient_evaluation.py | 35 ++++++++++++++++------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index e48f0329..be925a5c 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -36,6 +36,15 @@ def get(self, kind, key, callback=lambda x: x): def all(self, kind, callback=lambda x: x): raise NotImplementedError() + def upsert(self, kind, item): + pass + + def delete(self, key, version): + pass + + def init(self, data): + pass + @property def initialized(self): return True @@ -47,12 +56,12 @@ def make_client(store): update_processor_class=MockUpdateProcessor, feature_store=store)) -def get_log_lines(caplog): +def get_log_lines(caplog, level): loglines = caplog.records if callable(loglines): # records() is a function in older versions of the caplog plugin loglines = loglines() - return loglines + return [line.message for line in loglines if line.levelname == level] def test_variation_for_existing_feature(): @@ -140,20 +149,18 @@ def test_variation_when_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) assert client.variation('feature.key', { "key": "user" }, default='default') == 'default' - loglines = get_log_lines(caplog) - assert len(loglines) == 1 - assert loglines[0].message == 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' + errlog = get_log_lines(caplog, 'ERROR') + assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] def test_variation_detail_when_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'EXCEPTION'}) - actual = client.variation_detail('feature.key', { }, default='default') + actual = client.variation_detail('feature.key', { "key": "user" }, default='default') assert expected == actual assert actual.is_default_value() == True - loglines = get_log_lines(caplog) - assert len(loglines) == 1 - assert loglines[0].message == 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' + errlog = get_log_lines(caplog, 'ERROR') + assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] def test_all_flags_returns_values(): store = InMemoryFeatureStore() @@ -180,9 +187,8 @@ def test_all_flags_returns_none_if_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) assert client.all_flags({ "key": "user" }) is None - loglines = get_log_lines(caplog) - assert len(loglines) == 1 - assert loglines[0].message == 'Unable to read flags for all_flag_state: NotImplementedError()' + errlog = get_log_lines(caplog, 'ERROR') + assert errlog == [ 'Unable to read 
flags for all_flag_state: NotImplementedError()' ] def test_all_flags_state_returns_state(): store = InMemoryFeatureStore() @@ -350,6 +356,5 @@ def test_all_flags_returns_empty_state_if_feature_store_throws_error(caplog): client = make_client(store) state = client.all_flags_state({ "key": "user" }) assert state.valid == False - loglines = get_log_lines(caplog) - assert len(loglines) == 1 - assert loglines[0].message == 'Unable to read flags for all_flag_state: NotImplementedError()' + errlog = get_log_lines(caplog, 'ERROR') + assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ] From 256b6fb0ca3eb868f28526a209b194b06267d685 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 9 Jan 2019 12:57:28 -0800 Subject: [PATCH 025/356] implement dependency ordering for feature store data --- ldclient/client.py | 32 ++++++++++++++++++++- ldclient/feature_store.py | 51 ++++++++++++++++++++++++++++++++- ldclient/versioned_data_kind.py | 11 +++++-- testing/stub_util.py | 27 +++++++++++++++-- testing/test_ldclient.py | 35 ++++++++++++++++++++-- 5 files changed, 148 insertions(+), 8 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index eea7d970..3ce19d15 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -12,6 +12,7 @@ from ldclient.feature_requester import FeatureRequesterImpl from ldclient.flag import EvaluationDetail, evaluate, error_reason from ldclient.flags_state import FeatureFlagsState +from ldclient.interfaces import FeatureStore from ldclient.polling import PollingUpdateProcessor from ldclient.streaming import StreamingUpdateProcessor from ldclient.util import check_uwsgi, log @@ -27,6 +28,35 @@ from threading import Lock +class _FeatureStoreClientWrapper(FeatureStore): + """Provides additional behavior that the client requires before or after feature store operations. + Currently this just means sorting the data set for init(). In the future we may also use this + to provide an update listener capability. + """ + + def __init__(self, store): + self.store = store + + def get(self, kind, key, callback): + return self.store.get(self, kind, key, callback) + + def all(self, kind, callback): + return self.store.all(self, kind, callback) + + def init(self, all_data): + return self.store.init(self, all_data) + + def delete(self, kind, key, version): + return self.store.delete(self, kind, key, version) + + def upsert(self, kind, item): + return self.store.upsert(self, kind, item) + + @property + def initialized(self): + return self.store.initialized + + class LDClient(object): def __init__(self, sdk_key=None, config=None, start_wait=5): """Constructs a new LDClient instance. 
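To make the sorting behavior concrete before the implementation appears, here is a hypothetical illustration (the flag data is made up; _FeatureStoreDataSetSorter and the priority/get_dependency_keys attributes are added further down in this same patch):

    # Hypothetical data set: flag "a" declares flag "b" as a prerequisite.
    from ldclient.versioned_data_kind import FEATURES, SEGMENTS

    all_data = {
        FEATURES: {
            'a': { 'key': 'a', 'prerequisites': [ { 'key': 'b' } ] },
            'b': { 'key': 'b' }
        },
        SEGMENTS: {}
    }
    sorted_data = _FeatureStoreDataSetSorter.sort_all_collections(all_data)
    # SEGMENTS (priority 0) iterates before FEATURES (priority 1), and within
    # FEATURES "b" comes before "a", so a store that writes items in iteration
    # order never exposes a flag whose prerequisite has not been written yet.
    assert list(sorted_data.keys()) == [SEGMENTS, FEATURES]
    assert list(sorted_data[FEATURES].keys()) == ['b', 'a']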
@@ -55,7 +85,7 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): self._event_processor = None self._lock = Lock() - self._store = self._config.feature_store + self._store = _FeatureStoreClientWrapper(self._config.feature_store) """ :type: FeatureStore """ if self._config.offline or not self._config.send_events: diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index e4d2f667..07223a32 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -1,4 +1,4 @@ -from collections import defaultdict +from collections import OrderedDict, defaultdict from ldclient.util import log from ldclient.interfaces import FeatureStore from ldclient.rwlock import ReadWriteLock @@ -126,3 +126,52 @@ def initialized(self): return self._initialized finally: self._lock.runlock() + + +class _FeatureStoreDataSetSorter: + """ + Implements a dependency graph ordering for data to be stored in a feature store. We must use this + on every data set that will be passed to the feature store's init() method. + """ + @staticmethod + def sort_all_collections(all_data): + """ Returns a copy of the input data that has the following guarantees: the iteration order of the outer + dictionary will be in ascending order by the VersionDataKind's :priority property (if any), and for each + data kind that has a "get_dependency_keys" function, the inner dictionary will have an iteration order + where B is before A if A has a dependency on B. + """ + outer_hash = OrderedDict() + kinds = list(all_data.keys()) + def priority_order(kind): + return kind.get('priority', len(kind['namespace'])) # use arbitrary order if there's no priority + kinds.sort(key=priority_order) + for kind in kinds: + items = all_data[kind] + outer_hash[kind] = _FeatureStoreDataSetSorter._sort_collection(kind, items) + return outer_hash + + @staticmethod + def _sort_collection(kind, input): + if len(input) == 0 or not hasattr(kind, 'get_dependency_keys'): + return input + dependency_fn = kind.get_dependency_keys + if dependency_fn is None or len(input) == 0: + return input + remaining_items = input.copy() + items_out = OrderedDict() + while len(remaining_items) > 0: + # pick a random item that hasn't been updated yet + for key, item in remaining_items: + _FeatureStoreDataSetSorter._add_with_dependencies_first(item, dependency_fn, remaining_items, items_out) + break + return items_out + + @staticmethod + def _add_with_dependencies_first(item, dependency_fn, remaining_items, items_out): + key = item.get('key') + del remaining_items[key] # we won't need to visit this item again + for dep_key in dependency_fn(item): + dep_item = remaining_items.get(dep_key) + if dep_item is not None: + _FeatureStoreDataSetSorter._add_with_dependencies_first(dep_item, dependency_fn, remaining_items, items_out) + items_out[key] = item diff --git a/ldclient/versioned_data_kind.py b/ldclient/versioned_data_kind.py index 6df96a32..0054a42e 100644 --- a/ldclient/versioned_data_kind.py +++ b/ldclient/versioned_data_kind.py @@ -10,10 +10,17 @@ VersionedDataKind = namedtuple('VersionedDataKind', ['namespace', 'request_api_path', 'stream_api_path']) +VersionedDataKindWithOrdering = namedtuple('VersionedDataKindWithOrdering', + ['namespace', 'request_api_path', 'stream_api_path', 'priority', 'get_dependency_keys']) + FEATURES = VersionedDataKind(namespace = "features", request_api_path = "/sdk/latest-flags", - stream_api_path = "/flags/") + stream_api_path = "/flags/", + priority = 1, + get_dependency_keys = lambda flag: p.get('key') for p in 
flag.get('prerequisites', [])) SEGMENTS = VersionedDataKind(namespace = "segments", request_api_path = "/sdk/latest-segments", - stream_api_path = "/segments/") + stream_api_path = "/segments/", + priority = 0, + get_dependency_keys = None) diff --git a/testing/stub_util.py b/testing/stub_util.py index bcb45ef2..80e53af6 100644 --- a/testing/stub_util.py +++ b/testing/stub_util.py @@ -1,14 +1,13 @@ from email.utils import formatdate from requests.structures import CaseInsensitiveDict -from ldclient.interfaces import EventProcessor, FeatureRequester, UpdateProcessor +from ldclient.interfaces import EventProcessor, FeatureRequester, FeatureStore, UpdateProcessor class MockEventProcessor(EventProcessor): def __init__(self, *_): self._running = False self._events = [] - mock_event_processor = self def stop(self): self._running = False @@ -103,3 +102,27 @@ def is_alive(self): def initialized(self): return True + +class CapturingFeatureStore(FeatureStore): + def init(self, all_data): + self.data = all_data + + def get(self, kind, key, callback=lambda x: x): + pass + + def all(self, kind, callback=lambda x: x): + pass + + def delete(self, kind, key, version): + pass + + def upsert(self, kind, item): + pass + + @property + def initialized(self): + return True + + @property + def received_data(self): + return self.data diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 1766386b..be290fda 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -2,10 +2,10 @@ from ldclient.client import LDClient, Config from ldclient.event_processor import NullEventProcessor from ldclient.feature_store import InMemoryFeatureStore -from ldclient.interfaces import FeatureRequester, FeatureStore, UpdateProcessor +from ldclient.interfaces import UpdateProcessor from ldclient.versioned_data_kind import FEATURES import pytest -from testing.stub_util import MockEventProcessor, MockUpdateProcessor +from testing.stub_util import CapturingFeatureStore, MockEventProcessor, MockUpdateProcessor from testing.sync_util import wait_until try: @@ -259,3 +259,34 @@ def test_event_for_existing_feature_with_no_user_key(): def test_secure_mode_hash(): user = {'key': 'Message'} assert offline_client.secure_mode_hash(user) == "aa747c502a898200f9e4fa21bac68136f886a0e27aec70ba06daf2e2a5cb5597" + + +dependency_ordering_test_data = { + FEATURES: { + + }, + SEGMENTS: { + + } +} + +class DependencyOrderingDataUpdateProcessor(UpdateProcessor): + def __init__(self, config, store, ready): + store.init(dependency_ordering_test_data) + ready.set() + + def start(self): + pass + + def initialized(self): + return True + + +def test_store_data_set_ordering(): + store = CapturingFeatureStore() + config = Config(sdk_key = 'SDK_KEY', send_events=False, feature_store=store, + update_processor_class=DependencyOrderingDataUpdateProcessor) + client = LDClient(config=config) + + data = store.received_data + From 289077c9761e1cba7d574732ccd7059fd2ca1ede Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 9 Jan 2019 13:23:49 -0800 Subject: [PATCH 026/356] fix incomplete implementation & test --- ldclient/client.py | 15 ++++++++------- ldclient/feature_store.py | 7 +++++-- ldclient/versioned_data_kind.py | 8 +++++--- testing/test_ldclient.py | 32 +++++++++++++++++++++++++++----- 4 files changed, 45 insertions(+), 17 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 3ce19d15..30c37e53 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -10,6 +10,7 @@ from ldclient.config import Config as Config from 
ldclient.event_processor import NullEventProcessor from ldclient.feature_requester import FeatureRequesterImpl +from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.flag import EvaluationDetail, evaluate, error_reason from ldclient.flags_state import FeatureFlagsState from ldclient.interfaces import FeatureStore @@ -37,20 +38,20 @@ class _FeatureStoreClientWrapper(FeatureStore): def __init__(self, store): self.store = store + def init(self, all_data): + return self.store.init(_FeatureStoreDataSetSorter.sort_all_collections(all_data)) + def get(self, kind, key, callback): - return self.store.get(self, kind, key, callback) + return self.store.get(kind, key, callback) def all(self, kind, callback): - return self.store.all(self, kind, callback) - - def init(self, all_data): - return self.store.init(self, all_data) + return self.store.all(kind, callback) def delete(self, kind, key, version): - return self.store.delete(self, kind, key, version) + return self.store.delete(kind, key, version) def upsert(self, kind, item): - return self.store.upsert(self, kind, item) + return self.store.upsert(kind, item) @property def initialized(self): diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index 07223a32..fccef5b5 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -2,6 +2,7 @@ from ldclient.util import log from ldclient.interfaces import FeatureStore from ldclient.rwlock import ReadWriteLock +from six import iteritems class CacheConfig: @@ -143,7 +144,9 @@ def sort_all_collections(all_data): outer_hash = OrderedDict() kinds = list(all_data.keys()) def priority_order(kind): - return kind.get('priority', len(kind['namespace'])) # use arbitrary order if there's no priority + if hasattr(kind, 'priority'): + return kind.priority + return len(kind.namespace) # use arbitrary order if there's no priority kinds.sort(key=priority_order) for kind in kinds: items = all_data[kind] @@ -161,7 +164,7 @@ def _sort_collection(kind, input): items_out = OrderedDict() while len(remaining_items) > 0: # pick a random item that hasn't been updated yet - for key, item in remaining_items: + for key, item in iteritems(remaining_items): _FeatureStoreDataSetSorter._add_with_dependencies_first(item, dependency_fn, remaining_items, items_out) break return items_out diff --git a/ldclient/versioned_data_kind.py b/ldclient/versioned_data_kind.py index 0054a42e..04acce43 100644 --- a/ldclient/versioned_data_kind.py +++ b/ldclient/versioned_data_kind.py @@ -7,19 +7,21 @@ to add a corresponding constant here and the existing store should be able to handle it. 
""" +# Note that VersionedDataKind without the extra attributes is no longer used in the SDK, +# but it's preserved here for backward compatibility just in case someone else used it VersionedDataKind = namedtuple('VersionedDataKind', ['namespace', 'request_api_path', 'stream_api_path']) VersionedDataKindWithOrdering = namedtuple('VersionedDataKindWithOrdering', ['namespace', 'request_api_path', 'stream_api_path', 'priority', 'get_dependency_keys']) -FEATURES = VersionedDataKind(namespace = "features", +FEATURES = VersionedDataKindWithOrdering(namespace = "features", request_api_path = "/sdk/latest-flags", stream_api_path = "/flags/", priority = 1, - get_dependency_keys = lambda flag: p.get('key') for p in flag.get('prerequisites', [])) + get_dependency_keys = lambda flag: (p.get('key') for p in flag.get('prerequisites', []))) -SEGMENTS = VersionedDataKind(namespace = "segments", +SEGMENTS = VersionedDataKindWithOrdering(namespace = "segments", request_api_path = "/sdk/latest-segments", stream_api_path = "/segments/", priority = 0, diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index be290fda..4e5dc2f1 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -3,7 +3,7 @@ from ldclient.event_processor import NullEventProcessor from ldclient.feature_store import InMemoryFeatureStore from ldclient.interfaces import UpdateProcessor -from ldclient.versioned_data_kind import FEATURES +from ldclient.versioned_data_kind import FEATURES, SEGMENTS import pytest from testing.stub_util import CapturingFeatureStore, MockEventProcessor, MockUpdateProcessor from testing.sync_util import wait_until @@ -263,10 +263,15 @@ def test_secure_mode_hash(): dependency_ordering_test_data = { FEATURES: { - + "a": { "key": "a", "prerequisites": [ { "key": "b" }, { "key": "c" } ] }, + "b": { "key": "b", "prerequisites": [ { "key": "c" }, { "key": "e" } ] }, + "c": { "key": "c" }, + "d": { "key": "d" }, + "e": { "key": "e" }, + "f": { "key": "f" } }, SEGMENTS: { - + "o": { "key": "o" } } } @@ -286,7 +291,24 @@ def test_store_data_set_ordering(): store = CapturingFeatureStore() config = Config(sdk_key = 'SDK_KEY', send_events=False, feature_store=store, update_processor_class=DependencyOrderingDataUpdateProcessor) - client = LDClient(config=config) + LDClient(config=config) data = store.received_data - + assert data is not None + assert len(data) == 2 + + assert data.keys()[0] == SEGMENTS + assert len(data.values()[0]) == len(dependency_ordering_test_data[SEGMENTS]) + + assert data.keys()[1] == FEATURES + flags_map = data.values()[1] + flags_list = flags_map.values() + assert len(flags_list) == len(dependency_ordering_test_data[FEATURES]) + for item_index, item in enumerate(flags_list): + for prereq in item.get("prerequisites", []): + prereq_item = flags_map[prereq["key"]] + prereq_index = flags_list.index(prereq_item) + if prereq_index > item_index: + all_keys = (f["key"] for f in flags_list) + raise Exception("%s depends on %s, but %s was listed first; keys in order are [%s]" % + (item["key"], prereq["key"], item["key"], ", ".join(all_keys))) From 2c5929497d015c1377d409124173e8b5c88cb7f9 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 9 Jan 2019 13:31:23 -0800 Subject: [PATCH 027/356] Python 3.x fix --- testing/test_ldclient.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 4e5dc2f1..a31d2324 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -296,13 +296,15 @@ def 
test_store_data_set_ordering(): data = store.received_data assert data is not None assert len(data) == 2 + keys = list(data.keys()) + values = list(data.values()) - assert data.keys()[0] == SEGMENTS - assert len(data.values()[0]) == len(dependency_ordering_test_data[SEGMENTS]) + assert keys[0] == SEGMENTS + assert len(values[0]) == len(dependency_ordering_test_data[SEGMENTS]) - assert data.keys()[1] == FEATURES - flags_map = data.values()[1] - flags_list = flags_map.values() + assert keys[1] == FEATURES + flags_map = values[1] + flags_list = list(flags_map.values()) assert len(flags_list) == len(dependency_ordering_test_data[FEATURES]) for item_index, item in enumerate(flags_list): for prereq in item.get("prerequisites", []): From 78b611865e82278339e8fed4a3fe84ee24b24466 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 15 Jan 2019 16:04:39 -0800 Subject: [PATCH 028/356] minor doc fixes --- README.md | 14 ++++++++++---- ldclient/integrations.py | 19 ++++++++++++++++++- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index edef13e6..d25ee307 100644 --- a/README.md +++ b/README.md @@ -52,7 +52,6 @@ Or it can be set from within python: os.environ["https_proxy"] = "https://web-proxy.domain.com:8080" ``` - If your proxy requires authentication then you can prefix the URN with your login information: ``` export HTTPS_PROXY=http://user:pass@web-proxy.domain.com:8080 @@ -75,12 +74,19 @@ Your first feature flag # the code to run if the feature is off Supported Python versions ----------- +------------------------- + The SDK is tested with the most recent patch releases of Python 2.7, 3.3, 3.4, 3.5, and 3.6. Python 2.6 is no longer supported. +Database integrations +--------------------- + +Feature flag data can be kept in a persistent store using Redis or DynamoDB. These adapters are implemented in the `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://github.com/launchdarkly/python-client-private/blob/master/ldclient/integrations.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information. + Using flag data from a file --------------------------- -For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See [`file_data_source.py`](https://github.com/launchdarkly/python-client/blob/master/ldclient/file_data_source.py) for more details. + +For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See [`file_data_source.py`](https://github.com/launchdarkly/python-client/blob/master/ldclient/file_data_source.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/reading-flags-from-a-file) for more details. Learn more ----------- @@ -100,7 +106,7 @@ Contributing See [CONTRIBUTING](CONTRIBUTING.md) for more information. About LaunchDarkly ------------ +------------------ * LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. 
With LaunchDarkly, you can: * Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases. diff --git a/ldclient/integrations.py b/ldclient/integrations.py index 6102d354..63c01202 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -15,8 +15,21 @@ def new_feature_store(table_name, caching=CacheConfig.default()): """Creates a DynamoDB-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + To use this method, you must first install the `boto3` package containing the AWS SDK gems. + Then, put the object returned by this method into the `feature_store` property of your + client configuration (:class:ldclient.config.Config). + + Note that the DynamoDB table must already exist; the LaunchDarkly SDK does not create the table + automatically, because it has no way of knowing what additional properties (such as permissions + and throughput) you would want it to have. The table must have a partition key called + "namespace" and a sort key called "key", both with a string type. + + By default, the DynamoDB client will try to get your AWS credentials and region name from + environment variables and/or local configuration files, as described in the AWS SDK documentation. + You may also pass configuration settings in `dynamodb_opts`. + :param string table_name: The name of an existing DynamoDB table - :param string prefix: An optional namespace prefix to be prepended to all Redis keys + :param string prefix: An optional namespace prefix to be prepended to all DynamoDB keys :param dict dynamodb_opts: Optional parameters for configuring the DynamoDB client, as defined in the boto3 API; see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session.client :param CacheConfig caching: Specifies whether local caching should be enabled and if so, @@ -40,6 +53,10 @@ def new_feature_store(url='redis://localhost:6379/0', caching=CacheConfig.default()): """Creates a Redis-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + To use this method, you must first install the `redis` package. Then, put the object + returned by this method into the `feature_store` property of your client configuration + (:class:ldclient.config.Config). 
+ :param string url: The URL of the Redis host; defaults to `DEFAULT_URL` :param string prefix: A namespace prefix to be prepended to all Redis keys; defaults to `DEFAULT_PREFIX` From 3eb821c483dfe9ae5a8d6b6d62a717bc6d32fc5b Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 14:12:33 -0800 Subject: [PATCH 029/356] feature store test improvements --- ldclient/redis_feature_store.py | 4 +-- testing/test_feature_store.py | 47 ++++++++++++++++++++++++++++----- 2 files changed, 42 insertions(+), 9 deletions(-) diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py index 27139567..16302212 100644 --- a/ldclient/redis_feature_store.py +++ b/ldclient/redis_feature_store.py @@ -59,10 +59,10 @@ def initialized(self): class _RedisFeatureStoreCore(FeatureStoreCore): def __init__(self, url, prefix, max_connections): - self._prefix = prefix + self._prefix = prefix or 'launchdarkly' self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) self.test_update_hook = None # exposed for testing - log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + prefix) + log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + self._prefix) def _items_key(self, kind): return "{0}:{1}".format(self._prefix, kind.namespace) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 8ab8c422..f6912ff3 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -15,6 +15,10 @@ class InMemoryTester(object): def init_store(self): return InMemoryFeatureStore() + @property + def supports_prefix(self): + return False + class RedisTester(object): redis_host = 'localhost' @@ -23,19 +27,27 @@ class RedisTester(object): def __init__(self, cache_config): self._cache_config = cache_config - def init_store(self): + def init_store(self, prefix=None): self._clear_data() - return Redis.new_feature_store(caching=self._cache_config) + return Redis.new_feature_store(caching=self._cache_config, prefix=prefix) + + @property + def supports_prefix(self): + return True def _clear_data(self): r = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0) - r.delete("launchdarkly:features") + r.flushdb() class RedisWithDeprecatedConstructorTester(RedisTester): - def init_store(self): + def init_store(self, prefix=None): self._clear_data() - return RedisFeatureStore(expiration=(30 if self._cache_config.enabled else 0)) + return RedisFeatureStore(expiration=(30 if self._cache_config.enabled else 0), prefix=prefix) + + @property + def supports_prefix(self): + return True class DynamoDBTester(object): @@ -51,10 +63,14 @@ class DynamoDBTester(object): def __init__(self, cache_config): self._cache_config = cache_config - def init_store(self): + def init_store(self, prefix=None): self._create_table() self._clear_data() - return DynamoDB.new_feature_store(self.table_name, dynamodb_opts=self.options) + return DynamoDB.new_feature_store(self.table_name, prefix=prefix, dynamodb_opts=self.options) + + @property + def supports_prefix(self): + return True def _create_table(self): if self.table_created: @@ -131,6 +147,10 @@ class TestFeatureStore: DynamoDBTester(CacheConfig.disabled()) ] + @pytest.fixture(params=params) + def tester(self, request): + return request.param + @pytest.fixture(params=params) def store(self, request): return request.param.init_store() @@ -230,6 +250,19 @@ def test_upsert_older_version_after_delete(self, store): store.upsert(FEATURES, old_ver) assert 
store.get(FEATURES, 'foo', lambda x: x) is None + def test_stores_with_different_prefixes_are_independent(self, tester): + if not tester.supports_prefix: + return + store_a = tester.init_store('a') + store_b = tester.init_store('b') + flag = { 'key': 'flag', 'version': 1 } + store_a.init({ FEATURES: { flag['key']: flag } }) + store_b.init({ FEATURES: { } }) + item = store_a.get(FEATURES, flag['key'], lambda x: x) + assert item == flag + item = store_b.get(FEATURES, flag['key'], lambda x: x) + assert item is None + class TestRedisFeatureStoreExtraTests: def test_upsert_race_condition_against_external_client_with_higher_version(self): From cc938e33221b35daf612b10c881e87c5b5b60056 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 16:14:39 -0800 Subject: [PATCH 030/356] better database prefix test --- testing/test_feature_store.py | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index f6912ff3..6c0f0c5e 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -251,17 +251,36 @@ def test_upsert_older_version_after_delete(self, store): assert store.get(FEATURES, 'foo', lambda x: x) is None def test_stores_with_different_prefixes_are_independent(self, tester): + # This verifies that init, get, and upsert are all correctly using the specified key prefix. if not tester.supports_prefix: return + + flag_a1 = { 'key': 'flagA1', 'version': 1 } + flag_a2 = { 'key': 'flagA2', 'version': 1 } + flag_b1 = { 'key': 'flagB1', 'version': 1 } + flag_b2 = { 'key': 'flagB2', 'version': 1 } store_a = tester.init_store('a') store_b = tester.init_store('b') - flag = { 'key': 'flag', 'version': 1 } - store_a.init({ FEATURES: { flag['key']: flag } }) - store_b.init({ FEATURES: { } }) - item = store_a.get(FEATURES, flag['key'], lambda x: x) - assert item == flag - item = store_b.get(FEATURES, flag['key'], lambda x: x) + + store_a.init({ FEATURES: { 'flagA1': flag_a1 } }) + store_a.upsert(FEATURES, flag_a2) + + store_b.init({ FEATURES: { 'flagB1': flag_b1 } }) + store_b.upsert(FEATURES, flag_b2) + + item = store_a.get(FEATURES, 'flagA1', lambda x: x) + assert item == flag_a1 + item = store_a.get(FEATURES, 'flagB1', lambda x: x) + assert item is None + items = store_a.all(FEATURES, lambda x: x) + assert items == { 'flagA1': flag_a1, 'flagA2': flag_a2 } + + item = store_b.get(FEATURES, 'flagB1', lambda x: x) + assert item == flag_b1 + item = store_b.get(FEATURES, 'flagA1', lambda x: x) assert item is None + items = store_b.all(FEATURES, lambda x: x) + assert items == { 'flagB1': flag_b1, 'flagB2': flag_b2 } class TestRedisFeatureStoreExtraTests: From 5b8b33745521e5909d01fa2982a66b4b28901cb7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 16:20:32 -0800 Subject: [PATCH 031/356] clarify comment --- testing/test_feature_store.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 6c0f0c5e..35a2ef6e 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -251,7 +251,8 @@ def test_upsert_older_version_after_delete(self, store): assert store.get(FEATURES, 'foo', lambda x: x) is None def test_stores_with_different_prefixes_are_independent(self, tester): - # This verifies that init, get, and upsert are all correctly using the specified key prefix. + # This verifies that init(), get(), all(), and upsert() are all correctly using the specified key prefix. 
+ # The delete() method isn't tested separately because it's implemented as a variant of upsert(). if not tester.supports_prefix: return
From f9ce243f9e6e49dadae858fd2bfc654f41b56f7c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 17:47:16 -0800 Subject: [PATCH 032/356] add Consul feature store integration --- .circleci/config.yml | 11 +- consul-requirements.txt | 1 + ldclient/impl/__init__.py | 0 ldclient/impl/integrations/__init__.py | 0 ldclient/impl/integrations/consul/__init__.py | 0 .../impl/integrations/consul/feature_store.py | 125 ++++++++++++++++++ ldclient/integrations.py | 36 +++++ testing/test_feature_store.py | 37 +++++- 8 files changed, 207 insertions(+), 3 deletions(-) create mode 100644 consul-requirements.txt create mode 100644 ldclient/impl/__init__.py create mode 100644 ldclient/impl/integrations/__init__.py create mode 100644 ldclient/impl/integrations/consul/__init__.py create mode 100644 ldclient/impl/integrations/consul/feature_store.py
diff --git a/.circleci/config.yml b/.circleci/config.yml index 92699a3c..5c83ba64 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -17,13 +17,16 @@ test-template: &test-template command: | sudo pip install --upgrade pip setuptools; sudo pip install -r test-requirements.txt; + if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then + sudo pip install -r consul-requirements.text; + fi; sudo python setup.py install; pip freeze - run: name: run tests command: | mkdir test-reports; - if [[ $CIRCLE_JOB == test-2.7 ]]; then + if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; sh -c '[ -n "${CODECLIMATE_REPO_TOKEN+1}" ] && codeclimate-test-reporter || echo "No CODECLIMATE_REPO_TOKEN value is set; not publishing coverage report"'; else @@ -41,33 +44,39 @@ jobs: - image: circleci/python:2.7-jessie - image: redis - image: amazon/dynamodb-local + - image: consul test-3.3: <<: *test-template docker: - image: circleci/python:3.3-jessie - image: redis - image: amazon/dynamodb-local + # python-consul doesn't support Python 3.3 test-3.4: <<: *test-template docker: - image: circleci/python:3.4-jessie - image: redis - image: amazon/dynamodb-local + # python-consul doesn't support Python 3.4 test-3.5: <<: *test-template docker: - image: circleci/python:3.5-jessie - image: redis - image: amazon/dynamodb-local + - image: consul test-3.6: <<: *test-template docker: - image: circleci/python:3.6-jessie - image: redis - image: amazon/dynamodb-local + - image: consul test-3.7: <<: *test-template docker: - image: circleci/python:3.7-stretch - image: redis - image: amazon/dynamodb-local + - image: consul
diff --git a/consul-requirements.txt b/consul-requirements.txt new file mode 100644 index 00000000..637f7fe1 --- /dev/null +++ b/consul-requirements.txt @@ -0,0 +1 @@ +python-consul>=1.0.1
diff --git a/ldclient/impl/__init__.py b/ldclient/impl/__init__.py new file mode 100644 index 00000000..e69de29b
diff --git a/ldclient/impl/integrations/__init__.py b/ldclient/impl/integrations/__init__.py new file mode 100644 index 00000000..e69de29b
diff --git a/ldclient/impl/integrations/consul/__init__.py b/ldclient/impl/integrations/consul/__init__.py new file mode 100644 index 00000000..e69de29b
diff --git a/ldclient/impl/integrations/consul/feature_store.py b/ldclient/impl/integrations/consul/feature_store.py new file mode 100644 index 00000000..5fe2d8ad --- /dev/null +++ b/ldclient/impl/integrations/consul/feature_store.py @@ -0,0 +1,125 @@ +import json + 
+have_consul = False +try: + import consul + have_consul = True +except ImportError: + pass + +from ldclient import log +from ldclient.feature_store import CacheConfig +from ldclient.feature_store_helpers import CachingStoreWrapper +from ldclient.interfaces import FeatureStore, FeatureStoreCore + +# +# Internal implementation of the Consul feature store. +# +# Implementation notes: +# +# * Feature flags, segments, and any other kind of entity the LaunchDarkly client may wish +# to store, are stored as individual items with the key "{prefix}/features/{flag-key}", +# "{prefix}/segments/{segment-key}", etc. +# +# * The special key "{prefix}/$inited" indicates that the store contains a complete data set. +# +# * Since Consul has limited support for transactions (they can't contain more than 64 +# operations), the init method-- which replaces the entire data store-- is not guaranteed to +# be atomic, so there can be a race condition if another process is adding new data via +# Upsert. To minimize this, we don't delete all the data at the start; instead, we update +# the items we've received, and then delete all other items. That could potentially result in +# deleting new data from another process, but that would be the case anyway if the Init +# happened to execute later than the Upsert; we are relying on the fact that normally the +# process that did the Init will also receive the new data shortly and do its own Upsert. +# + +class _ConsulFeatureStoreCore(FeatureStoreCore): + def __init__(self, host, port, prefix, consul_opts): + if not have_consul: + raise NotImplementedError("Cannot use Consul feature store because the python-consul package is not installed") + opts = consul_opts or {} + if host is not None: + opts['host'] = host + if port is not None: + opts['port'] = port + self._prefix = ("launchdarkly" if prefix is None else prefix) + "/" + self._client = consul.Consul(**opts) + + def init_internal(self, all_data): + # Start by reading the existing keys; we will later delete any of these that weren't in all_data. + index, keys = self._client.kv.get(self._prefix, recurse=True, keys=True) + unused_old_keys = set(keys or []) + + num_items = 0 + inited_key = self._inited_key() + unused_old_keys.discard(inited_key) + + # Insert or update every provided item. Note that this Consul client doesn't support batch + # operations (the "txn" method), so we'll write them one at a time. 
+ for kind, items in all_data.items(): + for key, item in items.items(): + encoded_item = json.dumps(item) + db_key = self._item_key(kind, item['key']) + self._client.kv.put(db_key, encoded_item) + unused_old_keys.discard(db_key) + num_items = num_items + 1 + + # Now delete any previously existing items whose keys were not in the current data + for key in unused_old_keys: + self._client.kv.delete(key) + + # Now set the special key that we check in initialized_internal() + self._client.kv.put(inited_key, "") + + log.info('Initialized Consul store with %d items', num_items) + + def get_internal(self, kind, key): + index, resp = self._client.kv.get(self._item_key(kind, key)) + return None if resp is None else json.loads(resp['Value']) + + def get_all_internal(self, kind): + items_out = {} + index, results = self._client.kv.get(self._kind_key(kind), recurse=True) + for result in results: + item = json.loads(result['Value']) + items_out[item['key']] = item + return items_out + + def upsert_internal(self, kind, new_item): + key = self._item_key(kind, new_item['key']) + encoded_item = json.dumps(new_item) + + # We will potentially keep retrying indefinitely until someone's write succeeds + while True: + index, old_value = self._client.kv.get(key) + if old_value is None: + mod_index = 0 + else: + old_item = json.loads(old_value['Value']) + # Check whether the item is stale. If so, don't do the update (and return the existing item to + # CachingStoreWrapper so it can be cached) + if old_item['version'] >= new_item['version']: + return old_item + mod_index = old_value['ModifyIndex'] + + # Otherwise, try to write. We will do a compare-and-set operation, so the write will only succeed if + # the key's ModifyIndex is still equal to the previous value. If the previous ModifyIndex was zero, + # it means the key did not previously exist and the write will only succeed if it still doesn't exist. + success = self._client.kv.put(key, encoded_item, cas=mod_index) + if success: + return new_item + + log.debug('Concurrent modification detected, retrying') + + def initialized_internal(self): + index, resp = self._client.kv.get(self._inited_key()) + return (resp is not None) + + def _kind_key(self, kind): + return self._prefix + kind.namespace + + def _item_key(self, kind, key): + return self._kind_key(kind) + '/' + key + + def _inited_key(self): + return self._prefix + ('$inited') diff --git a/ldclient/integrations.py b/ldclient/integrations.py index 63c01202..aa74da1e 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -1,9 +1,41 @@ from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper +from ldclient.impl.integrations.consul.feature_store import _ConsulFeatureStoreCore from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore from ldclient.redis_feature_store import _RedisFeatureStoreCore +class Consul(object): + """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. + """ + + @staticmethod + def new_feature_store(host=None, + port=None, + prefix=None, + consul_opts=None, + caching=CacheConfig.default()): + """Creates a Consul-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + For more details about how and why you can use a persistent feature store, see the + SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store + + To use this method, you must first install the `python-consul` package. 
Then, put the object + returned by this method into the `feature_store` property of your client configuration + (:class:ldclient.config.Config). + + :param string host: Hostname of the Consul server (uses "localhost" if omitted) + :param int port: Port of the Consul server (uses 8500 if omitted) + :param string prefix: An optional namespace prefix to be prepended to all Consul keys + :param dict consul_opts: Optional parameters for configuring the Consul client, if you need + to set any of them besides host and port, as defined in the python-consul API; see + https://python-consul.readthedocs.io/en/latest/#consul + :param CacheConfig caching: Specifies whether local caching should be enabled and if so, + sets the cache properties; defaults to `CacheConfig.default()` + """ + core = _ConsulFeatureStoreCore(host, port, prefix, consul_opts) + return CachingStoreWrapper(core, caching) + + class DynamoDB(object): """Provides factory methods for integrations between the LaunchDarkly SDK and DynamoDB. """ @@ -14,6 +46,8 @@ def new_feature_store(table_name, dynamodb_opts={}, caching=CacheConfig.default()): """Creates a DynamoDB-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + For more details about how and why you can use a persistent feature store, see the + SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store To use this method, you must first install the `boto3` package containing the AWS SDK gems. Then, put the object returned by this method into the `feature_store` property of your @@ -52,6 +86,8 @@ def new_feature_store(url='redis://localhost:6379/0', max_connections=16, caching=CacheConfig.default()): """Creates a Redis-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + For more details about how and why you can use a persistent feature store, see the + SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store To use this method, you must first install the `redis` package. 
Then, put the object returned by this method into the `feature_store` property of your client configuration diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 35a2ef6e..76a7f41e 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -1,12 +1,21 @@ import boto3 +import consul import json import pytest import redis import time +# Consul is only supported in some Python versions +have_consul = False +try: + import consul + have_consul = True +except ImportError: + pass + from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers from ldclient.feature_store import CacheConfig, InMemoryFeatureStore -from ldclient.integrations import DynamoDB, Redis +from ldclient.integrations import Consul, DynamoDB, Redis from ldclient.redis_feature_store import RedisFeatureStore from ldclient.versioned_data_kind import FEATURES @@ -50,6 +59,25 @@ def supports_prefix(self): return True +class ConsulTester(object): + def __init__(self, cache_config): + self._cache_config = cache_config + + def init_store(self, prefix=None): + self._clear_data(prefix or "launchdarkly") + return Consul.new_feature_store(prefix=prefix, caching=self._cache_config) + + @property + def supports_prefix(self): + return True + + def _clear_data(self, prefix): + client = consul.Consul() + index, keys = client.kv.get(prefix + "/", recurse=True, keys=True) + for key in (keys or []): + client.kv.delete(key) + + class DynamoDBTester(object): table_name = 'LD_DYNAMODB_TEST_TABLE' table_created = False @@ -66,7 +94,8 @@ def __init__(self, cache_config): def init_store(self, prefix=None): self._create_table() self._clear_data() - return DynamoDB.new_feature_store(self.table_name, prefix=prefix, dynamodb_opts=self.options) + return DynamoDB.new_feature_store(self.table_name, prefix=prefix, dynamodb_opts=self.options, + caching=self._cache_config) @property def supports_prefix(self): @@ -147,6 +176,10 @@ class TestFeatureStore: DynamoDBTester(CacheConfig.disabled()) ] + if have_consul: + params.append(ConsulTester(CacheConfig.default())) + params.append(ConsulTester(CacheConfig.disabled())) + @pytest.fixture(params=params) def tester(self, request): return request.param From 89a96be19b24163292c0b00a46638325f3cf780e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 17:49:25 -0800 Subject: [PATCH 033/356] typo --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5c83ba64..8671b022 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -18,7 +18,7 @@ test-template: &test-template sudo pip install --upgrade pip setuptools; sudo pip install -r test-requirements.txt; if [[ "$CIRCLE_JOB != test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then - sudo pip install -r consul-requirements.text; + sudo pip install -r consul-requirements.txt; fi; sudo python setup.py install; pip freeze From da8c1a67b8492e30800f411a1616538f8ee665e2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 17:53:21 -0800 Subject: [PATCH 034/356] rm extra import --- testing/test_feature_store.py | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 76a7f41e..6370a848 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -1,5 +1,4 @@ import boto3 -import consul import json import pytest import redis From b19e6188d834c5e1997050200e0c59b9664a842a Mon Sep 17 00:00:00 2001 From: Eli Bishop 
Date: Fri, 18 Jan 2019 18:04:15 -0800 Subject: [PATCH 035/356] fix byte/string issue and rename file --- .../consul/{feature_store.py => consul_feature_store.py} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename ldclient/impl/integrations/consul/{feature_store.py => consul_feature_store.py} (97%) diff --git a/ldclient/impl/integrations/consul/feature_store.py b/ldclient/impl/integrations/consul/consul_feature_store.py similarity index 97% rename from ldclient/impl/integrations/consul/feature_store.py rename to ldclient/impl/integrations/consul/consul_feature_store.py index 5fe2d8ad..6fc8652e 100644 --- a/ldclient/impl/integrations/consul/feature_store.py +++ b/ldclient/impl/integrations/consul/consul_feature_store.py @@ -75,13 +75,13 @@ def init_internal(self, all_data): def get_internal(self, kind, key): index, resp = self._client.kv.get(self._item_key(kind, key)) - return None if resp is None else json.loads(resp['Value']) + return None if resp is None else json.loads(resp['Value'].decode('utf-8')) def get_all_internal(self, kind): items_out = {} index, results = self._client.kv.get(self._kind_key(kind), recurse=True) for result in results: - item = json.loads(result['Value']) + item = json.loads(result['Value'].decode('utf-8')) items_out[item['key']] = item return items_out @@ -95,7 +95,7 @@ def upsert_internal(self, kind, new_item): if old_value is None: mod_index = 0 else: - old_item = json.loads(old_value['Value']) + old_item = json.loads(old_value['Value'].decode('utf-8')) # Check whether the item is stale. If so, don't do the update (and return the existing item to # CachingStoreWrapper so it can be cached) if old_item['version'] >= new_item['version']: From db621dc4d72d90b87a6474a06cf010a55b3d3bf2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 18:04:36 -0800 Subject: [PATCH 036/356] rename file --- ldclient/integrations.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ldclient/integrations.py b/ldclient/integrations.py index aa74da1e..d2d55354 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -1,6 +1,6 @@ from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper -from ldclient.impl.integrations.consul.feature_store import _ConsulFeatureStoreCore +from ldclient.impl.integrations.consul.consul_feature_store import _ConsulFeatureStoreCore from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore from ldclient.redis_feature_store import _RedisFeatureStoreCore @@ -23,6 +23,9 @@ def new_feature_store(host=None, returned by this method into the `feature_store` property of your client configuration (:class:ldclient.config.Config). + Note that `python-consul` is not available for Python 3.3 or 3.4, so this feature cannot be + used in those Python versions. 
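A minimal sketch of a call to this factory, assuming a Consul agent on the default localhost:8500 (the "my-app" prefix and the 30-second cache TTL are arbitrary example values):

    from ldclient.feature_store import CacheConfig
    from ldclient.integrations import Consul

    # Keys are written under the "my-app" prefix; reads are served from the
    # CachingStoreWrapper's local cache before falling through to Consul.
    store = Consul.new_feature_store(host='localhost', port=8500,
                                     prefix='my-app',
                                     caching=CacheConfig(expiration=30))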
+ :param string host: Hostname of the Consul server (uses "localhost" if omitted) :param int port: Port of the Consul server (uses 8500 if omitted) :param string prefix: An optional namespace prefix to be prepended to all Consul keys From b09e07eabba1410adba388cce7980488238dba8a Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 18:04:45 -0800 Subject: [PATCH 037/356] docs --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index d25ee307..61e67050 100644 --- a/README.md +++ b/README.md @@ -81,7 +81,9 @@ The SDK is tested with the most recent patch releases of Python 2.7, 3.3, 3.4, 3 Database integrations --------------------- -Feature flag data can be kept in a persistent store using Redis or DynamoDB. These adapters are implemented in the `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://github.com/launchdarkly/python-client-private/blob/master/ldclient/integrations.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information. +Feature flag data can be kept in a persistent store using Consul, DynamoDB, or Redis. These adapters are implemented in the `Consul`, `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://github.com/launchdarkly/python-client-private/blob/master/ldclient/integrations.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information. + +Note that Consul is not supported in Python 3.3 or 3.4. 
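To make the wiring concrete, here is a minimal sketch using the Redis store (the SDK key is a placeholder and the URL assumes a local Redis server; the Consul and DynamoDB factories are substituted the same way):

    from ldclient.client import LDClient
    from ldclient.config import Config
    from ldclient.integrations import Redis

    # Build the persistent store, then hand it to the client through the
    # feature_store property of the configuration.
    store = Redis.new_feature_store(url='redis://localhost:6379/0',
                                    prefix='launchdarkly')
    client = LDClient(config=Config(sdk_key='YOUR_SDK_KEY', feature_store=store))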
Using flag data from a file --------------------------- From 9ea89ca60c501c4795e663ef0b36738e082fb3ae Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 18:09:42 -0800 Subject: [PATCH 038/356] script typo --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8671b022..714c5ee1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -17,7 +17,7 @@ test-template: &test-template command: | sudo pip install --upgrade pip setuptools; sudo pip install -r test-requirements.txt; - if [[ "$CIRCLE_JOB != test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then + if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then sudo pip install -r consul-requirements.txt; fi; sudo python setup.py install; From a50e6f35d14de0b0689ee49d419f63b51bd049b4 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 18 Jan 2019 18:30:38 -0800 Subject: [PATCH 039/356] move all low-level feature store integration code into submodules --- .../impl/integrations/dynamodb/__init__.py | 0 .../dynamodb}/dynamodb_feature_store.py | 0 ldclient/impl/integrations/redis/__init__.py | 0 .../integrations/redis/redis_feature_store.py | 101 +++++++++++++++++ ldclient/integrations.py | 9 +- ldclient/redis_feature_store.py | 107 +----------------- testing/test_feature_store.py | 2 +- 7 files changed, 112 insertions(+), 107 deletions(-) create mode 100644 ldclient/impl/integrations/dynamodb/__init__.py rename ldclient/{ => impl/integrations/dynamodb}/dynamodb_feature_store.py (100%) create mode 100644 ldclient/impl/integrations/redis/__init__.py create mode 100644 ldclient/impl/integrations/redis/redis_feature_store.py diff --git a/ldclient/impl/integrations/dynamodb/__init__.py b/ldclient/impl/integrations/dynamodb/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/dynamodb_feature_store.py b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py similarity index 100% rename from ldclient/dynamodb_feature_store.py rename to ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py diff --git a/ldclient/impl/integrations/redis/__init__.py b/ldclient/impl/integrations/redis/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py new file mode 100644 index 00000000..f0be83a4 --- /dev/null +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -0,0 +1,101 @@ +import json + +have_redis = False +try: + import redis + have_redis = True +except ImportError: + pass + +from ldclient import log +from ldclient.interfaces import FeatureStoreCore +from ldclient.versioned_data_kind import FEATURES + + +class _RedisFeatureStoreCore(FeatureStoreCore): + def __init__(self, url, prefix, max_connections): + if not have_redis: + raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") + self._prefix = prefix or 'launchdarkly' + self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) + self.test_update_hook = None # exposed for testing + log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + self._prefix) + + def _items_key(self, kind): + return "{0}:{1}".format(self._prefix, kind.namespace) + + def init_internal(self, all_data): + pipe = redis.Redis(connection_pool=self._pool).pipeline() + + all_count = 0 + + for kind, items in all_data.items(): + base_key = 
self._items_key(kind) + pipe.delete(base_key) + for key, item in items.items(): + item_json = json.dumps(item) + pipe.hset(base_key, key, item_json) + all_count = all_count + len(items) + pipe.execute() + log.info("Initialized RedisFeatureStore with %d items", all_count) + + def get_all_internal(self, kind): + r = redis.Redis(connection_pool=self._pool) + all_items = r.hgetall(self._items_key(kind)) + + if all_items is None or all_items is "": + all_items = {} + + results = {} + for key, item_json in all_items.items(): + key = key.decode('utf-8') # necessary in Python 3 + results[key] = json.loads(item_json.decode('utf-8')) + return results + + def get_internal(self, kind, key): + r = redis.Redis(connection_pool=self._pool) + item_json = r.hget(self._items_key(kind), key) + + if item_json is None or item_json is "": + log.debug("RedisFeatureStore: key %s not found in '%s'. Returning None.", key, kind.namespace) + return None + + return json.loads(item_json.decode('utf-8')) + + def upsert_internal(self, kind, item): + r = redis.Redis(connection_pool=self._pool) + base_key = self._items_key(kind) + key = item['key'] + item_json = json.dumps(item) + + while True: + pipeline = r.pipeline() + pipeline.watch(base_key) + old = self.get_internal(kind, key) + if self.test_update_hook is not None: + self.test_update_hook(base_key, key) + if old and old['version'] >= item['version']: + log.debug('RedisFeatureStore: Attempted to %s key: %s version %d with a version that is the same or older: %d in "%s"', + 'delete' if item.get('deleted') else 'update', + key, old['version'], item['version'], kind.namespace) + pipeline.unwatch() + return old + else: + pipeline.multi() + pipeline.hset(base_key, key, item_json) + try: + pipeline.execute() + # Unlike Redis implementations for other platforms, in redis-py a failed WATCH + # produces an exception rather than a null result from execute(). + except redis.exceptions.WatchError: + log.debug("RedisFeatureStore: concurrent modification detected, retrying") + continue + return item + + def initialized_internal(self): + r = redis.Redis(connection_pool=self._pool) + return r.exists(self._items_key(FEATURES)) + + def _before_update_transaction(self, base_key, key): + # exposed for testing + pass diff --git a/ldclient/integrations.py b/ldclient/integrations.py index d2d55354..5cfc468b 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -1,14 +1,17 @@ from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper from ldclient.impl.integrations.consul.consul_feature_store import _ConsulFeatureStoreCore -from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore -from ldclient.redis_feature_store import _RedisFeatureStoreCore +from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore +from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore class Consul(object): """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. 
""" + """The key prefix that is used if you do not specify one.""" + DEFAULT_PREFIX = "launchdarkly" + @staticmethod def new_feature_store(host=None, port=None, @@ -28,7 +31,7 @@ def new_feature_store(host=None, :param string host: Hostname of the Consul server (uses "localhost" if omitted) :param int port: Port of the Consul server (uses 8500 if omitted) - :param string prefix: An optional namespace prefix to be prepended to all Consul keys + :param string prefix: A namespace prefix to be prepended to all Consul keys :param dict consul_opts: Optional parameters for configuring the Consul client, if you need to set any of them besides host and port, as defined in the python-consul API; see https://python-consul.readthedocs.io/en/latest/#consul diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py index 16302212..ff93c402 100644 --- a/ldclient/redis_feature_store.py +++ b/ldclient/redis_feature_store.py @@ -1,17 +1,8 @@ -import json +from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore -have_redis = False -try: - import redis - have_redis = True -except ImportError: - pass - -from ldclient import log from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper -from ldclient.interfaces import FeatureStore, FeatureStoreCore -from ldclient.versioned_data_kind import FEATURES +from ldclient.interfaces import FeatureStore # Note that this class is now just a facade around CachingStoreWrapper, which is in turn delegating @@ -22,8 +13,8 @@ class RedisFeatureStore(FeatureStore): """A Redis-backed implementation of :class:`ldclient.feature_store.FeatureStore`. - This implementation class is deprecated and may be changed or removed in the future. Please use - :func:`ldclient.integrations.Redis.new_feature_store()`. + This module and this implementation class are deprecated and may be changed or removed in the future. + Please use :func:`ldclient.integrations.Redis.new_feature_store()`. 
""" def __init__(self, url='redis://localhost:6379/0', @@ -31,8 +22,6 @@ def __init__(self, max_connections=16, expiration=15, capacity=1000): - if not have_redis: - raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") self.core = _RedisFeatureStoreCore(url, prefix, max_connections) # exposed for testing self._wrapper = CachingStoreWrapper(self.core, CacheConfig(expiration=expiration, capacity=capacity)) @@ -54,91 +43,3 @@ def delete(self, kind, key, version): @property def initialized(self): return self._wrapper.initialized - - -class _RedisFeatureStoreCore(FeatureStoreCore): - def __init__(self, url, prefix, max_connections): - - self._prefix = prefix or 'launchdarkly' - self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) - self.test_update_hook = None # exposed for testing - log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + self._prefix) - - def _items_key(self, kind): - return "{0}:{1}".format(self._prefix, kind.namespace) - - def init_internal(self, all_data): - pipe = redis.Redis(connection_pool=self._pool).pipeline() - - all_count = 0 - - for kind, items in all_data.items(): - base_key = self._items_key(kind) - pipe.delete(base_key) - for key, item in items.items(): - item_json = json.dumps(item) - pipe.hset(base_key, key, item_json) - all_count = all_count + len(items) - pipe.execute() - log.info("Initialized RedisFeatureStore with %d items", all_count) - - def get_all_internal(self, kind): - r = redis.Redis(connection_pool=self._pool) - all_items = r.hgetall(self._items_key(kind)) - - if all_items is None or all_items is "": - all_items = {} - - results = {} - for key, item_json in all_items.items(): - key = key.decode('utf-8') # necessary in Python 3 - results[key] = json.loads(item_json.decode('utf-8')) - return results - - def get_internal(self, kind, key): - r = redis.Redis(connection_pool=self._pool) - item_json = r.hget(self._items_key(kind), key) - - if item_json is None or item_json is "": - log.debug("RedisFeatureStore: key %s not found in '%s'. Returning None.", key, kind.namespace) - return None - - return json.loads(item_json.decode('utf-8')) - - def upsert_internal(self, kind, item): - r = redis.Redis(connection_pool=self._pool) - base_key = self._items_key(kind) - key = item['key'] - item_json = json.dumps(item) - - while True: - pipeline = r.pipeline() - pipeline.watch(base_key) - old = self.get_internal(kind, key) - if self.test_update_hook is not None: - self.test_update_hook(base_key, key) - if old and old['version'] >= item['version']: - log.debug('RedisFeatureStore: Attempted to %s key: %s version %d with a version that is the same or older: %d in "%s"', - 'delete' if item.get('deleted') else 'update', - key, old['version'], item['version'], kind.namespace) - pipeline.unwatch() - return old - else: - pipeline.multi() - pipeline.hset(base_key, key, item_json) - try: - pipeline.execute() - # Unlike Redis implementations for other platforms, in redis-py a failed WATCH - # produces an exception rather than a null result from execute(). 
- except redis.exceptions.WatchError: - log.debug("RedisFeatureStore: concurrent modification detected, retrying") - continue - return item - - def initialized_internal(self): - r = redis.Redis(connection_pool=self._pool) - return r.exists(self._items_key(FEATURES)) - - def _before_update_transaction(self, base_key, key): - # exposed for testing - pass diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 6370a848..ce0150cf 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -12,8 +12,8 @@ except ImportError: pass -from ldclient.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers from ldclient.feature_store import CacheConfig, InMemoryFeatureStore +from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers from ldclient.integrations import Consul, DynamoDB, Redis from ldclient.redis_feature_store import RedisFeatureStore from ldclient.versioned_data_kind import FEATURES From 0baddab8a068d034ca73b5ae72b1aa304cb94314 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 13:20:37 -0800 Subject: [PATCH 040/356] move file data source implementation --- ldclient/file_data_source.py | 255 +----------------- ldclient/impl/integrations/files/__init__.py | 0 .../integrations/files/file_data_source.py | 172 ++++++++++++ ldclient/integrations.py | 105 ++++++++ testing/test_file_data_source.py | 8 +- 5 files changed, 290 insertions(+), 250 deletions(-) create mode 100644 ldclient/impl/integrations/files/__init__.py create mode 100644 ldclient/impl/integrations/files/file_data_source.py diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index ebff765b..61088d50 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -1,29 +1,4 @@ -import json -import os -import six -import traceback - -have_yaml = False -try: - import yaml - have_yaml = True -except ImportError: - pass - -have_watchdog = False -try: - import watchdog - import watchdog.events - import watchdog.observers - have_watchdog = True -except ImportError: - pass - -from ldclient.interfaces import UpdateProcessor -from ldclient.repeating_timer import RepeatingTimer -from ldclient.util import log -from ldclient.versioned_data_kind import FEATURES, SEGMENTS - +from ldclient.impl.integrations.files.file_data_source import _FileDataSource class FileDataSource(UpdateProcessor): @classmethod @@ -32,80 +7,9 @@ def factory(cls, **kwargs): used in a test environment, to operate using a predetermined feature flag state without an actual LaunchDarkly connection. - To use this component, call `FileDataSource.factory`, and store its return value in the - `update_processor_class` property of your LaunchDarkly client configuration. In the options - to `factory`, set `paths` to the file path(s) of your data file(s): - :: - - factory = FileDataSource.factory(paths = [ myFilePath ]) - config = Config(update_processor_class = factory) - - This will cause the client not to connect to LaunchDarkly to get feature flags. The - client may still make network connections to send analytics events, unless you have disabled - this with Config.send_events or Config.offline. - - Flag data files can be either JSON or YAML (in order to use YAML, you must install the 'pyyaml' - package). They contain an object with three possible properties: - - * "flags": Feature flag definitions. - * "flagValues": Simplified feature flags that contain only a value. - * "segments": User segment definitions. 
- - The format of the data in "flags" and "segments" is defined by the LaunchDarkly application - and is subject to change. Rather than trying to construct these objects yourself, it is simpler - to request existing flags directly from the LaunchDarkly server in JSON format, and use this - output as the starting point for your file. In Linux you would do this: - :: - - curl -H "Authorization: {your sdk key}" https://app.launchdarkly.com/sdk/latest-all - - The output will look something like this (but with many more properties): - :: - - { - "flags": { - "flag-key-1": { - "key": "flag-key-1", - "on": true, - "variations": [ "a", "b" ] - } - }, - "segments": { - "segment-key-1": { - "key": "segment-key-1", - "includes": [ "user-key-1" ] - } - } - } - - Data in this format allows the SDK to exactly duplicate all the kinds of flag behavior supported - by LaunchDarkly. However, in many cases you will not need this complexity, but will just want to - set specific flag keys to specific values. For that, you can use a much simpler format: - :: - - { - "flagValues": { - "my-string-flag-key": "value-1", - "my-boolean-flag-key": true, - "my-integer-flag-key": 3 - } - } - - Or, in YAML: - :: - - flagValues: - my-string-flag-key: "value-1" - my-boolean-flag-key: true - my-integer-flag-key: 1 - - It is also possible to specify both "flags" and "flagValues", if you want some flags - to have simple values and others to have complex behavior. However, it is an error to use the - same flag key or segment key more than once, either in a single file or across multiple files. - - If the data source encounters any error in any file-- malformed content, a missing file, or a - duplicate key-- it will not load flags from any of the files. - + This module and this implementation class are deprecated and may be changed or removed in the future. + Please use :func:`ldclient.integrations.Files.new_data_source()`. + :param kwargs: See below @@ -123,150 +27,9 @@ def factory(cls, **kwargs): used only if auto_update is true, and if the native file-watching mechanism from 'watchdog' is not being used. The default value is 1 second. 
""" - return lambda config, store, ready : FileDataSource(store, kwargs, ready) - - def __init__(self, store, options, ready): - self._store = store - self._ready = ready - self._inited = False - self._paths = options.get('paths', []) - if isinstance(self._paths, six.string_types): - self._paths = [ self._paths ] - self._auto_update = options.get('auto_update', False) - self._auto_updater = None - self._poll_interval = options.get('poll_interval', 1) - self._force_polling = options.get('force_polling', False) # used only in tests - - def start(self): - self._load_all() - - if self._auto_update: - self._auto_updater = self._start_auto_updater() - - # We will signal readiness immediately regardless of whether the file load succeeded or failed - - # the difference can be detected by checking initialized() - self._ready.set() - - def stop(self): - if self._auto_updater: - self._auto_updater.stop() - - def initialized(self): - return self._inited - - def _load_all(self): - all_data = { FEATURES: {}, SEGMENTS: {} } - for path in self._paths: - try: - self._load_file(path, all_data) - except Exception as e: - log.error('Unable to load flag data from "%s": %s' % (path, repr(e))) - traceback.print_exc() - return - self._store.init(all_data) - self._inited = True - - def _load_file(self, path, all_data): - content = None - with open(path, 'r') as f: - content = f.read() - parsed = self._parse_content(content) - for key, flag in six.iteritems(parsed.get('flags', {})): - self._add_item(all_data, FEATURES, flag) - for key, value in six.iteritems(parsed.get('flagValues', {})): - self._add_item(all_data, FEATURES, self._make_flag_with_value(key, value)) - for key, segment in six.iteritems(parsed.get('segments', {})): - self._add_item(all_data, SEGMENTS, segment) - - def _parse_content(self, content): - if have_yaml: - return yaml.load(content) # pyyaml correctly parses JSON too - return json.loads(content) - - def _add_item(self, all_data, kind, item): - items = all_data[kind] - key = item.get('key') - if items.get(key) is None: - items[key] = item - else: - raise Exception('In %s, key "%s" was used more than once' % (kind.namespace, key)) - - def _make_flag_with_value(self, key, value): - return { - 'key': key, - 'on': True, - 'fallthrough': { - 'variation': 0 - }, - 'variations': [ value ] - } - - def _start_auto_updater(self): - resolved_paths = [] - for path in self._paths: - try: - resolved_paths.append(os.path.realpath(path)) - except: - log.warn('Cannot watch for changes to data file "%s" because it is an invalid path' % path) - if have_watchdog and not self._force_polling: - return FileDataSource.WatchdogAutoUpdater(resolved_paths, self._load_all) - else: - return FileDataSource.PollingAutoUpdater(resolved_paths, self._load_all, self._poll_interval) - - # Watch for changes to data files using the watchdog package. This uses native OS filesystem notifications - # if available for the current platform. 
- class WatchdogAutoUpdater(object): - def __init__(self, resolved_paths, reloader): - watched_files = set(resolved_paths) - - class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): - def on_any_event(self, event): - if event.src_path in watched_files: - reloader() - - dir_paths = set() - for path in resolved_paths: - dir_paths.add(os.path.dirname(path)) - - self._observer = watchdog.observers.Observer() - handler = LDWatchdogHandler() - for path in dir_paths: - self._observer.schedule(handler, path) - self._observer.start() - - def stop(self): - self._observer.stop() - self._observer.join() - - # Watch for changes to data files by polling their modification times. This is used if auto-update is - # on but the watchdog package is not installed. - class PollingAutoUpdater(object): - def __init__(self, resolved_paths, reloader, interval): - self._paths = resolved_paths - self._reloader = reloader - self._file_times = self._check_file_times() - self._timer = RepeatingTimer(interval, self._poll) - self._timer.start() - - def stop(self): - self._timer.stop() - - def _poll(self): - new_times = self._check_file_times() - changed = False - for file_path, file_time in six.iteritems(self._file_times): - if new_times.get(file_path) is not None and new_times.get(file_path) != file_time: - changed = True - break - self._file_times = new_times - if changed: - self._reloader() - def _check_file_times(self): - ret = {} - for path in self._paths: - try: - ret[path] = os.path.getmtime(path) - except: - ret[path] = None - return ret + return lambda config, store, ready : _FileDataSource(store, ready, + paths=kwargs.get("paths"), + auto_update=kwargs.get("auto_update", False), + poll_interval=kwargs.get("poll_interval", 1), + force_polling=kwargs.get("force_polling", False)) diff --git a/ldclient/impl/integrations/files/__init__.py b/ldclient/impl/integrations/files/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py new file mode 100644 index 00000000..9ba6e561 --- /dev/null +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -0,0 +1,172 @@ +import json +import os +import six +import traceback + +have_yaml = False +try: + import yaml + have_yaml = True +except ImportError: + pass + +have_watchdog = False +try: + import watchdog + import watchdog.events + import watchdog.observers + have_watchdog = True +except ImportError: + pass + +from ldclient.interfaces import UpdateProcessor +from ldclient.repeating_timer import RepeatingTimer +from ldclient.util import log +from ldclient.versioned_data_kind import FEATURES, SEGMENTS + +class _FileDataSource(UpdateProcessor): + def __init__(self, store, ready, paths, auto_update, poll_interval, force_polling): + self._store = store + self._ready = ready + self._inited = False + self._paths = paths + if isinstance(self._paths, six.string_types): + self._paths = [ self._paths ] + self._auto_update = auto_update + self._auto_updater = None + self._poll_interval = poll_interval + self._force_polling = force_polling + + def start(self): + self._load_all() + + if self._auto_update: + self._auto_updater = self._start_auto_updater() + + # We will signal readiness immediately regardless of whether the file load succeeded or failed - + # the difference can be detected by checking initialized() + self._ready.set() + + def stop(self): + if self._auto_updater: + self._auto_updater.stop() + + def initialized(self): + return self._inited + 
+ def _load_all(self): + all_data = { FEATURES: {}, SEGMENTS: {} } + for path in self._paths: + try: + self._load_file(path, all_data) + except Exception as e: + log.error('Unable to load flag data from "%s": %s' % (path, repr(e))) + traceback.print_exc() + return + self._store.init(all_data) + self._inited = True + + def _load_file(self, path, all_data): + content = None + with open(path, 'r') as f: + content = f.read() + parsed = self._parse_content(content) + for key, flag in six.iteritems(parsed.get('flags', {})): + self._add_item(all_data, FEATURES, flag) + for key, value in six.iteritems(parsed.get('flagValues', {})): + self._add_item(all_data, FEATURES, self._make_flag_with_value(key, value)) + for key, segment in six.iteritems(parsed.get('segments', {})): + self._add_item(all_data, SEGMENTS, segment) + + def _parse_content(self, content): + if have_yaml: + return yaml.load(content) # pyyaml correctly parses JSON too + return json.loads(content) + + def _add_item(self, all_data, kind, item): + items = all_data[kind] + key = item.get('key') + if items.get(key) is None: + items[key] = item + else: + raise Exception('In %s, key "%s" was used more than once' % (kind.namespace, key)) + + def _make_flag_with_value(self, key, value): + return { + 'key': key, + 'on': True, + 'fallthrough': { + 'variation': 0 + }, + 'variations': [ value ] + } + + def _start_auto_updater(self): + resolved_paths = [] + for path in self._paths: + try: + resolved_paths.append(os.path.realpath(path)) + except: + log.warn('Cannot watch for changes to data file "%s" because it is an invalid path' % path) + if have_watchdog and not self._force_polling: + return _FileDataSource.WatchdogAutoUpdater(resolved_paths, self._load_all) + else: + return _FileDataSource.PollingAutoUpdater(resolved_paths, self._load_all, self._poll_interval) + + # Watch for changes to data files using the watchdog package. This uses native OS filesystem notifications + # if available for the current platform. + class WatchdogAutoUpdater(object): + def __init__(self, resolved_paths, reloader): + watched_files = set(resolved_paths) + + class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): + def on_any_event(self, event): + if event.src_path in watched_files: + reloader() + + dir_paths = set() + for path in resolved_paths: + dir_paths.add(os.path.dirname(path)) + + self._observer = watchdog.observers.Observer() + handler = LDWatchdogHandler() + for path in dir_paths: + self._observer.schedule(handler, path) + self._observer.start() + + def stop(self): + self._observer.stop() + self._observer.join() + + # Watch for changes to data files by polling their modification times. This is used if auto-update is + # on but the watchdog package is not installed. 
+    class PollingAutoUpdater(object):
+        def __init__(self, resolved_paths, reloader, interval):
+            self._paths = resolved_paths
+            self._reloader = reloader
+            self._file_times = self._check_file_times()
+            self._timer = RepeatingTimer(interval, self._poll)
+            self._timer.start()
+
+        def stop(self):
+            self._timer.stop()
+
+        def _poll(self):
+            new_times = self._check_file_times()
+            changed = False
+            for file_path, file_time in six.iteritems(self._file_times):
+                if new_times.get(file_path) is not None and new_times.get(file_path) != file_time:
+                    changed = True
+                    break
+            self._file_times = new_times
+            if changed:
+                self._reloader()
+
+        def _check_file_times(self):
+            ret = {}
+            for path in self._paths:
+                try:
+                    ret[path] = os.path.getmtime(path)
+                except:
+                    ret[path] = None
+            return ret
diff --git a/ldclient/integrations.py b/ldclient/integrations.py
index 5cfc468b..fcc89abc 100644
--- a/ldclient/integrations.py
+++ b/ldclient/integrations.py
@@ -2,6 +2,7 @@
 from ldclient.feature_store_helpers import CachingStoreWrapper
 from ldclient.impl.integrations.consul.consul_feature_store import _ConsulFeatureStoreCore
 from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore
+from ldclient.impl.integrations.files.file_data_source import _FileDataSource
 from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore
@@ -111,3 +112,107 @@ def new_feature_store(url='redis://localhost:6379/0',
         wrapper = CachingStoreWrapper(core, caching)
         wrapper.core = core # exposed for testing
         return wrapper
+
+
+class Files(object):
+    """Provides factory methods for integrations with filesystem data.
+    """
+
+    @staticmethod
+    def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=False):
+        """Provides a way to use local files as a source of feature flag state. This would typically be
+        used in a test environment, to operate using a predetermined feature flag state without an
+        actual LaunchDarkly connection.
+
+        To use this component, call `new_data_source`, specifying the file path(s) of your data file(s)
+        in the `paths` parameter; then put the value returned by this method into the `update_processor_class`
+        property of your LaunchDarkly client configuration (:class:ldclient.config.Config).
+        ::
+
+            data_source = Files.new_data_source(paths=[ myFilePath ])
+            config = Config(update_processor_class=data_source)
+
+        This will cause the client not to connect to LaunchDarkly to get feature flags. The
+        client may still make network connections to send analytics events, unless you have disabled
+        this with Config.send_events or Config.offline.
+
+        Flag data files can be either JSON or YAML (in order to use YAML, you must install the 'pyyaml'
+        package). They contain an object with three possible properties:
+
+        * "flags": Feature flag definitions.
+        * "flagValues": Simplified feature flags that contain only a value.
+        * "segments": User segment definitions.
+
+        The format of the data in "flags" and "segments" is defined by the LaunchDarkly application
+        and is subject to change. Rather than trying to construct these objects yourself, it is simpler
+        to request existing flags directly from the LaunchDarkly server in JSON format, and use this
+        output as the starting point for your file.
In Linux you would do this: + :: + + curl -H "Authorization: {your sdk key}" https://app.launchdarkly.com/sdk/latest-all + + The output will look something like this (but with many more properties): + :: + + { + "flags": { + "flag-key-1": { + "key": "flag-key-1", + "on": true, + "variations": [ "a", "b" ] + } + }, + "segments": { + "segment-key-1": { + "key": "segment-key-1", + "includes": [ "user-key-1" ] + } + } + } + + Data in this format allows the SDK to exactly duplicate all the kinds of flag behavior supported + by LaunchDarkly. However, in many cases you will not need this complexity, but will just want to + set specific flag keys to specific values. For that, you can use a much simpler format: + :: + + { + "flagValues": { + "my-string-flag-key": "value-1", + "my-boolean-flag-key": true, + "my-integer-flag-key": 3 + } + } + + Or, in YAML: + :: + + flagValues: + my-string-flag-key: "value-1" + my-boolean-flag-key: true + my-integer-flag-key: 1 + + It is also possible to specify both "flags" and "flagValues", if you want some flags + to have simple values and others to have complex behavior. However, it is an error to use the + same flag key or segment key more than once, either in a single file or across multiple files. + + If the data source encounters any error in any file-- malformed content, a missing file, or a + duplicate key-- it will not load flags from any of the files. + + :param array paths: The paths of the source files for loading flag data. These may be absolute paths + or relative to the current working directory. Files will be parsed as JSON unless the 'pyyaml' + package is installed, in which case YAML is also allowed. + :param bool auto_update: (default: false) True if the data source should watch for changes to the source file(s) + and reload flags whenever there is a change. The default implementation of this feature is based on + polling the filesystem, which may not perform well; if you install the 'watchdog' package (not + included by default, to avoid adding unwanted dependencies to the SDK), its native file watching + mechanism will be used instead. Note that auto-updating will only work if all of the files you + specified have valid directory paths at startup time. + :param float poll_interval: (default: 1) The minimum interval, in seconds, between checks for file + modifications-- used only if `auto_update` is true, and if the native file-watching mechanism from + `watchdog` is not being used. + :param bool force_polling: (default: false) True if the data source should implement auto-update via + polling the filesystem even if a native mechanism is available. This is mainly for SDK testing. 
+ + :return: an object (actually a lambda) to be stored in the `update_processor_class` configuration property + """ + return lambda config, store, ready : _FileDataSource(store, ready, paths, auto_update, poll_interval, force_polling) diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index 68d1e5b7..2e232ec8 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -9,7 +9,7 @@ from ldclient.client import LDClient from ldclient.config import Config from ldclient.feature_store import InMemoryFeatureStore -from ldclient.file_data_source import FileDataSource +from ldclient.integrations import Files from ldclient.versioned_data_kind import FEATURES, SEGMENTS @@ -94,7 +94,7 @@ def teardown_function(): def make_data_source(**kwargs): global data_source - data_source = FileDataSource.factory(**kwargs)(Config(), store, ready) + data_source = Files.new_data_source(**kwargs)(Config(), store, ready) return data_source def make_temp_file(content): @@ -217,7 +217,7 @@ def test_reloads_modified_file_in_polling_mode(): def test_evaluates_full_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: - factory = FileDataSource.factory(paths = path) + factory = Files.new_data_source(paths = path) client = LDClient(config=Config(update_processor_class = factory, send_events = False)) value = client.variation('flag1', { 'key': 'user' }, '') assert value == 'on' @@ -229,7 +229,7 @@ def test_evaluates_full_flag_with_client_as_expected(): def test_evaluates_simplified_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: - factory = FileDataSource.factory(paths = path) + factory = Files.new_data_source(paths = path) client = LDClient(config=Config(update_processor_class = factory, send_events = False)) value = client.variation('flag2', { 'key': 'user' }, '') assert value == 'value2' From c8585baab7ee86b2087653451e47013c034b8cd6 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 14:11:58 -0800 Subject: [PATCH 041/356] don't need future.with_statement in Python 2.6+ --- ldclient/client.py | 2 +- ldclient/util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 30c37e53..9cab10b6 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -1,4 +1,4 @@ -from __future__ import division, with_statement, absolute_import +from __future__ import division, absolute_import import hashlib import hmac diff --git a/ldclient/util.py b/ldclient/util.py index 618a7d9e..4cfb0324 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -1,4 +1,4 @@ -from __future__ import division, with_statement, absolute_import +from __future__ import division, absolute_import import certifi import logging From 2a6d53be3c9e1e2e7df87d3f89a43227cb6d402e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 14:15:16 -0800 Subject: [PATCH 042/356] don't need future.absolute_import in Python 2.6+ --- ldclient/client.py | 2 +- ldclient/event_processor.py | 2 -- ldclient/feature_requester.py | 2 -- ldclient/sse_client.py | 2 -- ldclient/streaming.py | 1 - ldclient/util.py | 2 +- 6 files changed, 2 insertions(+), 9 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 9cab10b6..29d0c756 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -1,4 +1,4 @@ -from __future__ import division, absolute_import +from __future__ import division import hashlib import hmac diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py 
index 3b89420f..9a0cae83 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from collections import namedtuple from email.utils import parsedate import errno diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 786c1708..046c594f 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - from collections import namedtuple import json import urllib3 diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index c97eb2d4..7e792961 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -1,5 +1,3 @@ -from __future__ import absolute_import - import re import time import warnings diff --git a/ldclient/streaming.py b/ldclient/streaming.py index bac83433..20599eb1 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -1,4 +1,3 @@ -from __future__ import absolute_import from collections import namedtuple import json diff --git a/ldclient/util.py b/ldclient/util.py index 4cfb0324..4612f871 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -1,4 +1,4 @@ -from __future__ import division, absolute_import +from __future__ import division import certifi import logging From c32793ade292b7f80b1b80fafbed0adbb76c44c2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 14:19:54 -0800 Subject: [PATCH 043/356] don't need future.print_function when you're printing a single string with parentheses --- demo/demo.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/demo/demo.py b/demo/demo.py index 987a05d4..8ac745f4 100644 --- a/demo/demo.py +++ b/demo/demo.py @@ -1,5 +1,3 @@ -from __future__ import print_function - import logging import sys From 4971d17eaa4b79528128a91910e1fe63b2afdfba Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 14:24:21 -0800 Subject: [PATCH 044/356] don't need future.division since we're not using the / operator --- ldclient/client.py | 2 -- ldclient/util.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 29d0c756..6d6b32c7 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -1,5 +1,3 @@ -from __future__ import division - import hashlib import hmac import threading diff --git a/ldclient/util.py b/ldclient/util.py index 4612f871..fbb2f11d 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -1,5 +1,3 @@ -from __future__ import division - import certifi import logging import sys From 0abadf1efab4637f48c251d7bceaed1d724030e5 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 14:24:35 -0800 Subject: [PATCH 045/356] rm unused dependency --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8787ac53..f86f3039 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ backoff>=1.4.3 certifi>=2018.4.16 expiringdict>=1.1.4 -future>=0.16.0 six>=1.10.0 pyRFC3339>=1.0 jsonpickle==0.9.3 From e228e90771c188893597b5e49c7559efac332a82 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 15:17:12 -0800 Subject: [PATCH 046/356] Revert "rm unused dependency" This reverts commit 0abadf1efab4637f48c251d7bceaed1d724030e5. 
--- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index f86f3039..8787ac53 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,7 @@ backoff>=1.4.3 certifi>=2018.4.16 expiringdict>=1.1.4 +future>=0.16.0 six>=1.10.0 pyRFC3339>=1.0 jsonpickle==0.9.3 From 122d7a613b3e3228e98fa63a0b01b10b038e389f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Jan 2019 17:11:06 -0800 Subject: [PATCH 047/356] don't need builtins.object unless we're defining an iterator, and even then we don't need it --- ldclient/client.py | 2 -- ldclient/sse_client.py | 4 ++++ requirements.txt | 1 - testing/test_ldclient.py | 1 - testing/test_user_filter.py | 1 - 5 files changed, 4 insertions(+), 5 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 6d6b32c7..ff96475b 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -3,8 +3,6 @@ import threading import traceback -from builtins import object - from ldclient.config import Config as Config from ldclient.event_processor import NullEventProcessor from ldclient.feature_requester import FeatureRequesterImpl diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index 7e792961..5b41413b 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -109,6 +109,10 @@ def __next__(self): return msg + # The following two lines make our iterator class compatible with both Python 2.x and 3.x, + # even though they expect different magic method names. We could accomplish the same thing + # by importing builtins.object and deriving from that, but this way it's easier to see + # what we're doing. if six.PY2: next = __next__ diff --git a/requirements.txt b/requirements.txt index 8787ac53..f86f3039 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,6 @@ backoff>=1.4.3 certifi>=2018.4.16 expiringdict>=1.1.4 -future>=0.16.0 six>=1.10.0 pyRFC3339>=1.0 jsonpickle==0.9.3 diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index a31d2324..0e6c33a2 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -1,4 +1,3 @@ -from builtins import object from ldclient.client import LDClient, Config from ldclient.event_processor import NullEventProcessor from ldclient.feature_store import InMemoryFeatureStore diff --git a/testing/test_user_filter.py b/testing/test_user_filter.py index 15550541..e1711ffb 100644 --- a/testing/test_user_filter.py +++ b/testing/test_user_filter.py @@ -1,4 +1,3 @@ -from builtins import object import json from ldclient.client import Config from ldclient.user_filter import UserFilter From 6a45e700f1cc7e12b4ad44b95c1a3b05208dc15b Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 29 Jan 2019 12:49:19 -0800 Subject: [PATCH 048/356] update docs with note on portability --- CONTRIBUTING.md | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 88668de9..fe972301 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,7 @@ Development information (for developing this module itself) 1. One-time setup: - mkvirtualenv python-client + mkvirtualenv python-client 1. When working on the project be sure to activate the python-client virtualenv using the technique of your choosing. @@ -17,11 +17,15 @@ Development information (for developing this module itself) pip install -r requirements.txt pip install -r test-requirements.txt -1. Run tests: You'll need redis running locally on its default port of 6379. +1. 
When running unit tests, in order for `test_feature_store.py` to run, you'll need all of the supported databases (Redis, Consul, DynamoDB) running locally on their default ports. + 1. If you want integration tests to run, set the ```LD_SDK_KEY``` environment variable to a valid production SDK Key. + 1. ```$ py.test testing``` -Developing with different python versions +1. All code must be compatible with all supported Python versions as described in README. Most portability issues are addressed by using the `six` package. We are avoiding the use of `__future__` imports, since they can easily be omitted by mistake causing code in one file to behave differently from another; instead, whenever possible, use an explicit approach that makes it clear what the desired behavior is in all Python versions (e.g. if you want to do floor division, use `//`; if you want to divide as floats, explicitly cast to floats). + +Developing with different Python versions ----------------------------------------- Example for switching to python 3: From 858e001970ea0011a4ae5b84bba70050331aff38 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 29 Jan 2019 12:50:09 -0800 Subject: [PATCH 049/356] typo --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fe972301..af5083c2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -28,6 +28,6 @@ Development information (for developing this module itself) Developing with different Python versions ----------------------------------------- -Example for switching to python 3: +Example for switching to Python 3: ```virtualenv -p `which python3` ~/.virtualenvs/python-client``` \ No newline at end of file From d4d4b8aa2b07e5328c43e90e3244e58a2006bdb6 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 30 Jan 2019 17:13:12 -0800 Subject: [PATCH 050/356] update package metadata prior to release --- setup.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index bf59d9a3..8a075cf8 100644 --- a/setup.py +++ b/setup.py @@ -19,12 +19,16 @@ def parse_requirements(filename): install_reqs = parse_requirements('requirements.txt') test_reqs = parse_requirements('test-requirements.txt') redis_reqs = parse_requirements('redis-requirements.txt') +consul_reqs = parse_requirements('consul-requirements.txt') +dynamodb_reqs = parse_requirements('dynamodb-requirements.txt') # reqs is a list of requirement # e.g. 
['django==1.5.1', 'mezzanine==1.4.6'] reqs = [ir for ir in install_reqs] testreqs = [ir for ir in test_reqs] redisreqs = [ir for ir in redis_reqs] +consulreqs = [ir for ir in consul_reqs] +dynamodbreqs = [ir for ir in dynamodb_reqs] class PyTest(Command): @@ -63,11 +67,14 @@ def run(self): 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', ], extras_require={ - "redis": redisreqs + "redis": redisreqs, + "consul": consulreqs, + "dynamodb": dynamodbreqs }, tests_require=testreqs, cmdclass={'test': PyTest}, From eaf677e28bcec07e21457bae8fd14b83a647ccdb Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 11:22:39 -0800 Subject: [PATCH 051/356] add test for whether the package can be installed --- .circleci/config.yml | 5 ++++- .gitignore | 3 +-- scripts/test-packaging.sh | 13 +++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) create mode 100755 scripts/test-packaging.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 714c5ee1..bb23c00d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,7 +15,7 @@ test-template: &test-template - run: name: install requirements command: | - sudo pip install --upgrade pip setuptools; + sudo pip install --upgrade pip setuptools virtualenv; sudo pip install -r test-requirements.txt; if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then sudo pip install -r consul-requirements.txt; @@ -32,6 +32,9 @@ test-template: &test-template else pytest -s --junitxml=test-reports/junit.xml testing; fi + - run: + name: test packaging/install + command: ./scripts/test-packaging.sh - store_test_results: path: test-reports - store_artifacts: diff --git a/.gitignore b/.gitignore index d988c61f..c949312e 100644 --- a/.gitignore +++ b/.gitignore @@ -66,5 +66,4 @@ p2venv .idea *.iml .vagrant -ldd/py2 -ldd/py3 +test-packaging-venv diff --git a/scripts/test-packaging.sh b/scripts/test-packaging.sh new file mode 100755 index 00000000..7ce81fa9 --- /dev/null +++ b/scripts/test-packaging.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +rm -r dist +python setup.py sdist + +VENV=`pwd`/test-packaging-venv +rm -rf $VENV +virtualenv $VENV +source $VENV/bin/activate + +pip install dist/*.tar.gz From eae17f640fa8775e88b295d1fbec021bc0305e15 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 11:46:33 -0800 Subject: [PATCH 052/356] fix build --- .circleci/config.yml | 8 +++++--- scripts/test-packaging.sh | 13 +++++++------ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index bb23c00d..6c61b0b8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,11 +16,13 @@ test-template: &test-template name: install requirements command: | sudo pip install --upgrade pip setuptools virtualenv; - sudo pip install -r test-requirements.txt; + virtualenv env; + source ./env/bin/activate; + pip install -r test-requirements.txt; if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then - sudo pip install -r consul-requirements.txt; + pip install -r consul-requirements.txt; fi; - sudo python setup.py install; + python setup.py install; pip freeze - run: name: run tests diff --git a/scripts/test-packaging.sh b/scripts/test-packaging.sh index 7ce81fa9..8fcb7e3a 100755 --- a/scripts/test-packaging.sh +++ b/scripts/test-packaging.sh @@ -2,12 +2,13 
@@ set -e -rm -r dist +rm -rf dist python setup.py sdist -VENV=`pwd`/test-packaging-venv -rm -rf $VENV -virtualenv $VENV -source $VENV/bin/activate +rm -rf test-packaging +mkdir test-packaging +cd test-packaging +virtualenv env +source env/bin/activate -pip install dist/*.tar.gz +pip install ../dist/*.tar.gz From 0892c971f40f0a240a695cccebed5e4ab43434c3 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 11:48:57 -0800 Subject: [PATCH 053/356] fix build --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6c61b0b8..0a6121c2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -27,6 +27,7 @@ test-template: &test-template - run: name: run tests command: | + source ./env/bin/activate; mkdir test-reports; if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; From e21aa659a0215fdc7e7621b0445cf1b1c8351845 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 11:54:57 -0800 Subject: [PATCH 054/356] fix manifest --- MANIFEST.in | 2 ++ 1 file changed, 2 insertions(+) diff --git a/MANIFEST.in b/MANIFEST.in index 4ec6f0b0..1a398256 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,7 @@ include requirements.txt include README.txt include test-requirements.txt +include consul-requirements.txt +include dynamodb-requirements.txt include redis-requirements.txt include python2.6-requirements.txt \ No newline at end of file From 0520a9bffcad45a334d957acf23de6f4ea21ef2d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 11:58:25 -0800 Subject: [PATCH 055/356] skip test on 3.3 --- .circleci/config.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0a6121c2..5b15a1b7 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -36,8 +36,12 @@ test-template: &test-template pytest -s --junitxml=test-reports/junit.xml testing; fi - run: - name: test packaging/install - command: ./scripts/test-packaging.sh + name: test packaging/install # this can't be run on 3.3 because the "wheel" package isn't available; + # that's OK because we never build our actual published package on 3.3 + command: | + if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then + ./scripts/test-packaging.sh; + fi - store_test_results: path: test-reports - store_artifacts: From 9deef2d8eb323fb7767d753e78e26941406bde10 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 12:14:30 -0800 Subject: [PATCH 056/356] misc fixes for 3.3 --- .circleci/config.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 5b15a1b7..fb3b8904 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,9 +15,10 @@ test-template: &test-template - run: name: install requirements command: | - sudo pip install --upgrade pip setuptools virtualenv; + sudo pip install --upgrade pip virtualenv; virtualenv env; source ./env/bin/activate; + pip install setuptools; pip install -r test-requirements.txt; if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then pip install -r consul-requirements.txt; @@ -36,12 +37,8 @@ test-template: &test-template pytest -s --junitxml=test-reports/junit.xml testing; fi - run: - name: test packaging/install # this can't be run on 3.3 because the "wheel" package isn't available; - # that's OK because we never build our actual published package on 3.3 - command: | - if [[ "$CIRCLE_JOB" 
!= "test-3.3" ]]; then - ./scripts/test-packaging.sh; - fi + name: test packaging/install + command: ./scripts/test-packaging.sh - store_test_results: path: test-reports - store_artifacts: From e44abfac3e6f108b7041a594baedc03069b620ee Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 12:21:13 -0800 Subject: [PATCH 057/356] another 3.3 fix --- .circleci/config.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index fb3b8904..a7e1d810 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,14 +16,11 @@ test-template: &test-template name: install requirements command: | sudo pip install --upgrade pip virtualenv; - virtualenv env; - source ./env/bin/activate; - pip install setuptools; - pip install -r test-requirements.txt; + sudo pip install -r test-requirements.txt; if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then - pip install -r consul-requirements.txt; + sudo pip install -r consul-requirements.txt; fi; - python setup.py install; + sudo python setup.py install; pip freeze - run: name: run tests @@ -38,7 +35,13 @@ test-template: &test-template fi - run: name: test packaging/install - command: ./scripts/test-packaging.sh + # Note, virtualenv isn't supported on Python 3.3 and this test requires virtualenv. But we + # never build our published package on 3.3 anyway. + command: | + if [[ "$CIRCLE_JOB" == "test-3.3" ]]; then + sudo rm -rf dist *.egg-info; + ./scripts/test-packaging.sh; + fi - store_test_results: path: test-reports - store_artifacts: From 273219aef197d4515f7d25d4ca8e308ac23feede Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 12:22:23 -0800 Subject: [PATCH 058/356] misc fixes --- .circleci/config.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a7e1d810..efc3d5ed 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -25,7 +25,6 @@ test-template: &test-template - run: name: run tests command: | - source ./env/bin/activate; mkdir test-reports; if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; From b3944f9769b1c7e55efd00659676229a11792068 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 12:23:55 -0800 Subject: [PATCH 059/356] misc fixes --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index efc3d5ed..ec3070fc 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -37,7 +37,7 @@ test-template: &test-template # Note, virtualenv isn't supported on Python 3.3 and this test requires virtualenv. But we # never build our published package on 3.3 anyway. 
command: | - if [[ "$CIRCLE_JOB" == "test-3.3" ]]; then + if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then sudo rm -rf dist *.egg-info; ./scripts/test-packaging.sh; fi From b4792e6870a736f04531f04dd1941bd3f40ca224 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 14:53:06 -0800 Subject: [PATCH 060/356] add test for importing the built package --- .circleci/config.yml | 2 +- {scripts => test-packaging}/test-packaging.sh | 5 +++-- test-packaging/test.py | 3 +++ 3 files changed, 7 insertions(+), 3 deletions(-) rename {scripts => test-packaging}/test-packaging.sh (76%) create mode 100644 test-packaging/test.py diff --git a/.circleci/config.yml b/.circleci/config.yml index ec3070fc..603bbf54 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -39,7 +39,7 @@ test-template: &test-template command: | if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then sudo rm -rf dist *.egg-info; - ./scripts/test-packaging.sh; + ./test-packaging/test-packaging.sh; fi - store_test_results: path: test-reports diff --git a/scripts/test-packaging.sh b/test-packaging/test-packaging.sh similarity index 76% rename from scripts/test-packaging.sh rename to test-packaging/test-packaging.sh index 8fcb7e3a..50a40364 100755 --- a/scripts/test-packaging.sh +++ b/test-packaging/test-packaging.sh @@ -5,10 +5,11 @@ set -e rm -rf dist python setup.py sdist -rm -rf test-packaging -mkdir test-packaging cd test-packaging +rm -rf env virtualenv env source env/bin/activate pip install ../dist/*.tar.gz + +python test.py diff --git a/test-packaging/test.py b/test-packaging/test.py new file mode 100644 index 00000000..5337cb2e --- /dev/null +++ b/test-packaging/test.py @@ -0,0 +1,3 @@ +import ldclient + +print("Successfully installed and imported ldclient") From 7c2b501794eb4c992314d99d7fe57855c05dafc2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 14:59:39 -0800 Subject: [PATCH 061/356] add submodule imports --- test-packaging/test.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test-packaging/test.py b/test-packaging/test.py index 5337cb2e..62f8b636 100644 --- a/test-packaging/test.py +++ b/test-packaging/test.py @@ -1,3 +1,5 @@ import ldclient +import ldclient.integrations +import ldclient.interfaces print("Successfully installed and imported ldclient") From a9d481ce7a5efca76db29f719787cf7be5fbfb21 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 15:08:17 -0800 Subject: [PATCH 062/356] ensure that all packages are included in distribution --- setup.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index 7d3c80cc..92d0c7d0 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,4 @@ -try: - from setuptools import setup, Command -except ImportError: - from distutils.core import setup +from setuptools import setup, Command import sys import uuid @@ -51,7 +48,7 @@ def run(self): version=ldclient_version, author='LaunchDarkly', author_email='team@launchdarkly.com', - packages=['ldclient'], + packages=find_packages(), url='https://github.com/launchdarkly/python-client', description='LaunchDarkly SDK for Python', long_description='LaunchDarkly SDK for Python', From 71b821f969377d403635187b8941046e31e31156 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Jan 2019 15:09:27 -0800 Subject: [PATCH 063/356] fix import --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 92d0c7d0..3c3a06b5 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -from setuptools import setup, Command +from setuptools import 
find_packages, setup, Command import sys import uuid From 06fc3b25b502ac1e59efb71fc449443fa79b833a Mon Sep 17 00:00:00 2001 From: Harpo roeder Date: Tue, 5 Feb 2019 10:22:29 -0800 Subject: [PATCH 064/356] add basic pipeline and install deps --- azure-pipelines.yml | 50 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 azure-pipelines.yml diff --git a/azure-pipelines.yml b/azure-pipelines.yml new file mode 100644 index 00000000..7910ee75 --- /dev/null +++ b/azure-pipelines.yml @@ -0,0 +1,50 @@ +jobs: + - job: build + pool: + vmImage: 'vs2017-win2016' + steps: + - task: PowerShell@2 + displayName: 'Setup Dynamo' + inputs: + targetType: inline + workingDirectory: $(System.DefaultWorkingDirectory) + script: | + iwr -outf dynamo.zip https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip + mkdir dynamo + Expand-Archive -Path dynamo.zip -DestinationPath dynamo + cd dynamo + javaw -D"java.library.path=./DynamoDBLocal_lib" -jar DynamoDBLocal.jar + - task: PowerShell@2 + displayName: 'Setup Consul' + inputs: + targetType: inline + workingDirectory: $(System.DefaultWorkingDirectory) + script: | + iwr -outf consul.zip https://releases.hashicorp.com/consul/1.4.2/consul_1.4.2_windows_amd64.zip + mkdir consul + Expand-Archive -Path consul.zip -DestinationPath consul + cd consul + sc.exe create "Consul" binPath="$(System.DefaultWorkingDirectory)/consul/consul.exe agent -dev" + sc.exe start "Consul" + - task: PowerShell@2 + displayName: 'Setup Redis' + inputs: + targetType: inline + workingDirectory: $(System.DefaultWorkingDirectory) + script: | + iwr -outf redis.zip https://github.com/MicrosoftArchive/redis/releases/download/win-3.0.504/Redis-x64-3.0.504.zip + mkdir redis + Expand-Archive -Path redis.zip -DestinationPath redis + cd redis + ./redis-server --service-install + ./redis-server --service-start + - task: PowerShell@2 + displayName: 'Setup SDK and Test' + inputs: + targetType: inline + workingDirectory: $(System.DefaultWorkingDirectory) + script: | + python --version + pip install -r test-requirements.txt + pip install -r consul-requirements.txt + python setup.py install From 0165540c649c9e1ac5805fef4a011bc49435a480 Mon Sep 17 00:00:00 2001 From: Harpo roeder Date: Tue, 5 Feb 2019 10:30:02 -0800 Subject: [PATCH 065/356] add pytest --- azure-pipelines.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 7910ee75..b7f19ff3 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -48,3 +48,5 @@ jobs: pip install -r test-requirements.txt pip install -r consul-requirements.txt python setup.py install + mkdir test-reports + pytest -s --junitxml=test-reports/junit.xml testing; From 7c9f4e2765edd5d6c448779bcc4bfc541ba0d49a Mon Sep 17 00:00:00 2001 From: Harpo roeder Date: Tue, 5 Feb 2019 10:44:17 -0800 Subject: [PATCH 066/356] remove explicit install of deps --- azure-pipelines.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index b7f19ff3..c83d3c60 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -45,8 +45,6 @@ jobs: workingDirectory: $(System.DefaultWorkingDirectory) script: | python --version - pip install -r test-requirements.txt - pip install -r consul-requirements.txt python setup.py install mkdir test-reports pytest -s --junitxml=test-reports/junit.xml testing; From a38b9578dadc603c5254f705b2ce86f453465f63 Mon Sep 17 00:00:00 2001 From: Harpo roeder Date: Tue, 5 Feb 2019 10:51:33 -0800 Subject: [PATCH 067/356] add 
other db deps --- azure-pipelines.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index c83d3c60..e4f8f7af 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -45,6 +45,10 @@ jobs: workingDirectory: $(System.DefaultWorkingDirectory) script: | python --version + pip install -r test-requirements.txt + pip install -r redis-requirements.txt + pip install -r consul-requirements.txt + pip install -r dynamodb-requirements.txt python setup.py install mkdir test-reports pytest -s --junitxml=test-reports/junit.xml testing; From 0b6d28f73200314563fcef5322d3d4d315526a3d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 12:25:17 -0800 Subject: [PATCH 068/356] major cleanup of doc comments, add Sphinx build script --- docs/Makefile | 19 +++ docs/conf.py | 174 +++++++++++++++++++++++++++ docs/index.rst | 21 ++++ docs/ldclient.rst | 83 +++++++++++++ ldclient/__init__.py | 54 +++++++-- ldclient/client.py | 90 +++++++++----- ldclient/config.py | 32 ++++- ldclient/event_processor.py | 5 + ldclient/event_summarizer.py | 5 + ldclient/feature_requester.py | 5 + ldclient/feature_store.py | 48 +++++++- ldclient/feature_store_helpers.py | 32 ++++- ldclient/file_data_source.py | 34 ++---- ldclient/fixed_thread_pool.py | 5 + ldclient/flag.py | 49 +++++--- ldclient/flags_state.py | 26 +++- ldclient/integrations.py | 193 ++++++++++++------------------ ldclient/interfaces.py | 45 ++++--- ldclient/lru_cache.py | 15 ++- ldclient/memoized_value.py | 13 +- ldclient/operators.py | 5 + ldclient/polling.py | 5 + ldclient/redis_feature_store.py | 7 +- ldclient/repeating_timer.py | 5 + ldclient/rwlock.py | 5 + ldclient/sse_client.py | 9 +- ldclient/streaming.py | 5 + ldclient/user_filter.py | 5 + ldclient/util.py | 5 + ldclient/versioned_data_kind.py | 19 ++- 30 files changed, 763 insertions(+), 255 deletions(-) create mode 100644 docs/Makefile create mode 100644 docs/conf.py create mode 100644 docs/index.rst create mode 100644 docs/ldclient.rst diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 00000000..13edc19b --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,19 @@ +# Minimal makefile for Sphinx documentation +# + +.PHONY: help install html + +SPHINXOPTS = +SPHINXBUILD = sphinx-build +SPHINXPROJ = ldclient-py +SOURCEDIR = . +BUILDDIR = build + +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +install: + pip install sphinx + +html: install + @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 00000000..479f3bc8 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/master/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. 
+# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +import os +import sys + +sys.path.insert(0, os.path.abspath('..')) + +import ldclient + +# -- Project information ----------------------------------------------------- + +project = u'ldclient-py' +copyright = u'2019, LaunchDarkly' +author = u'LaunchDarkly' + +# The short X.Y version. +version = ldclient.__version__ +# The full version, including alpha/beta/rc tags. +release = ldclient.__version__ + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.viewcode', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path . +exclude_patterns = ['build'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'ldclient-pydoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. 
List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'ldclient-py.tex', u'ldclient-py Documentation', + u'LaunchDarkly', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'ldclient-py', u'ldclient-py Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'ldclient-py', u'ldclient-py Documentation', + author, 'ldclient-py', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Extension configuration ------------------------------------------------- + +autodoc_default_options = { + 'members': None, + 'show-inheritance': None, + 'special-members': None, + 'undoc-members': None +} diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 00000000..735da978 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,21 @@ +.. ldclient-py documentation master file, created by + sphinx-quickstart on Mon Feb 4 13:16:49 2019. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to ldclient-py's documentation! +======================================= + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + +.. automodule:: ldclient + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/ldclient.rst b/docs/ldclient.rst new file mode 100644 index 00000000..4a212c16 --- /dev/null +++ b/docs/ldclient.rst @@ -0,0 +1,83 @@ +ldclient package +================ + +Module contents +--------------- + +.. automodule:: ldclient + :members: + :undoc-members: + :show-inheritance: + +Submodules +---------- + +ldclient.client module +---------------------- + +.. automodule:: ldclient.client + :members: LDClient + :special-members: __init__ + :show-inheritance: + +ldclient.config module +---------------------- + +.. automodule:: ldclient.config + :members: + :special-members: __init__ + :show-inheritance: + +ldclient.feature\_store module +------------------------------ + +.. automodule:: ldclient.feature_store + :members: + :special-members: __init__ + :show-inheritance: + +ldclient.feature\_store\_helpers module +--------------------------------------- + +.. automodule:: ldclient.feature_store_helpers + :members: + :special-members: __init__ + :show-inheritance: + +ldclient.flag module +-------------------- + +.. automodule:: ldclient.flag + :members: EvaluationDetail + :special-members: __init__ + :show-inheritance: + +ldclient.flags\_state module +---------------------------- + +.. automodule:: ldclient.flags_state + :members: + :show-inheritance: + +ldclient.integrations module +---------------------------- + +.. automodule:: ldclient.integrations + :members: + :special-members: __init__ + :show-inheritance: + +ldclient.interfaces module +-------------------------- + +.. automodule:: ldclient.interfaces + :members: + :special-members: __init__ + :show-inheritance: + +ldclient.versioned\_data\_kind module +------------------------------------- + +.. 
automodule:: ldclient.versioned_data_kind :members: :show-inheritance: diff --git a/ldclient/__init__.py b/ldclient/__init__.py index f693d989..5be96db9 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -1,3 +1,7 @@ +""" +The ldclient module contains the most common top-level entry points for the SDK. +""" + import logging from ldclient.rwlock import ReadWriteLock @@ -20,12 +24,16 @@ __lock = ReadWriteLock() -# 2 Use Cases: -# 1. Initial setup: sets the config for the uninitialized client -# 2. Allows on-the-fly changing of the config. When this function is called after the client has been initialized -# the client will get re-initialized with the new config. In order for this to work, the return value of -# ldclient.get() should never be assigned def set_config(config): + """Sets the configuration for the shared SDK client instance. + + If this is called prior to :func:`ldclient.get()`, it stores the configuration that will be used when the + client is initialized. If it is called after the client has already been initialized, the client will be + re-initialized with the new configuration (this will result in the next call to :func:`ldclient.get()` + returning a new client instance). + + :param ldclient.config.Config config: the new client configuration object + """ global __config global __client global __lock @@ -42,12 +50,18 @@ def set_config(config): __lock.unlock() -# 2 Use Cases: -# 1. Initial setup: sets the sdk key for the uninitialized client -# 2. Allows on-the-fly changing of the sdk key. When this function is called after the client has been initialized -# the client will get re-initialized with the new sdk key. In order for this to work, the return value of -# ldclient.get() should never be assigned def set_sdk_key(sdk_key): + """Sets the SDK key for the shared SDK client instance. + + If this is called prior to :func:`ldclient.get()`, it stores the SDK key that will be used when the client is + initialized. If it is called after the client has already been initialized, the client will be + re-initialized with the new SDK key (this will result in the next call to :func:`ldclient.get()` returning a + new client instance). + + If you need to set any configuration options other than the SDK key, use :func:`ldclient.set_config()` instead. + + :param string sdk_key: the new SDK key + """ global __config global __client global __lock @@ -76,6 +90,18 @@ def set_sdk_key(sdk_key): def get(): + """Returns the shared SDK client instance, using the current global configuration. + + To use the SDK as a singleton, first make sure you have called :func:`ldclient.set_sdk_key()` or + :func:`ldclient.set_config()` at startup time. Then ``get()`` will return the same shared + :class:`ldclient.client.LDClient` instance each time. The client will be initialized if it has + not been already. + + If you need to create multiple client instances with different configurations, instead of this + singleton approach you can call the :class:`ldclient.client.LDClient` constructor directly. + + :rtype: ldclient.client.LDClient + """ global __config global __client global __lock @@ -96,8 +122,14 @@ def get(): -# Add a NullHandler for Python < 2.7 compatibility class NullHandler(logging.Handler): + """A :class:`logging.Handler` implementation that does nothing. + + .. deprecated:: 6.0.0 + You should not need to use this class. It was originally used in order to support Python 2.6, + which requires that at least one logging handler must always be configured.
However, the SDK + no longer supports Python 2.6. + """ def emit(self, record): pass diff --git a/ldclient/client.py b/ldclient/client.py index ff96475b..61be996e 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -1,3 +1,7 @@ +""" +This submodule contains the client class that provides most of the SDK functionality. +""" + import hashlib import hmac import threading @@ -55,6 +59,15 @@ def initialized(self): class LDClient(object): + """The LaunchDarkly SDK client object. + + Applications should configure the client at startup time and continue to use it throughout the lifetime + of the application, rather than creating instances on the fly. The best way to do this is with the + singleton methods :func:`ldclient.set_sdk_key()`, :func:`ldclient.set_config()`, and :func:`ldclient.get()`. + However, you may also call the constructor directly if you need to maintain multiple instances. + + Client instances are thread-safe. + """ def __init__(self, sdk_key=None, config=None, start_wait=5): """Constructs a new LDClient instance. @@ -63,7 +76,7 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): client instance. :param string sdk_key: the SDK key for your LaunchDarkly environment - :param Config config: optional custom configuration + :param ldclient.config.Config config: optional custom configuration :param float start_wait: the number of seconds to wait for a successful connection to LaunchDarkly """ check_uwsgi() @@ -157,9 +170,9 @@ def _send_event(self, event): def track(self, event_name, user, data=None): """Tracks that a user performed an event. - :param string event_name: The name of the event. - :param dict user: The attributes of the user. - :param data: Optional additional data associated with the event. + :param string event_name: the name of the event, which may correspond to a goal in A/B tests + :param dict user: the attributes of the user + :param data: optional additional data associated with the event """ self._sanitize_user(user) if user is None or user.get('key') is None: @@ -169,6 +182,10 @@ def track(self, event_name, user, data=None): def identify(self, user): """Registers the user. + This simply creates an analytics event that will transmit the given user properties to + LaunchDarkly, so that the user will be visible on your dashboard even if you have not + evaluated any flags for that user. It has no other effect. + :param dict user: attributes of the user to register """ self._sanitize_user(user) @@ -192,13 +209,20 @@ def is_initialized(self): def flush(self): """Flushes all pending events. + + Normally, batches of events are delivered in the background at intervals determined by the + ``flush_interval`` property of :class:`ldclient.config.Config`. Calling ``flush()`` + schedules the next event delivery to be as soon as possible; however, the delivery still + happens asynchronously on a worker thread, so this method will return immediately. """ if self._config.offline: return return self._event_processor.flush() def toggle(self, key, user, default): - """Deprecated synonym for `variation`. + """Deprecated synonym for :func:`variation()`. + + .. deprecated:: 2.0.0 """ log.warn("Deprecated method: toggle() called. 
Use variation() instead.") return self.variation(key, user, default) @@ -215,27 +239,18 @@ def variation(self, key, user, default): return self._evaluate_internal(key, user, default, False).value def variation_detail(self, key, user, default): - """Determines the variation of a feature flag for a user, like `variation`, but also - provides additional information about how this value was calculated. - - The return value is an EvaluationDetail object, which has three properties: - - `value`: the value that was calculated for this user (same as the return value - of `variation`) - - `variation_index`: the positional index of this value in the flag, e.g. 0 for the - first variation - or `None` if the default value was returned - - `reason`: a hash describing the main reason why this value was selected. + """Determines the variation of a feature flag for a user, like :func:`variation()`, but also + provides additional information about how this value was calculated, in the form of an + :class:`ldclient.flag.EvaluationDetail` object. - The `reason` will also be included in analytics events, if you are capturing - detailed event data for this flag. + Calling this method also causes the "reason" data to be included in analytics events, + if you are capturing detailed event data for this flag. :param string key: the unique key for the feature flag :param dict user: a dictionary containing parameters for the end user requesting the flag :param object default: the default value of the flag, to be used if the value is not available from LaunchDarkly - :return: an EvaluationDetail object describing the result + :return: an object describing the result :rtype: EvaluationDetail """ return self._evaluate_internal(key, user, default, True) @@ -307,8 +322,8 @@ def send_event(value, variation=None, flag=None, reason=None): def all_flags(self, user): """Returns all feature flag values for the given user. - This method is deprecated - please use `all_flags_state` instead. Current versions of the - client-side SDK will not generate analytics events correctly if you pass the result of `all_flags`. + This method is deprecated - please use :func:`all_flags_state()` instead. Current versions of the + client-side SDK will not generate analytics events correctly if you pass the result of ``all_flags``. :param dict user: the end user requesting the feature flags :return: a dictionary of feature flag keys to values; returns None if the client is offline, @@ -322,19 +337,27 @@ def all_flags(self, user): def all_flags_state(self, user, **kwargs): """Returns an object that encapsulates the state of all feature flags for a given user, - including the flag values and also metadata that can be used on the front end. + including the flag values and also metadata that can be used on the front end. See the + JavaScript SDK Reference Guide on + `Bootstrapping `_. This method does not send analytics events back to LaunchDarkly. 
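For example, a minimal sketch (``client`` is assumed to be an initialized :class:`ldclient.client.LDClient`, and the user dictionary is illustrative): ::

        state = client.all_flags_state({'key': 'user-key'}, with_reasons=True)
        if state.valid:
            bootstrap_data = state.to_json_dict()  # pass this to the front end to bootstrap the JS SDK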
:param dict user: the end user requesting the feature flags - :param kwargs: optional parameters affecting how the state is computed: set - `client_side_only=True` to limit it to only flags that are marked for use with the - client-side SDK (by default, all flags are included); set `with_reasons=True` to - include evaluation reasons in the state (see `variation_detail`); set - `details_only_for_tracked_flags=True` to omit any metadata that is normally only - used for event generation, such as flag versions and evaluation reasons, unless - the flag has event tracking or debugging turned on - :return: a FeatureFlagsState object (will never be None; its 'valid' property will be False + :param kwargs: optional parameters affecting how the state is computed - see below + + :Keyword Arguments: + * **client_side_only** (*boolean*) -- + set to True to limit it to only flags that are marked for use with the client-side SDK + (by default, all flags are included) + * **with_reasons** (*boolean*) -- + set to True to include evaluation reasons in the state (see :func:`variation_detail()`) + * **details_only_for_tracked_flags** (*boolean*) -- + set to True to omit any metadata that is normally only used for event generation, such + as flag versions and evaluation reasons, unless the flag has event tracking or debugging + turned on + + :return: a FeatureFlagsState object (will never be None; its ``valid`` property will be False if the client is offline, has not been initialized, or the user is None or has no key) :rtype: FeatureFlagsState """ @@ -381,9 +404,10 @@ def all_flags_state(self, user, **kwargs): return state def secure_mode_hash(self, user): - """Generates a hash value for a user. + """Generates a hash value for a user, for use by the JavaScript SDK. - For more info: https://github.com/launchdarkly/js-client#secure-mode + For more information, see the JavaScript SDK Reference Guide on + `Secure mode `_. :param dict user: the attributes of the user :return: a hash string that can be passed to the front end diff --git a/ldclient/config.py b/ldclient/config.py index 35af5110..2d99c72e 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -1,3 +1,9 @@ +""" +This submodule contains the :class:`Config` class for custom configuration of the SDK client. + +Note that the same class can also be imported from the ``ldclient.client`` submodule. +""" + from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_store import InMemoryFeatureStore from ldclient.util import log @@ -7,6 +13,11 @@ class Config(object): + """Advanced configuration options for the SDK client. + + To use these options, create an instance of ``Config`` and pass it to either :func:`ldclient.set_config()` + if you are using the singleton client, or the :class:`ldclient.client.LDClient` constructor otherwise. + """ def __init__(self, sdk_key=None, base_uri='https://app.launchdarkly.com', @@ -59,7 +70,7 @@ def __init__(self, :param bool offline: Whether the client should be initialized in offline mode. In offline mode, default values are returned for all flags and no remote network requests are made. By default, this is false. - :type update_processor_class: (str, Config, FeatureStore) -> UpdateProcessor + :type update_processor_class: (str, ldclient.config.Config, FeatureStore) -> UpdateProcessor :param float poll_interval: The number of seconds between polls for flag updates if streaming is off. :param bool use_ldd: Whether you are using the LaunchDarkly relay proxy in daemon mode. 
In this configuration, the client will not use a streaming connection to listen for updates, but instead @@ -79,9 +90,9 @@ def __init__(self, By default, events will only include the user key, except for one "index" event that provides the full details for the user. :param feature_requester_class: A factory for a FeatureRequester implementation taking the sdk key and config - :type feature_requester_class: (str, Config, FeatureStore) -> FeatureRequester + :type feature_requester_class: (str, ldclient.config.Config, FeatureStore) -> FeatureRequester :param event_processor_class: A factory for an EventProcessor implementation taking the config - :type event_processor_class: (Config) -> EventProcessor + :type event_processor_class: (ldclient.config.Config) -> EventProcessor :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, config, and FeatureStore implementation """ @@ -118,9 +129,18 @@ def __init__(self, @classmethod def default(cls): + """Returns a ``Config`` instance with default values for all properties. + + :rtype: ldclient.config.Config + """ return cls() def copy_with_new_sdk_key(self, new_sdk_key): + """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. + + :param string new_sdk_key: the new SDK key + :rtype: ldclient.config.Config + """ return Config(sdk_key=new_sdk_key, base_uri=self.__base_uri, events_uri=self.__events_uri, @@ -147,6 +167,8 @@ def copy_with_new_sdk_key(self, new_sdk_key): inline_users_in_events=self.__inline_users_in_events) def get_default(self, key, default): + """Used internally by the SDK client to get the default value for a flag. + """ return default if key not in self.__defaults else self.__defaults[key] @property @@ -159,6 +181,10 @@ def base_uri(self): @property def get_latest_flags_uri(self): + """Used internally, deprecated. + + .. deprecated:: 5.0.0 + """ return self.__base_uri + GET_LATEST_FEATURES_PATH @property diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 9a0cae83..03134b64 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -1,3 +1,8 @@ +""" +Implementation details of the analytics event delivery component. +""" +# currently excluded from documentation - see ldclient.rst + from collections import namedtuple from email.utils import parsedate import errno diff --git a/ldclient/event_summarizer.py b/ldclient/event_summarizer.py index 5a9f19ea..2d084ddc 100644 --- a/ldclient/event_summarizer.py +++ b/ldclient/event_summarizer.py @@ -1,3 +1,8 @@ +""" +Implementation details of the analytics event delivery component. +""" +# currently excluded from documentation - see ldclient.rst + from collections import namedtuple diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 046c594f..4414fb7a 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -1,3 +1,8 @@ +""" +Default implementation of feature flag polling requests. +""" +# currently excluded from documentation - see ldclient.rst + from collections import namedtuple import json import urllib3 diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index fccef5b5..efabe82e 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -1,3 +1,11 @@ +""" +This submodule contains basic classes related to the feature store. + +The feature store is the SDK component that holds the last known state of all feature flags, as +received from LaunchDarkly. 
This submodule does not include specific integrations with external +storage systems; those are in :class:`ldclient.integrations`. +""" + from collections import OrderedDict, defaultdict from ldclient.util import log from ldclient.interfaces import FeatureStore @@ -16,10 +24,11 @@ def __init__(self, expiration = DEFAULT_EXPIRATION, capacity = DEFAULT_CAPACITY): """Constructs an instance of CacheConfig. - :param float expiration: The cache TTL, in seconds. Items will be evicted from the cache after + + :param float expiration: the cache TTL, in seconds. Items will be evicted from the cache after this amount of time from the time when they were originally cached. If the time is less than or equal to zero, caching is disabled. - :param int capacity: The maximum number of items that can be in the cache at a time. + :param int capacity: the maximum number of items that can be in the cache at a time """ self._expiration = expiration self._capacity = capacity @@ -28,41 +37,58 @@ def __init__(self, def default(): """Returns an instance of CacheConfig with default properties. By default, caching is enabled. This is the same as calling the constructor with no parameters. - :rtype: CacheConfig + + :rtype: ldclient.feature_store.CacheConfig """ return CacheConfig() @staticmethod def disabled(): """Returns an instance of CacheConfig specifying that caching should be disabled. - :rtype: CacheConfig + + :rtype: ldclient.feature_store.CacheConfig """ return CacheConfig(expiration = 0) @property def enabled(self): + """Returns True if caching is enabled in this configuration. + + :rtype: bool + """ return self._expiration > 0 @property def expiration(self): + """Returns the configured cache TTL, in seconds. + + :rtype: float + """ return self._expiration @property def capacity(self): + """Returns the configured maximum number of cacheable items. + + :rtype: int + """ return self._capacity class InMemoryFeatureStore(FeatureStore): - """ - In-memory implementation of a store that holds feature flags and related data received from the streaming API. + """The default feature store implementation, which holds all data in a thread-safe data structure in memory. """ def __init__(self): + """Constructs an instance of InMemoryFeatureStore. 
+ """ self._lock = ReadWriteLock() self._initialized = False self._items = defaultdict(dict) def get(self, kind, key, callback): + """ + """ try: self._lock.rlock() itemsOfKind = self._items[kind] @@ -78,6 +104,8 @@ def get(self, kind, key, callback): self._lock.runlock() def all(self, kind, callback): + """ + """ try: self._lock.rlock() itemsOfKind = self._items[kind] @@ -86,6 +114,8 @@ def all(self, kind, callback): self._lock.runlock() def init(self, all_data): + """ + """ try: self._lock.rlock() self._items.clear() @@ -98,6 +128,8 @@ def init(self, all_data): # noinspection PyShadowingNames def delete(self, kind, key, version): + """ + """ try: self._lock.rlock() itemsOfKind = self._items[kind] @@ -109,6 +141,8 @@ def delete(self, kind, key, version): self._lock.runlock() def upsert(self, kind, item): + """ + """ key = item['key'] try: self._lock.rlock() @@ -122,6 +156,8 @@ def upsert(self, kind, item): @property def initialized(self): + """ + """ try: self._lock.rlock() return self._initialized diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index 2ba83713..58f9a848 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -1,18 +1,28 @@ +""" +This submodule contains support code for writing feature store implementations. +""" + from expiringdict import ExpiringDict from ldclient.interfaces import FeatureStore class CachingStoreWrapper(FeatureStore): - """CachingStoreWrapper is a partial implementation of :class:ldclient.interfaces.FeatureStore that - delegates the basic functionality to an implementation of :class:ldclient.interfaces.FeatureStoreCore - - while adding optional caching behavior and other logic that would otherwise be repeated in every - feature store implementation. This makes it easier to create new database integrations by implementing - only the database-specific logic. + """A partial implementation of :class:`ldclient.interfaces.FeatureStore`. + + This class delegates the basic functionality to an implementation of + :class:`ldclient.interfaces.FeatureStoreCore` - while adding optional caching behavior and other logic + that would otherwise be repeated in every feature store implementation. This makes it easier to create + new database integrations by implementing only the database-specific logic. """ __INITED_CACHE_KEY__ = "$inited" def __init__(self, core, cache_config): + """Constructs an instance by wrapping a core implementation object. 
+ + :param FeatureStoreCore core: the implementation object + :param ldclient.feature_store.CacheConfig cache_config: the caching parameters + """ self._core = core if cache_config.enabled: self._cache = ExpiringDict(max_len=cache_config.capacity, max_age_seconds=cache_config.expiration) @@ -21,6 +31,8 @@ def __init__(self, core, cache_config): self._inited = False def init(self, all_data): + """ + """ self._core.init_internal(all_data) if self._cache is not None: self._cache.clear() @@ -31,6 +43,8 @@ def init(self, all_data): self._inited = True def get(self, kind, key, callback=lambda x: x): + """ + """ if self._cache is not None: cache_key = self._item_cache_key(kind, key) cached_item = self._cache.get(cache_key) @@ -43,6 +57,8 @@ def get(self, kind, key, callback=lambda x: x): return callback(self._item_if_not_deleted(item)) def all(self, kind, callback=lambda x: x): + """ + """ if self._cache is not None: cache_key = self._all_cache_key(kind) cached_items = self._cache.get(cache_key) @@ -54,10 +70,14 @@ def all(self, kind, callback=lambda x: x): return callback(items) def delete(self, kind, key, version): + """ + """ deleted_item = { "key": key, "version": version, "deleted": True } self.upsert(kind, deleted_item) def upsert(self, kind, item): + """ + """ new_state = self._core.upsert_internal(kind, item) if self._cache is not None: self._cache[self._item_cache_key(kind, item.get('key'))] = [new_state] @@ -65,6 +85,8 @@ def upsert(self, kind, item): @property def initialized(self): + """ + """ if self._inited: return True if self._cache is None: diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index 61088d50..527acec5 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -1,31 +1,21 @@ +""" +Deprecated entry point for a component that has been moved. +""" +# currently excluded from documentation - see ldclient.rst + from ldclient.impl.integrations.files.file_data_source import _FileDataSource +from ldclient.interfaces import UpdateProcessor class FileDataSource(UpdateProcessor): @classmethod def factory(cls, **kwargs): - """Provides a way to use local files as a source of feature flag state. This would typically be - used in a test environment, to operate using a predetermined feature flag state without an - actual LaunchDarkly connection. - - This module and this implementation class are deprecated and may be changed or removed in the future. - Please use :func:`ldclient.integrations.Files.new_data_source()`. + """Provides a way to use local files as a source of feature flag state. - :param kwargs: - See below - - :Keyword arguments: - * **paths** (array): The paths of the source files for loading flag data. These may be absolute paths - or relative to the current working directory. Files will be parsed as JSON unless the 'pyyaml' - package is installed, in which case YAML is also allowed. - * **auto_update** (boolean): True if the data source should watch for changes to the source file(s) - and reload flags whenever there is a change. The default implementation of this feature is based on - polling the filesystem, which may not perform well; if you install the 'watchdog' package (not - included by default, to avoid adding unwanted dependencies to the SDK), its native file watching - mechanism will be used instead. Note that auto-updating will only work if all of the files you - specified have valid directory paths at startup time. 
- * **poll_interval** (float): The minimum interval, in seconds, between checks for file modifications - - used only if auto_update is true, and if the native file-watching mechanism from 'watchdog' is not - being used. The default value is 1 second. + .. deprecated:: 6.8.0 + This module and this implementation class are deprecated and may be changed or removed in the future. + Please use :func:`ldclient.integrations.Files.new_data_source()`. + + The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`. """ return lambda config, store, ready : _FileDataSource(store, ready, diff --git a/ldclient/fixed_thread_pool.py b/ldclient/fixed_thread_pool.py index a3c769e4..17ded510 100644 --- a/ldclient/fixed_thread_pool.py +++ b/ldclient/fixed_thread_pool.py @@ -1,3 +1,8 @@ +""" +Internal helper class for thread management. +""" +# currently excluded from documentation - see ldclient.rst + from threading import Event, Lock, Thread # noinspection PyBroadException diff --git a/ldclient/flag.py b/ldclient/flag.py index d4fcbdf3..83986092 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -1,3 +1,7 @@ +""" +This submodule contains a helper class for feature flag evaluation, as well as some implementation details. +""" + from collections import namedtuple import hashlib import logging @@ -18,10 +22,12 @@ class EvaluationDetail(object): """ - The return type of LDClient.variation_detail, combining the result of a flag evaluation - with information about how it was calculated. + The return type of :func:`ldclient.client.LDClient.variation_detail()`, combining the result of a + flag evaluation with information about how it was calculated. """ def __init__(self, value, variation_index, reason): + """Constructs an instance. + """ self.__value = value self.__variation_index = variation_index self.__reason = reason @@ -29,14 +35,17 @@ def __init__(self, value, variation_index, reason): @property def value(self): """The result of the flag evaluation. This will be either one of the flag's - variations or the default value that was passed to the variation() method. + variations or the default value that was passed to the + :func:`ldclient.client.LDClient.variation_detail()` method. """ return self.__value @property def variation_index(self): """The index of the returned value within the flag's list of variations, e.g. - 0 for the first variation - or None if the default value was returned. + 0 for the first variation -- or None if the default value was returned. + + :rtype: int """ return self.__variation_index @@ -45,28 +54,34 @@ def reason(self): """A dictionary describing the main factor that influenced the flag evaluation value. It contains the following properties: - 'kind': The general category of reason, as follows: 'OFF' - the flag was off; - 'FALLTHROUGH' - the flag was on but the user did not match any targets or rules; - 'TARGET_MATCH' - the user was specifically targeted for this flag; 'RULE_MATCH' - - the user matched one of the flag's rules; 'PREREQUISITE_FAILED' - the flag was - considered off because it had at least one prerequisite flag that did not return - the desired variation; 'ERROR' - the flag could not be evaluated due to an - unexpected error. 
+ * ``kind``: The general category of reason, as follows: + + * ``"OFF"``: the flag was off + * ``"FALLTHROUGH"`` -- the flag was on but the user did not match any targets or rules + * ``"TARGET_MATCH"`` -- the user was specifically targeted for this flag + * ``"RULE_MATCH"`` -- the user matched one of the flag's rules + * ``"PREREQUISITE_FAILED"`` -- the flag was considered off because it had at least one + prerequisite flag that did not return the desired variation + * ``"ERROR"`` - the flag could not be evaluated due to an unexpected error. - 'ruleIndex', 'ruleId': The positional index and unique identifier of the matched - rule, if the kind was 'RULE_MATCH' + * ``ruleIndex``, ``ruleId``: The positional index and unique identifier of the matched + rule, if the kind was ``RULE_MATCH`` - 'prerequisiteKey': The flag key of the prerequisite that failed, if the kind was - 'PREREQUISITE_FAILED' + * ``prerequisiteKey``: The flag key of the prerequisite that failed, if the kind was + ``PREREQUISITE_FAILED`` - 'errorKind': further describes the nature of the error if the kind was 'ERROR', - e.g. 'FLAG_NOT_FOUND' + * ``errorKind``: further describes the nature of the error if the kind was ``ERROR``, + e.g. ``"FLAG_NOT_FOUND"`` + + :rtype: dict """ return self.__reason def is_default_value(self): """Returns True if the flag evaluated to the default value rather than one of its variations. + + :rtype: bool """ return self.__variation_index is None diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index c5a8ab41..a0ba668b 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -1,20 +1,25 @@ +""" +This submodule contains a helper class for feature flag evaluation. +""" + import json import time class FeatureFlagsState(object): """ A snapshot of the state of all feature flags with regard to a specific user, generated by - calling the client's all_flags_state method. Serializing this object to JSON, using the - to_json_dict method or jsonpickle, will produce the appropriate data structure for - bootstrapping the LaunchDarkly JavaScript client. + calling the :func:`ldclient.client.LDClient.all_flags_state()` method. Serializing this + object to JSON, using the :func:`to_json_dict` method or ``jsonpickle``, will produce the + appropriate data structure for bootstrapping the LaunchDarkly JavaScript client. See the + JavaScript SDK Reference Guide on `Bootstrapping `_. """ def __init__(self, valid): self.__flag_values = {} self.__flag_metadata = {} self.__valid = valid + # Used internally to build the state map def add_flag(self, flag, value, variation, reason, details_only_if_tracked): - """Used internally to build the state map.""" key = flag['key'] self.__flag_values[key] = value meta = {} @@ -39,11 +44,14 @@ def add_flag(self, flag, value, variation, reason, details_only_if_tracked): def valid(self): """True if this object contains a valid snapshot of feature flag state, or False if the state could not be computed (for instance, because the client was offline or there was no user). + + :rtype: bool """ return self.__valid def get_flag_value(self, key): """Returns the value of an individual feature flag at the time the state was recorded. + :param string key: the feature flag key :return: the flag's value; None if the flag returned the default value, or if there was no such flag """ @@ -51,9 +59,11 @@ def get_flag_value(self, key): def get_flag_reason(self, key): """Returns the evaluation reason for an individual feature flag at the time the state was recorded. 
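A sketch, assuming ``client`` and ``user`` are defined elsewhere; note that reasons are only recorded if the state was created with ``with_reasons=True``: ::

        state = client.all_flags_state(user, with_reasons=True)
        reason = state.get_flag_reason('my-flag')  # e.g. {'kind': 'OFF'}, or None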
+ :param string key: the feature flag key :return: a dictionary describing the reason; None if reasons were not recorded, or if there was no such flag + :rtype: dict """ meta = self.__flag_metadata.get(key) return None if meta is None else meta.get('reason') @@ -63,7 +73,9 @@ def to_values_map(self): default value, its value will be None. Do not use this method if you are passing data to the front end to "bootstrap" the JavaScript client. - Instead, use to_json_dict. + Instead, use :func:`to_json_dict()`. + + :rtype: dict """ return self.__flag_values @@ -71,6 +83,8 @@ def to_json_dict(self): """Returns a dictionary suitable for passing as JSON, in the format used by the LaunchDarkly JavaScript SDK. Use this method if you are passing data to the front end in order to "bootstrap" the JavaScript client. + + :rtype: dict """ ret = self.__flag_values.copy() ret['$flagsState'] = self.__flag_metadata @@ -79,6 +93,8 @@ def to_json_dict(self): def to_json_string(self): """Same as to_json_dict, but serializes the JSON structure into a string. + + :rtype: string """ return json.dumps(self.to_json_dict()) diff --git a/ldclient/integrations.py b/ldclient/integrations.py index fcc89abc..a1e9d2f8 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -1,3 +1,8 @@ +""" +This submodule contains factory/configuration methods for integrating the SDK with services +other than LaunchDarkly. +""" + from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper from ldclient.impl.integrations.consul.consul_feature_store import _ConsulFeatureStoreCore @@ -19,25 +24,30 @@ def new_feature_store(host=None, prefix=None, consul_opts=None, caching=CacheConfig.default()): - """Creates a Consul-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + """Creates a Consul-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the - SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store + `SDK reference guide `_. + + To use this method, you must first install the ``python-consul`` package. Then, put the object + returned by this method into the ``feature_store`` property of your client configuration + (:class:`ldclient.config.Config`). + :: - To use this method, you must first install the `python-consul` package. Then, put the object - returned by this method into the `feature_store` property of your client configuration - (:class:ldclient.config.Config). + from ldclient.integrations import Consul + store = Consul.new_feature_store() + config = Config(feature_store=store) - Note that `python-consul` is not available for Python 3.3 or 3.4, so this feature cannot be + Note that ``python-consul`` is not available for Python 3.3 or 3.4, so this feature cannot be used in those Python versions. 
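To customize caching, pass a :class:`ldclient.feature_store.CacheConfig`; for instance (the host name here is illustrative): ::

        from ldclient.feature_store import CacheConfig
        from ldclient.integrations import Consul

        store = Consul.new_feature_store(host='consul.internal',
                                         caching=CacheConfig(expiration=30))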
- :param string host: Hostname of the Consul server (uses "localhost" if omitted) - :param int port: Port of the Consul server (uses 8500 if omitted) - :param string prefix: A namespace prefix to be prepended to all Consul keys - :param dict consul_opts: Optional parameters for configuring the Consul client, if you need - to set any of them besides host and port, as defined in the python-consul API; see - https://python-consul.readthedocs.io/en/latest/#consul - :param CacheConfig caching: Specifies whether local caching should be enabled and if so, - sets the cache properties; defaults to `CacheConfig.default()` + :param string host: hostname of the Consul server (uses ``localhost`` if omitted) + :param int port: port of the Consul server (uses 8500 if omitted) + :param string prefix: a namespace prefix to be prepended to all Consul keys + :param dict consul_opts: optional parameters for configuring the Consul client, if you need + to set any of them besides host and port, as defined in the + `python-consul API `_ + :param CacheConfig caching: specifies whether local caching should be enabled and if so, + sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` """ core = _ConsulFeatureStoreCore(host, port, prefix, consul_opts) return CachingStoreWrapper(core, caching) @@ -52,13 +62,18 @@ def new_feature_store(table_name, prefix=None, dynamodb_opts={}, caching=CacheConfig.default()): - """Creates a DynamoDB-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + """Creates a DynamoDB-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the - SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store + `SDK reference guide `_. - To use this method, you must first install the `boto3` package containing the AWS SDK gems. - Then, put the object returned by this method into the `feature_store` property of your - client configuration (:class:ldclient.config.Config). + To use this method, you must first install the ``boto3`` package containing the AWS SDK gems. + Then, put the object returned by this method into the ``feature_store`` property of your + client configuration (:class:`ldclient.config.Config`). + :: + + from ldclient.integrations import DynamoDB + store = DynamoDB.new_feature_store("my-table-name") + config = Config(feature_store=store) Note that the DynamoDB table must already exist; the LaunchDarkly SDK does not create the table automatically, because it has no way of knowing what additional properties (such as permissions @@ -67,14 +82,14 @@ def new_feature_store(table_name, By default, the DynamoDB client will try to get your AWS credentials and region name from environment variables and/or local configuration files, as described in the AWS SDK documentation. - You may also pass configuration settings in `dynamodb_opts`. 
- - :param string table_name: The name of an existing DynamoDB table - :param string prefix: An optional namespace prefix to be prepended to all DynamoDB keys - :param dict dynamodb_opts: Optional parameters for configuring the DynamoDB client, as defined in - the boto3 API; see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session.client - :param CacheConfig caching: Specifies whether local caching should be enabled and if so, - sets the cache properties; defaults to `CacheConfig.default()` + You may also pass configuration settings in ``dynamodb_opts``. + + :param string table_name: the name of an existing DynamoDB table + :param string prefix: an optional namespace prefix to be prepended to all DynamoDB keys + :param dict dynamodb_opts: optional parameters for configuring the DynamoDB client, as defined in + the `boto3 API `_ + :param CacheConfig caching: specifies whether local caching should be enabled and if so, + sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` """ core = _DynamoDBFeatureStoreCore(table_name, prefix, dynamodb_opts) return CachingStoreWrapper(core, caching) @@ -92,21 +107,26 @@ def new_feature_store(url='redis://localhost:6379/0', prefix='launchdarkly', max_connections=16, caching=CacheConfig.default()): - """Creates a Redis-backed implementation of `:class:ldclient.feature_store.FeatureStore`. + """Creates a Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the - SDK reference guide: https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store - - To use this method, you must first install the `redis` package. Then, put the object - returned by this method into the `feature_store` property of your client configuration - (:class:ldclient.config.Config). - - :param string url: The URL of the Redis host; defaults to `DEFAULT_URL` - :param string prefix: A namespace prefix to be prepended to all Redis keys; defaults to - `DEFAULT_PREFIX` - :param int max_connections: The maximum number of Redis connections to keep in the - connection pool; defaults to `DEFAULT_MAX_CONNECTIONS` - :param CacheConfig caching: Specifies whether local caching should be enabled and if so, - sets the cache properties; defaults to `CacheConfig.default()` + `SDK reference guide `_. + + To use this method, you must first install the ``redis`` package. Then, put the object + returned by this method into the ``feature_store`` property of your client configuration + (:class:`ldclient.config.Config`). 
+ :: + + from ldclient.integrations import Redis + store = Redis.new_feature_store() + config = Config(feature_store=store) + + :param string url: the URL of the Redis host; defaults to ``DEFAULT_URL`` + :param string prefix: a namespace prefix to be prepended to all Redis keys; defaults to + ``DEFAULT_PREFIX`` + :param int max_connections: the maximum number of Redis connections to keep in the + connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS`` + :param CacheConfig caching: specifies whether local caching should be enabled and if so, + sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` """ core = _RedisFeatureStoreCore(url, prefix, max_connections) wrapper = CachingStoreWrapper(core, caching) @@ -124,95 +144,40 @@ def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=Fal used in a test environment, to operate using a predetermined feature flag state without an actual LaunchDarkly connection. - To use this component, call `new_data_source`, specifying the file path(s) of your data file(s) - in the `path` parameter; then put the value returned by this method into the `update_processor_class` - property of your LaunchDarkly client configuration (:class:ldclient.config.Config). + To use this component, call ``new_data_source``, specifying the file path(s) of your data file(s) + in the ``paths`` parameter; then put the value returned by this method into the ``update_processor_class`` + property of your LaunchDarkly client configuration (:class:`ldclient.config.Config`). :: - data_source = LaunchDarkly::Integrations::Files.new_data_source(paths=[ myFilePath ]) + from ldclient.integrations import Files + data_source = Files.new_data_source(paths=[ myFilePath ]) config = Config(update_processor_class=data_source) This will cause the client not to connect to LaunchDarkly to get feature flags. The client may still make network connections to send analytics events, unless you have disabled - this with Config.send_events or Config.offline. - - Flag data files can be either JSON or YAML (in order to use YAML, you must install the 'pyyaml' - package). They contain an object with three possible properties: - - * "flags": Feature flag definitions. - * "flagValues": Simplified feature flags that contain only a value. - * "segments": User segment definitions. - - The format of the data in "flags" and "segments" is defined by the LaunchDarkly application - and is subject to change. Rather than trying to construct these objects yourself, it is simpler - to request existing flags directly from the LaunchDarkly server in JSON format, and use this - output as the starting point for your file. In Linux you would do this: - :: - - curl -H "Authorization: {your sdk key}" https://app.launchdarkly.com/sdk/latest-all - - The output will look something like this (but with many more properties): - :: - - { - "flags": { - "flag-key-1": { - "key": "flag-key-1", - "on": true, - "variations": [ "a", "b" ] - } - }, - "segments": { - "segment-key-1": { - "key": "segment-key-1", - "includes": [ "user-key-1" ] - } - } - } - - Data in this format allows the SDK to exactly duplicate all the kinds of flag behavior supported - by LaunchDarkly. However, in many cases you will not need this complexity, but will just want to - set specific flag keys to specific values. 
For that, you can use a much simpler format: - :: - - { - "flagValues": { - "my-string-flag-key": "value-1", - "my-boolean-flag-key": true, - "my-integer-flag-key": 3 - } - } - - Or, in YAML: - :: - - flagValues: - my-string-flag-key: "value-1" - my-boolean-flag-key: true - my-integer-flag-key: 1 + this in your configuration with ``send_events`` or ``offline``. - It is also possible to specify both "flags" and "flagValues", if you want some flags - to have simple values and others to have complex behavior. However, it is an error to use the - same flag key or segment key more than once, either in a single file or across multiple files. + The format of the data files is described in the SDK Reference Guide on + `Reading flags from a file `_. + Note that in order to use YAML, you will need to install the ``pyyaml`` package. If the data source encounters any error in any file-- malformed content, a missing file, or a duplicate key-- it will not load flags from any of the files. - :param array paths: The paths of the source files for loading flag data. These may be absolute paths - or relative to the current working directory. Files will be parsed as JSON unless the 'pyyaml' + :param array paths: the paths of the source files for loading flag data. These may be absolute paths + or relative to the current working directory. Files will be parsed as JSON unless the ``pyyaml`` package is installed, in which case YAML is also allowed. :param bool auto_update: (default: false) True if the data source should watch for changes to the source file(s) and reload flags whenever there is a change. The default implementation of this feature is based on - polling the filesystem, which may not perform well; if you install the 'watchdog' package (not - included by default, to avoid adding unwanted dependencies to the SDK), its native file watching - mechanism will be used instead. Note that auto-updating will only work if all of the files you - specified have valid directory paths at startup time. - :param float poll_interval: (default: 1) The minimum interval, in seconds, between checks for file - modifications-- used only if `auto_update` is true, and if the native file-watching mechanism from - `watchdog` is not being used. + polling the filesystem, which may not perform well; if you install the ``watchdog`` package, its + native file watching mechanism will be used instead. Note that auto-updating will only work if all + of the files you specified have valid directory paths at startup time. + :param float poll_interval: (default: 1) the minimum interval, in seconds, between checks for file + modifications-- used only if ``auto_update`` is true, and if the native file-watching mechanism from + ``watchdog`` is not being used. :param bool force_polling: (default: false) True if the data source should implement auto-update via polling the filesystem even if a native mechanism is available. This is mainly for SDK testing. - :return: an object (actually a lambda) to be stored in the `update_processor_class` configuration property + :return: an object (actually a lambda) to be stored in the ``update_processor_class`` configuration property """ return lambda config, store, ready : _FileDataSource(store, ready, paths, auto_update, poll_interval, force_polling) diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 9556bdfc..48c517b8 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -1,16 +1,22 @@ +""" +This submodule contains interfaces for various components of the SDK. 
+ +They may be useful in writing new implementations of these components, or for testing. +""" + from abc import ABCMeta, abstractmethod, abstractproperty class FeatureStore(object): """ - A versioned store for feature flags and related objects received from LaunchDarkly. + Interface for a versioned store for feature flags and related objects received from LaunchDarkly. Implementations should permit concurrent access and updates. - An "object", for `FeatureStore`, is simply a dict of arbitrary data which must have at least - three properties: "key" (its unique key), "version" (the version number provided by - LaunchDarkly), and "deleted" (True if this is a placeholder for a deleted object). + An "object", for ``FeatureStore``, is simply a dict of arbitrary data which must have at least + three properties: ``key`` (its unique key), ``version`` (the version number provided by + LaunchDarkly), and ``deleted`` (True if this is a placeholder for a deleted object). - Delete and upsert requests are versioned-- if the version number in the request is less than + Delete and upsert requests are versioned: if the version number in the request is less than the currently stored version of the object, the request should be ignored. These semantics support the primary use case for the store, which synchronizes a collection @@ -22,7 +28,7 @@ class FeatureStore(object): def get(self, kind, key, callback=lambda x: x): """ Retrieves the object to which the specified key is mapped, or None if the key is not found - or the associated object has a "deleted" property of True. The retrieved object, if any (a + or the associated object has a ``deleted`` property of True. The retrieved object, if any (a dict) can be transformed by the specified callback. :param kind: The kind of object to get @@ -97,11 +103,11 @@ def initialized(self): class FeatureStoreCore(object): """ - `FeatureStoreCore` is an interface for a simplified subset of the functionality of :class:`FeatureStore`, - to be used in conjunction with :class:`feature_store_helpers.CachingStoreWrapper`. This allows developers - developers of custom `FeatureStore` implementations to avoid repeating logic that would + Interface for a simplified subset of the functionality of :class:`FeatureStore`, to be used + in conjunction with :class:`ldclient.feature_store_helpers.CachingStoreWrapper`. This allows + developers of custom ``FeatureStore`` implementations to avoid repeating logic that would commonly be needed in any such implementation, such as caching. Instead, they can implement - only `FeatureStoreCore` and then create a `CachingStoreWrapper`. + only ``FeatureStoreCore`` and then create a ``CachingStoreWrapper``. """ __metaclass__ = ABCMeta @@ -174,10 +180,8 @@ def initialized_internal(self): """ +# Internal use only. Common methods for components that perform a task in the background. class BackgroundOperation(object): - """ - Performs a task in the background - """ # noinspection PyMethodMayBeStatic def start(self): @@ -203,20 +207,24 @@ def is_alive(self): class UpdateProcessor(BackgroundOperation): """ - Responsible for retrieving Feature Flag updates from LaunchDarkly and saving them to the feature store + Interface for the component that obtains feature flag data in some way and passes it to a + :class:`FeatureStore`. The built-in implementations of this are the client's standard streaming + or polling behavior. For testing purposes, there is also :func:`ldclient.integrations.Files.new_data_source()`. 
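+
+    A custom implementation can be substituted for testing; for example (a sketch, where
+    ``MyUpdateProcessor`` is a hypothetical class implementing this interface whose
+    constructor takes ``(config, store, ready)``):
+    ::
+
+        config = Config(update_processor_class=MyUpdateProcessor)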
""" __metaclass__ = ABCMeta def initialized(self): """ Returns whether the update processor has received feature flags and has initialized its feature store. + :rtype: bool """ class EventProcessor(object): """ - Buffers analytics events and sends them to LaunchDarkly + Interface for the component that buffers analytics events and sends them to LaunchDarkly. + The default implementation can be replaced for testing purposes. """ __metaclass__ = ABCMeta @@ -231,7 +239,7 @@ def flush(self): """ Specifies that any buffered events should be sent as soon as possible, rather than waiting for the next flush interval. This method is asynchronous, so events still may not be sent - until a later time. However, calling stop() will synchronously deliver any events that were + until a later time. However, calling ``stop()`` will synchronously deliver any events that were not yet delivered prior to shutting down. """ @@ -244,7 +252,8 @@ def stop(self): class FeatureRequester(object): """ - Requests features. + Interface for the component that acquires feature flag data in polling mode. The default + implementation can be replaced for testing purposes. """ __metaclass__ = ABCMeta @@ -254,7 +263,7 @@ def get_all(self): """ pass - def get_one(self, key): + def get_one(self, kind, key): """ Gets one Feature flag :return: diff --git a/ldclient/lru_cache.py b/ldclient/lru_cache.py index 53cbf5d2..9833287b 100644 --- a/ldclient/lru_cache.py +++ b/ldclient/lru_cache.py @@ -1,13 +1,13 @@ -''' -A dictionary-based cache that removes the oldest entries when its limit is exceeded. -Values are only refreshed by writing, not by reading. Not thread-safe. -''' +""" +Internal helper class for caching. +""" +# currently excluded from documentation - see ldclient.rst from collections import OrderedDict # Backport of Python 3.2 move_to_end method which doesn't exist in 2.7 -class OrderedDictWithReordering(OrderedDict): +class _OrderedDictWithReordering(OrderedDict): if not hasattr(OrderedDict, 'move_to_end'): # backport of Python 3.2 logic def move_to_end(self, key, last=True): @@ -28,9 +28,12 @@ def move_to_end(self, key, last=True): class SimpleLRUCache(object): + """A dictionary-based cache that removes the oldest entries when its limit is exceeded. + Values are only refreshed by writing, not by reading. Not thread-safe. + """ def __init__(self, capacity): self.capacity = capacity - self.cache = OrderedDictWithReordering() + self.cache = _OrderedDictWithReordering() def get(self, key): return self.cache.get(key) diff --git a/ldclient/memoized_value.py b/ldclient/memoized_value.py index b2c38fea..08fb2d51 100644 --- a/ldclient/memoized_value.py +++ b/ldclient/memoized_value.py @@ -1,12 +1,17 @@ -''' -Simple implementation of a thread-safe memoized value whose generator function will never be -run more than once, and whose value can be overridden by explicit assignment. -''' +""" +Internal helper class for caching. No longer used. +""" +# currently excluded from documentation - see ldclient.rst from threading import RLock class MemoizedValue(object): + """Simple implementation of a thread-safe memoized value whose generator function will never be + run more than once, and whose value can be overridden by explicit assignment. + .. deprecated:: 6.7.0 + No longer used. Retained here only in case third parties were using it for another purpose. 
+ """ def __init__(self, generator): self.generator = generator self.inited = False diff --git a/ldclient/operators.py b/ldclient/operators.py index 88a76cd1..208edcbd 100644 --- a/ldclient/operators.py +++ b/ldclient/operators.py @@ -1,3 +1,8 @@ +""" +Implementation details of feature flag evaluation. +""" +# currently excluded from documentation - see ldclient.rst + import logging import re import semver diff --git a/ldclient/polling.py b/ldclient/polling.py index 19ed0a7d..6f1be549 100644 --- a/ldclient/polling.py +++ b/ldclient/polling.py @@ -1,3 +1,8 @@ +""" +Default implementation of the polling component. +""" +# currently excluded from documentation - see ldclient.rst + from threading import Thread from ldclient.interfaces import UpdateProcessor diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py index ff93c402..1e49d9ee 100644 --- a/ldclient/redis_feature_store.py +++ b/ldclient/redis_feature_store.py @@ -11,10 +11,11 @@ # will migrate away from exposing these concrete classes and use only the factory methods. class RedisFeatureStore(FeatureStore): - """A Redis-backed implementation of :class:`ldclient.feature_store.FeatureStore`. + """A Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. - This module and this implementation class are deprecated and may be changed or removed in the future. - Please use :func:`ldclient.integrations.Redis.new_feature_store()`. + .. deprecated:: 6.7.0 + This module and this implementation class are deprecated and may be changed or removed in the future. + Please use :func:`ldclient.integrations.Redis.new_feature_store()`. """ def __init__(self, url='redis://localhost:6379/0', diff --git a/ldclient/repeating_timer.py b/ldclient/repeating_timer.py index 956cfbcd..49fd043c 100644 --- a/ldclient/repeating_timer.py +++ b/ldclient/repeating_timer.py @@ -1,3 +1,8 @@ +""" +Internal helper class for repeating tasks. +""" +# currently excluded from documentation - see ldclient.rst + from threading import Event, Thread class RepeatingTimer(object): diff --git a/ldclient/rwlock.py b/ldclient/rwlock.py index 8416a35c..feaa510e 100644 --- a/ldclient/rwlock.py +++ b/ldclient/rwlock.py @@ -1,3 +1,8 @@ +""" +Internal helper class for locking. +""" +# currently excluded from documentation - see ldclient.rst + import threading diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index 5b41413b..a1e0f6fb 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -1,3 +1,10 @@ +""" +Server-Sent Events implementation for streaming. + +Based on: https://bitbucket.org/btubbs/sseclient/src/a47a380a3d7182a205c0f1d5eb470013ce796b4d/sseclient.py?at=default&fileviewer=file-view-default +""" +# currently excluded from documentation - see ldclient.rst + import re import time import warnings @@ -9,8 +16,6 @@ from ldclient.util import create_http_pool_manager from ldclient.util import throw_if_unsuccessful_response -# Inspired by: https://bitbucket.org/btubbs/sseclient/src/a47a380a3d7182a205c0f1d5eb470013ce796b4d/sseclient.py?at=default&fileviewer=file-view-default - # Technically, we should support streams that mix line endings. This regex, # however, assumes that a system will provide consistent line endings. end_of_field = re.compile(r'\r\n\r\n|\r\r|\n\n') diff --git a/ldclient/streaming.py b/ldclient/streaming.py index 20599eb1..eba4d5fc 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -1,3 +1,8 @@ +""" +Default implementation of the streaming component. 
+""" +# currently excluded from documentation - see ldclient.rst + from collections import namedtuple import json diff --git a/ldclient/user_filter.py b/ldclient/user_filter.py index d48ab23f..6379287e 100644 --- a/ldclient/user_filter.py +++ b/ldclient/user_filter.py @@ -1,3 +1,8 @@ +""" +Internal helper class for filtering out private attributes. +""" +# currently excluded from documentation - see ldclient.rst + import jsonpickle import six diff --git a/ldclient/util.py b/ldclient/util.py index fbb2f11d..02c84ea0 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -1,3 +1,8 @@ +""" +General internal helper functions. +""" +# currently excluded from documentation - see ldclient.rst + import certifi import logging import sys diff --git a/ldclient/versioned_data_kind.py b/ldclient/versioned_data_kind.py index 04acce43..37504394 100644 --- a/ldclient/versioned_data_kind.py +++ b/ldclient/versioned_data_kind.py @@ -1,17 +1,24 @@ -from collections import namedtuple - """ -These objects denote the types of data that can be stored in the feature store and -referenced in the API. If we add another storable data type in the future, as long as it -follows the same pattern (having "key", "version", and "deleted" properties), we only need -to add a corresponding constant here and the existing store should be able to handle it. +This submodule is used only by the internals of the feature flag storage mechanism. + +If you are writing your own implementation of :class:`ldclient.integrations.FeatureStore`, the +:class:`VersionedDataKind` tuple type will be passed to the ``kind`` parameter of the feature +store methods; its ``namespace`` property tells the feature store which collection of objects is +being referenced ("features", "segments", etc.). The intention is for the feature store to treat +storable objects as completely generic JSON dictionaries, rather than having any special logic +for features or segments. """ +from collections import namedtuple + # Note that VersionedDataKind without the extra attributes is no longer used in the SDK, # but it's preserved here for backward compatibility just in case someone else used it VersionedDataKind = namedtuple('VersionedDataKind', ['namespace', 'request_api_path', 'stream_api_path']) +# Note, feature store implementors really don't need to know about this class so we could just +# not document it at all, but apparently namedtuple() creates its own docstrings so it's going +# to show up in any case. 
VersionedDataKindWithOrdering = namedtuple('VersionedDataKindWithOrdering', ['namespace', 'request_api_path', 'stream_api_path', 'priority', 'get_dependency_keys']) From 71534aa6a104aa9fc94d6631612a0bac26834b32 Mon Sep 17 00:00:00 2001 From: Harpo roeder Date: Tue, 5 Feb 2019 12:44:45 -0800 Subject: [PATCH 069/356] add consul to test-requirements, remove specific reference to install redis, consul, dynamo deps in azure ci --- azure-pipelines.yml | 3 --- test-requirements.txt | 1 + 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index e4f8f7af..c0d39fdb 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -46,9 +46,6 @@ jobs: script: | python --version pip install -r test-requirements.txt - pip install -r redis-requirements.txt - pip install -r consul-requirements.txt - pip install -r dynamodb-requirements.txt python setup.py install mkdir test-reports pytest -s --junitxml=test-reports/junit.xml testing; diff --git a/test-requirements.txt b/test-requirements.txt index 88cbbc2e..714c8bd2 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,3 +8,4 @@ pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 pyyaml>=3.0 watchdog>=0.9 +python-consul>=1.0.1 From 6bbd65f1dc540c4c8eca25079d160495b97258b5 Mon Sep 17 00:00:00 2001 From: Harpo roeder Date: Tue, 5 Feb 2019 12:53:36 -0800 Subject: [PATCH 070/356] Revert "add consul to test-requirements, remove specific reference to install redis, consul, dynamo deps in azure ci" This reverts commit 71534aa6a104aa9fc94d6631612a0bac26834b32. --- azure-pipelines.yml | 3 +++ test-requirements.txt | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index c0d39fdb..e4f8f7af 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -46,6 +46,9 @@ jobs: script: | python --version pip install -r test-requirements.txt + pip install -r redis-requirements.txt + pip install -r consul-requirements.txt + pip install -r dynamodb-requirements.txt python setup.py install mkdir test-reports pytest -s --junitxml=test-reports/junit.xml testing; diff --git a/test-requirements.txt b/test-requirements.txt index 714c8bd2..88cbbc2e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -8,4 +8,3 @@ pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 pyyaml>=3.0 watchdog>=0.9 -python-consul>=1.0.1 From 0ec55a76efedd72ef59f22442308747d9bac1209 Mon Sep 17 00:00:00 2001 From: Harpo roeder Date: Tue, 5 Feb 2019 12:55:29 -0800 Subject: [PATCH 071/356] remove redis and dynamo explicit dep reference --- azure-pipelines.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index e4f8f7af..b7f19ff3 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -46,9 +46,7 @@ jobs: script: | python --version pip install -r test-requirements.txt - pip install -r redis-requirements.txt pip install -r consul-requirements.txt - pip install -r dynamodb-requirements.txt python setup.py install mkdir test-reports pytest -s --junitxml=test-reports/junit.xml testing; From 49c5993af42fa554d99fd207129f62a456c3c488 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 13:45:14 -0800 Subject: [PATCH 072/356] add requirements.txt --- docs/requirements.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 docs/requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..ef90363c --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1 @@ +sphinx<2.0 From 
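+
+If you prefer not to use `make`, you can run `pip install -r requirements.txt` followed by `sphinx-build -b html . build/html` from within the `docs` directory (a sketch of the equivalent manual steps; the output then appears at the same path as above).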
5228df7abbf6d0aa5d608c354d9ccf530f3aafe7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 13:49:47 -0800 Subject: [PATCH 073/356] add config file --- .readthedocs.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .readthedocs.yml diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 00000000..56781a23 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,12 @@ +version: 2 + +python: + version: 3.7 + install: + - requirements: docs/requirements.txt + - requirements: requirements.txt + +sphinx: + builder: html + configuration: docs/conf.py + fail_on_warning: true From f4e5c8604883c09f2cc9b2f0e191139272c3e6a5 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 15:10:44 -0800 Subject: [PATCH 074/356] break up API docs into logical groups with a better home page --- docs/README.md | 29 ++++++++++++ docs/api-extending.rst | 17 +++++++ docs/api-integrations.rst | 10 +++++ docs/api-main.rst | 40 +++++++++++++++++ docs/index.rst | 22 +++++----- docs/ldclient.rst | 83 ----------------------------------- ldclient/__init__.py | 1 + ldclient/event_processor.py | 2 +- ldclient/event_summarizer.py | 2 +- ldclient/feature_requester.py | 2 +- 10 files changed, 111 insertions(+), 97 deletions(-) create mode 100644 docs/README.md create mode 100644 docs/api-extending.rst create mode 100644 docs/api-integrations.rst create mode 100644 docs/api-main.rst delete mode 100644 docs/ldclient.rst diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..1eb6a999 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,29 @@ +# How the Python SDK documentation works + +The generated API documentation is built with [Sphinx](http://www.sphinx-doc.org/en/master/), and is hosted on [Read the Docs](https://readthedocs.org/). + +It uses the following: + +* Docstrings within the code. Docstrings can use any of the markup supported by Sphinx. +* The `.rst` files in the `docs` directory. These provide the overall page structure. +* The `conf.py` file containing Sphinx settings. + +## What to document + +Every public class, method, and module should have a docstring. Classes and methods with no docstring will not be included in the API docs. + +"Public" here means things that we want third-party developers to use. The SDK also contains many modules and classes that are not actually private (i.e. they aren't prefixed with `_`), but are for internal use only and aren't supported for any other use (we would like to reduce the amount of these in future). + +To cause a class or method in an existing module to be added to the docs, all that's necessary is to give it a docstring. + +To add a new module to the docs, give it a docstring and then add a link to it in the appropriate `api-*.rst` file, in the same format as the existing links. + +## Undocumented things + +Modules that contain only implementation details are omitted from the docs by simply not including links to them in the `.rst` files. + +Internal classes in a documented module will be omitted from the docs if they do not have any docstrings, unless they inherit from another class that has docstrings. In the latter case, the way to omit them from the docs is to edit the `.rst` file that contains the link to that module, and add a `:members:` directive under the module that specifically lists all the classes that _should_ be shown. + +## Testing + +In the `docs` directory, run `make html` to build all the docs. Then view `docs/build/html/index.html`. 
diff --git a/docs/api-extending.rst b/docs/api-extending.rst new file mode 100644 index 00000000..8c951904 --- /dev/null +++ b/docs/api-extending.rst @@ -0,0 +1,17 @@ +Extending the SDK +================= + +ldclient.interfaces module +-------------------------- + +.. automodule:: ldclient.interfaces + :members: + :special-members: __init__ + :show-inheritance: + +ldclient.versioned_data_kind module +----------------------------------- + +.. automodule:: ldclient.versioned_data_kind + :members: + :show-inheritance: diff --git a/docs/api-integrations.rst b/docs/api-integrations.rst new file mode 100644 index 00000000..8d8146ff --- /dev/null +++ b/docs/api-integrations.rst @@ -0,0 +1,10 @@ +Integrating with other services +=============================== + +ldclient.integrations module +---------------------------- + +.. automodule:: ldclient.integrations + :members: + :special-members: __init__ + :show-inheritance: diff --git a/docs/api-main.rst b/docs/api-main.rst new file mode 100644 index 00000000..56417ea5 --- /dev/null +++ b/docs/api-main.rst @@ -0,0 +1,40 @@ +Core API +======== + +ldclient module +--------------- + +.. automodule:: ldclient + :members: get,set_config,set_sdk_key + :show-inheritance: + +ldclient.client module +---------------------- + +.. automodule:: ldclient.client + :members: LDClient + :special-members: __init__ + :show-inheritance: + +ldclient.config module +---------------------- + +.. automodule:: ldclient.config + :members: + :special-members: __init__ + :show-inheritance: + +ldclient.flag module +-------------------- + +.. automodule:: ldclient.flag + :members: EvaluationDetail + :special-members: __init__ + :show-inheritance: + +ldclient.flags_state module +--------------------------- + +.. automodule:: ldclient.flags_state + :members: + :show-inheritance: diff --git a/docs/index.rst b/docs/index.rst index 735da978..7a9d2c73 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -3,19 +3,19 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -Welcome to ldclient-py's documentation! -======================================= +LaunchDarkly Python SDK +======================= -.. toctree:: - :maxdepth: 2 - :caption: Contents: +This is the API reference for the `LaunchDarkly `_ SDK for Python. -.. automodule:: ldclient +The latest version of the SDK can be found on `PyPI `_, and the source code is on `GitHub `_. +For more information, see LaunchDarkly's `Quickstart `_ and `SDK Reference Guide `_. -Indices and tables -================== +.. toctree:: + :maxdepth: 2 + :caption: Contents: -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` + api-main + api-integrations + api-extending diff --git a/docs/ldclient.rst b/docs/ldclient.rst deleted file mode 100644 index 4a212c16..00000000 --- a/docs/ldclient.rst +++ /dev/null @@ -1,83 +0,0 @@ -ldclient package -================ - -Module contents ---------------- - -.. automodule:: ldclient - :members: - :undoc-members: - :show-inheritance: - -Submodules ----------- - -ldclient.client module ----------------------- - -.. automodule:: ldclient.client - :members: LDClient - :special-members: __init__ - :show-inheritance: - -ldclient.config module ----------------------- - -.. automodule:: ldclient.config - :members: - :special-members: __init__ - :show-inheritance: - -ldclient.feature\_store module ------------------------------- - -.. 
automodule:: ldclient.feature_store - :members: - :special-members: __init__ - :show-inheritance: - -ldclient.feature\_store\_helpers module ---------------------------------------- - -.. automodule:: ldclient.feature_store_helpers - :members: - :special-members: __init__ - :show-inheritance: - -ldclient.flag module --------------------- - -.. automodule:: ldclient.flag - :members: EvaluationDetail - :special-members: __init__ - :show-inheritance: - -ldclient.flags\_state module ----------------------------- - -.. automodule:: ldclient.flags_state - :members: - :show-inheritance: - -ldclient.integrations module ----------------------------- - -.. automodule:: ldclient.integrations - :members: - :special-members: __init__ - :show-inheritance: - -ldclient.interfaces module --------------------------- - -.. automodule:: ldclient.interfaces - :members: - :special-members: __init__ - :show-inheritance: - -ldclient.versioned\_data\_kind module -------------------------------------- - -.. automodule:: ldclient.versioned_data_kind - :members: - :show-inheritance: diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 5be96db9..5dfb011f 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -122,6 +122,7 @@ def get(): __lock.unlock() +# currently hidden from documentation - see api-main.rst class NullHandler(logging.Handler): """A :class:`logging.Handler` implementation that does nothing. diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 03134b64..3267f451 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -1,7 +1,7 @@ """ Implementation details of the analytics event delivery component. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation from collections import namedtuple from email.utils import parsedate diff --git a/ldclient/event_summarizer.py b/ldclient/event_summarizer.py index 2d084ddc..ec4003b9 100644 --- a/ldclient/event_summarizer.py +++ b/ldclient/event_summarizer.py @@ -1,7 +1,7 @@ """ Implementation details of the analytics event delivery component. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation from collections import namedtuple diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 4414fb7a..11892e58 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -1,7 +1,7 @@ """ Default implementation of feature flag polling requests. 
""" -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation from collections import namedtuple import json From 9445a6ebbec338c0ae6570a486f75bb9de374b98 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 15:17:20 -0800 Subject: [PATCH 075/356] misc cleanup --- docs/api-extending.rst | 8 ++++++++ ldclient/event_processor.py | 2 +- ldclient/event_summarizer.py | 2 +- ldclient/feature_requester.py | 2 +- ldclient/file_data_source.py | 2 +- ldclient/fixed_thread_pool.py | 2 +- ldclient/lru_cache.py | 2 +- ldclient/memoized_value.py | 2 +- ldclient/operators.py | 2 +- ldclient/polling.py | 2 +- ldclient/repeating_timer.py | 2 +- ldclient/rwlock.py | 2 +- ldclient/sse_client.py | 2 +- ldclient/streaming.py | 2 +- ldclient/user_filter.py | 2 +- ldclient/util.py | 2 +- 16 files changed, 23 insertions(+), 15 deletions(-) diff --git a/docs/api-extending.rst b/docs/api-extending.rst index 8c951904..4f668ce0 100644 --- a/docs/api-extending.rst +++ b/docs/api-extending.rst @@ -9,6 +9,14 @@ ldclient.interfaces module :special-members: __init__ :show-inheritance: +ldclient.feature_store_helpers module +------------------------------------- + +.. automodule:: ldclient.feature_store_helpers + :members: + :special-members: __init__ + :show-inheritance: + ldclient.versioned_data_kind module ----------------------------------- diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 3267f451..f7a9178f 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -1,7 +1,7 @@ """ Implementation details of the analytics event delivery component. """ -# currently excluded from documentation +# currently excluded from documentation - see docs/README.md from collections import namedtuple from email.utils import parsedate diff --git a/ldclient/event_summarizer.py b/ldclient/event_summarizer.py index ec4003b9..c0aa5aeb 100644 --- a/ldclient/event_summarizer.py +++ b/ldclient/event_summarizer.py @@ -1,7 +1,7 @@ """ Implementation details of the analytics event delivery component. """ -# currently excluded from documentation +# currently excluded from documentation - see docs/README.md from collections import namedtuple diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 11892e58..51aee6a0 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -1,7 +1,7 @@ """ Default implementation of feature flag polling requests. """ -# currently excluded from documentation +# currently excluded from documentation - see docs/README.md from collections import namedtuple import json diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py index 527acec5..56da8de8 100644 --- a/ldclient/file_data_source.py +++ b/ldclient/file_data_source.py @@ -1,7 +1,7 @@ """ Deprecated entry point for a component that has been moved. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md from ldclient.impl.integrations.files.file_data_source import _FileDataSource from ldclient.interfaces import UpdateProcessor diff --git a/ldclient/fixed_thread_pool.py b/ldclient/fixed_thread_pool.py index 17ded510..27fca13d 100644 --- a/ldclient/fixed_thread_pool.py +++ b/ldclient/fixed_thread_pool.py @@ -1,7 +1,7 @@ """ Internal helper class for thread management. 
""" -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md from threading import Event, Lock, Thread diff --git a/ldclient/lru_cache.py b/ldclient/lru_cache.py index 9833287b..f8f18e37 100644 --- a/ldclient/lru_cache.py +++ b/ldclient/lru_cache.py @@ -1,7 +1,7 @@ """ Internal helper class for caching. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md from collections import OrderedDict diff --git a/ldclient/memoized_value.py b/ldclient/memoized_value.py index 08fb2d51..7abc944f 100644 --- a/ldclient/memoized_value.py +++ b/ldclient/memoized_value.py @@ -1,7 +1,7 @@ """ Internal helper class for caching. No longer used. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md from threading import RLock diff --git a/ldclient/operators.py b/ldclient/operators.py index 208edcbd..253e8a8b 100644 --- a/ldclient/operators.py +++ b/ldclient/operators.py @@ -1,7 +1,7 @@ """ Implementation details of feature flag evaluation. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md import logging import re diff --git a/ldclient/polling.py b/ldclient/polling.py index 6f1be549..59803a30 100644 --- a/ldclient/polling.py +++ b/ldclient/polling.py @@ -1,7 +1,7 @@ """ Default implementation of the polling component. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md from threading import Thread diff --git a/ldclient/repeating_timer.py b/ldclient/repeating_timer.py index 49fd043c..eb8aa771 100644 --- a/ldclient/repeating_timer.py +++ b/ldclient/repeating_timer.py @@ -1,7 +1,7 @@ """ Internal helper class for repeating tasks. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md from threading import Event, Thread diff --git a/ldclient/rwlock.py b/ldclient/rwlock.py index feaa510e..251d5eb4 100644 --- a/ldclient/rwlock.py +++ b/ldclient/rwlock.py @@ -1,7 +1,7 @@ """ Internal helper class for locking. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md import threading diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index a1e0f6fb..49d853c7 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -3,7 +3,7 @@ Based on: https://bitbucket.org/btubbs/sseclient/src/a47a380a3d7182a205c0f1d5eb470013ce796b4d/sseclient.py?at=default&fileviewer=file-view-default """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md import re import time diff --git a/ldclient/streaming.py b/ldclient/streaming.py index eba4d5fc..43e815a4 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -1,7 +1,7 @@ """ Default implementation of the streaming component. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md from collections import namedtuple diff --git a/ldclient/user_filter.py b/ldclient/user_filter.py index 6379287e..f7dc7f9d 100644 --- a/ldclient/user_filter.py +++ b/ldclient/user_filter.py @@ -1,7 +1,7 @@ """ Internal helper class for filtering out private attributes. 
""" -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md import jsonpickle import six diff --git a/ldclient/util.py b/ldclient/util.py index 02c84ea0..b1d533a2 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -1,7 +1,7 @@ """ General internal helper functions. """ -# currently excluded from documentation - see ldclient.rst +# currently excluded from documentation - see docs/README.md import certifi import logging From c496c35485b238738731c05161af759998ca7664 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 15:19:08 -0800 Subject: [PATCH 076/356] misc cleanup --- ldclient/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 5dfb011f..8624bc10 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -122,7 +122,7 @@ def get(): __lock.unlock() -# currently hidden from documentation - see api-main.rst +# currently hidden from documentation - see docs/README.md class NullHandler(logging.Handler): """A :class:`logging.Handler` implementation that does nothing. From 68bb4e488ea24051c1a99ce6331028ddb74220d7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 15:26:49 -0800 Subject: [PATCH 077/356] RTD config fixes --- .readthedocs.yml | 2 +- docs/requirements.txt | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 56781a23..2739d0f6 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,7 +1,7 @@ version: 2 python: - version: 3.7 + version: 3.5 install: - requirements: docs/requirements.txt - requirements: requirements.txt diff --git a/docs/requirements.txt b/docs/requirements.txt index ef90363c..5e064a36 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1 +1,10 @@ sphinx<2.0 + +backoff>=1.4.3 +certifi>=2018.4.16 +expiringdict>=1.1.4 +six>=1.10.0 +pyRFC3339>=1.0 +jsonpickle==0.9.3 +semver>=2.7.9 +urllib3>=1.22.0 From 6766920631d3877f24f7703e7de3f774d2519947 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 15:43:10 -0800 Subject: [PATCH 078/356] minor edit --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index 1eb6a999..fee89947 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,7 +14,7 @@ Every public class, method, and module should have a docstring. Classes and meth "Public" here means things that we want third-party developers to use. The SDK also contains many modules and classes that are not actually private (i.e. they aren't prefixed with `_`), but are for internal use only and aren't supported for any other use (we would like to reduce the amount of these in future). -To cause a class or method in an existing module to be added to the docs, all that's necessary is to give it a docstring. +To add an undocumented class or method in an existing module to the docs, just give it a docstring. To add a new module to the docs, give it a docstring and then add a link to it in the appropriate `api-*.rst` file, in the same format as the existing links. From 74e82c8de2f6f45d02c472302436f4d57e2e22f4 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 5 Feb 2019 16:40:08 -0800 Subject: [PATCH 079/356] misc. 
doc comment edits --- ldclient/__init__.py | 4 ++-- ldclient/client.py | 6 +----- ldclient/config.py | 11 +++++------ ldclient/flag.py | 2 +- ldclient/flags_state.py | 2 +- 5 files changed, 10 insertions(+), 15 deletions(-) diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 8624bc10..d75b6b61 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -32,7 +32,7 @@ def set_config(config): re-initialized with the new configuration (this will result in the next call to :func:`ldclient.get()` returning a new client instance). - :param string sdk_key: the new SDK key + :param ldclient.config.Config config: the client configuration """ global __config global __client @@ -93,7 +93,7 @@ def get(): """Returns the shared SDK client instance, using the current global configuration. To use the SDK as a singleton, first make sure you have called :func:`ldclient.set_sdk_key()` or - :func:`ldclient.set_config()` at startup time. Then `get()` will return the same shared + :func:`ldclient.set_config()` at startup time. Then ``get()`` will return the same shared :class:`ldclient.client.LDClient` instance each time. The client will be initialized if it has not been already. diff --git a/ldclient/client.py b/ldclient/client.py index 61be996e..8ae8a5a1 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -71,10 +71,6 @@ class LDClient(object): def __init__(self, sdk_key=None, config=None, start_wait=5): """Constructs a new LDClient instance. - Rather than calling this constructor directly, you can call the `ldclient.set_sdk_key`, - `ldclient.set_config`, and `ldclient.get` functions to configure and use a singleton - client instance. - :param string sdk_key: the SDK key for your LaunchDarkly environment :param ldclient.config.Config config: optional custom configuration :param float start_wait: the number of seconds to wait for a successful connection to LaunchDarkly @@ -203,7 +199,7 @@ def is_offline(self): def is_initialized(self): """Returns true if the client has successfully connected to LaunchDarkly. - :rype: bool + :rtype: bool """ return self.is_offline() or self._config.use_ldd or self._update_processor.initialized() diff --git a/ldclient/config.py b/ldclient/config.py index 2d99c72e..f8ef61d0 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -166,9 +166,8 @@ def copy_with_new_sdk_key(self, new_sdk_key): user_keys_flush_interval=self.__user_keys_flush_interval, inline_users_in_events=self.__inline_users_in_events) + # for internal use only - probably should be part of the client logic def get_default(self, key, default): - """Used internally by the SDK client to get the default value for a flag. - """ return default if key not in self.__defaults else self.__defaults[key] @property @@ -179,22 +178,22 @@ def sdk_key(self): def base_uri(self): return self.__base_uri + # for internal use only - also no longer used, will remove @property def get_latest_flags_uri(self): - """Used internally, deprecated. - - .. 
deprecated:: 5.0.0
-        """
         return self.__base_uri + GET_LATEST_FEATURES_PATH

+    # for internal use only - should construct the URL path in the events code, not here
     @property
     def events_uri(self):
         return self.__events_uri + '/bulk'

+    # for internal use only
     @property
     def stream_base_uri(self):
         return self.__stream_uri

+    # for internal use only - should construct the URL path in the streaming code, not here
     @property
     def stream_uri(self):
         return self.__stream_uri + STREAM_FLAGS_PATH
diff --git a/ldclient/flag.py b/ldclient/flag.py
index 83986092..88739ba0 100644
--- a/ldclient/flag.py
+++ b/ldclient/flag.py
@@ -45,7 +45,7 @@ def variation_index(self):
         """The index of the returned value within the flag's list of variations,
         e.g. 0 for the first variation -- or None if the default value was returned.
 
-        :rtype: int
+        :rtype: int or None
         """
         return self.__variation_index
diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py
index a0ba668b..2f611aa6 100644
--- a/ldclient/flags_state.py
+++ b/ldclient/flags_state.py
@@ -63,7 +63,7 @@ def get_flag_reason(self, key):
         :param string key: the feature flag key
         :return: a dictionary describing the reason; None if reasons were not recorded, or if
           there was no such flag
-        :rtype: dict
+        :rtype: dict or None
         """
         meta = self.__flag_metadata.get(key)
         return None if meta is None else meta.get('reason')

From 9316b0d2068df4d53bb2102abe5efd8f3fe0cccc Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 5 Feb 2019 16:22:17 -0800
Subject: [PATCH 080/356] use RTD theme

---
 docs/Makefile         | 2 +-
 docs/conf.py          | 2 +-
 docs/requirements.txt | 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/Makefile b/docs/Makefile
index 13edc19b..ebce0c0b 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -13,7 +13,7 @@ help:
 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
 
 install:
-	pip install sphinx
+	pip install -r requirements.txt
 
 html: install
 	@$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/conf.py b/docs/conf.py
index 479f3bc8..f1dc322b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -83,7 +83,7 @@
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'alabaster'
+html_theme = 'sphinx_rtd_theme'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 5e064a36..f6c80357 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,4 +1,5 @@
 sphinx<2.0
+sphinx_rtd_theme
 backoff>=1.4.3
 certifi>=2018.4.16

From 338910cb4d5528ea917c40d0ba2286457542b94c Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Mon, 11 Feb 2019 12:54:37 -0800
Subject: [PATCH 081/356] remove jsonpickle

---
 ldclient/event_processor.py | 4 ++--
 ldclient/user_filter.py     | 1 -
 requirements.txt            | 1 -
 test-requirements.txt       | 1 +
 testing/test_flags_state.py | 2 ++
 5 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py
index f7a9178f..30619298 100644
--- a/ldclient/event_processor.py
+++ b/ldclient/event_processor.py
@@ -6,7 +6,7 @@ from collections import namedtuple
 from email.utils import parsedate
 import errno
-import jsonpickle
+import json
 from threading import Event, Lock, Thread
 import six
 import time
@@ -168,7 +168,7 @@ def run(self):
     def _do_send(self, output_events):
         # noinspection PyBroadException
         try:
-            json_body = jsonpickle.encode(output_events, unpicklable=False)
+            json_body = json.dumps(output_events)
             log.debug('Sending events payload: ' + json_body)
             hdrs = _headers(self._config.sdk_key)
             hdrs['X-LaunchDarkly-Event-Schema'] = str(__CURRENT_EVENT_SCHEMA__)
diff --git a/ldclient/user_filter.py b/ldclient/user_filter.py
index f7dc7f9d..fe5baa39 100644
--- a/ldclient/user_filter.py
+++ b/ldclient/user_filter.py
@@ -3,7 +3,6 @@
 """
 # currently excluded from documentation - see docs/README.md
 
-import jsonpickle
 import six
 
diff --git a/requirements.txt b/requirements.txt
index f86f3039..2e3cba6f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,6 +3,5 @@ certifi>=2018.4.16
 expiringdict>=1.1.4
 six>=1.10.0
 pyRFC3339>=1.0
-jsonpickle==0.9.3
 semver>=2.7.9
 urllib3>=1.22.0
diff --git a/test-requirements.txt b/test-requirements.txt
index 88cbbc2e..3bc09d90 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,6 +3,7 @@ pytest>=2.8
 redis>=2.10.5
 boto3>=1.9.71
 coverage>=4.4
+jsonpickle==0.9.3
 pytest-capturelog>=0.7
 pytest-cov>=2.4.0
 codeclimate-test-reporter>=0.2.1
diff --git a/testing/test_flags_state.py b/testing/test_flags_state.py
index 45ea6404..f8e6d464 100644
--- a/testing/test_flags_state.py
+++ b/testing/test_flags_state.py
@@ -58,6 +58,8 @@ def test_can_convert_to_json_string():
     str = state.to_json_string()
     assert json.loads(str) == obj
 
+# We don't actually use jsonpickle in the SDK, but FeatureFlagsState has a magic method that makes it
+# behave correctly in case the application uses jsonpickle to serialize it.
 def test_can_serialize_with_jsonpickle():
     state = FeatureFlagsState(True)
     flag1 = { 'key': 'key1', 'version': 100, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False }
     flag2 = { 'key': 'key2', 'version': 200, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 }

From f586cd11f3be50ba0f41e547d58b3eb390a3f4fd Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Mon, 11 Feb 2019 13:05:59 -0800
Subject: [PATCH 082/356] misc doc comment/readme edits prior to publishing
 docs

---
 README.md          | 12 ++++++++----
 ldclient/client.py | 14 ++++++++++++--
 2 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 61e67050..6013179f 100644
--- a/README.md
+++ b/README.md
@@ -81,20 +81,22 @@ The SDK is tested with the most recent patch releases of Python 2.7, 3.3, 3.4, 3
 Database integrations
 ---------------------
 
-Feature flag data can be kept in a persistent store using Consul, DynamoDB, or Redis. These adapters are implemented in the `Consul`, `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://github.com/launchdarkly/python-client-private/blob/master/ldclient/integrations.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information.
+Feature flag data can be kept in a persistent store using Consul, DynamoDB, or Redis. These adapters are implemented in the `Consul`, `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://launchdarkly-python-sdk.readthedocs.io/en/latest/api-integrations.html#module-ldclient.integrations) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information.
 
 Note that Consul is not supported in Python 3.3 or 3.4.
 
 Using flag data from a file
 ---------------------------
 
-For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See [`file_data_source.py`](https://github.com/launchdarkly/python-client/blob/master/ldclient/file_data_source.py) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/reading-flags-from-a-file) for more details.
+For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See [`ldclient.integrations.Files`](https://launchdarkly-python-sdk.readthedocs.io/en/latest/api-integrations.html#ldclient.integrations.Files) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/reading-flags-from-a-file) for more details.
 
 Learn more
------------
+----------
 
 Check out our [documentation](http://docs.launchdarkly.com) for in-depth instructions on configuring and using LaunchDarkly. You can also head straight to the [complete reference guide for this SDK](http://docs.launchdarkly.com/docs/python-sdk-reference).
 
+Generated API documentation is on [readthedocs.io](https://launchdarkly-python-sdk.readthedocs.io/en/latest/).
+
 Testing
 -------
 
@@ -116,16 +118,18 @@ About LaunchDarkly
 * Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file.
 * Grant access to certain features based on user attributes, like payment plan (eg: users on the ‘gold’ plan get access to more features than users in the ‘silver’ plan). Disable parts of your application to facilitate maintenance, without taking everything offline.
 * LaunchDarkly provides feature flag SDKs for
-    * [Java](http://docs.launchdarkly.com/docs/java-sdk-reference "Java SDK")
+    * [Java](http://docs.launchdarkly.com/docs/java-sdk-reference "LaunchDarkly Java SDK")
     * [JavaScript](http://docs.launchdarkly.com/docs/js-sdk-reference "LaunchDarkly JavaScript SDK")
     * [PHP](http://docs.launchdarkly.com/docs/php-sdk-reference "LaunchDarkly PHP SDK")
     * [Python](http://docs.launchdarkly.com/docs/python-sdk-reference "LaunchDarkly Python SDK")
     * [Go](http://docs.launchdarkly.com/docs/go-sdk-reference "LaunchDarkly Go SDK")
     * [Node.JS](http://docs.launchdarkly.com/docs/node-sdk-reference "LaunchDarkly Node SDK")
+    * [Electron](http://docs.launchdarkly.com/docs/electron-sdk-reference "LaunchDarkly Electron SDK")
     * [.NET](http://docs.launchdarkly.com/docs/dotnet-sdk-reference "LaunchDarkly .Net SDK")
     * [Ruby](http://docs.launchdarkly.com/docs/ruby-sdk-reference "LaunchDarkly Ruby SDK")
     * [iOS](http://docs.launchdarkly.com/docs/ios-sdk-reference "LaunchDarkly iOS SDK")
    * [Android](http://docs.launchdarkly.com/docs/android-sdk-reference "LaunchDarkly Android SDK")
+    * [C/C++](http://docs.launchdarkly.com/docs/c-sdk-reference "LaunchDarkly C/C++ SDK")
 * Explore LaunchDarkly
     * [launchdarkly.com](http://www.launchdarkly.com/ "LaunchDarkly Main Website") for more information
     * [docs.launchdarkly.com](http://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDKs
diff --git a/ldclient/client.py b/ldclient/client.py
index 8ae8a5a1..d1759f6f 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -166,6 +166,10 @@ def _send_event(self, event):
     def track(self, event_name, user, data=None):
         """Tracks that a user performed an event.
 
+        LaunchDarkly automatically tracks pageviews and clicks that are specified in the Goals
+        section of the dashboard. This can be used to track custom goals or other events that do
+        not currently have goals.
+
         :param string event_name: the name of the event, which may correspond to a goal in A/B tests
         :param dict user: the attributes of the user
         :param data: optional additional data associated with the event
@@ -199,12 +203,17 @@ def is_offline(self):
     def is_initialized(self):
         """Returns true if the client has successfully connected to LaunchDarkly.
 
+        If this returns false, it means that the client has not yet successfully connected to LaunchDarkly.
+        It might still be in the process of starting up, or it might be attempting to reconnect after an
+        unsuccessful attempt, or it might have received an unrecoverable error (such as an invalid SDK key)
+        and given up.
+
         :rtype: bool
         """
         return self.is_offline() or self._config.use_ldd or self._update_processor.initialized()
 
     def flush(self):
-        """Flushes all pending events.
+        """Flushes all pending analytics events.
 
         Normally, batches of events are delivered in the background at intervals determined by the
         ``flush_interval`` property of :class:`ldclient.config.Config`. Calling ``flush()``
@@ -400,7 +409,8 @@ def all_flags_state(self, user, **kwargs):
         return state
 
     def secure_mode_hash(self, user):
-        """Generates a hash value for a user, for use by the JavaScript SDK.
+        """Computes an HMAC signature of a user signed with the client's SDK key,
+        for use with the JavaScript SDK.
 
         For more information, see the JavaScript SDK Reference Guide on
        `Secure mode `_.
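Since the docstring above only describes the computation loosely, here is a minimal standalone sketch of such a secure-mode hash, assuming an HMAC-SHA256 over the user's key, signed with the SDK key and hex-encoded; `secure_mode_hash_sketch` is a hypothetical name for illustration, not the SDK's API:

    import hashlib
    import hmac

    def secure_mode_hash_sketch(sdk_key, user):
        # the signature covers only the user's key, not the whole user object
        return hmac.new(sdk_key.encode('utf-8'),
                        str(user.get('key')).encode('utf-8'),
                        hashlib.sha256).hexdigest()

    # e.g. secure_mode_hash_sketch('my-sdk-key', {'key': 'userkey'})

The resulting hex string is what a client-side JavaScript SDK in secure mode would compare against, which is why it must be computed server-side with the secret SDK key.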
From 9731f4de40b50ba848256e8d23bab249464edbe2 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 12 Feb 2019 16:05:01 -0800
Subject: [PATCH 083/356] add git placeholders for unused dirs

---
 docs/_static/.gitkeep    | 0
 docs/_templates/.gitkeep | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 docs/_static/.gitkeep
 create mode 100644 docs/_templates/.gitkeep

diff --git a/docs/_static/.gitkeep b/docs/_static/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/docs/_templates/.gitkeep b/docs/_templates/.gitkeep
new file mode 100644
index 00000000..e69de29b

From 87336db16da33840820858215949b5b88820c618 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 12 Feb 2019 16:16:35 -0800
Subject: [PATCH 084/356] use default theme

---
 docs/conf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/conf.py b/docs/conf.py
index f1dc322b..10f481f3 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -83,7 +83,7 @@
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'sphinx_rtd_theme'
+#html_theme = 'sphinx_rtd_theme'  # ReadTheDocs will set this
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the

From 2dedbc407db52f99b840a42b89255c5b2e84821b Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Mon, 25 Feb 2019 15:33:37 -0800
Subject: [PATCH 085/356] add experimentation event overrides for rules and
 fallthrough

---
 ldclient/client.py                  | 31 ++++++-------
 ldclient/flag.py                    | 19 +++-----
 ldclient/impl/event_factory.py      | 71 +++++++++++++++++++++++++++++
 testing/test_flag.py                | 64 +++++++++++++-------------
 testing/test_ldclient.py            | 69 ++++++++++++++++++++++++++--
 testing/test_ldclient_evaluation.py |  2 +-
 6 files changed, 190 insertions(+), 66 deletions(-)
 create mode 100644 ldclient/impl/event_factory.py

diff --git a/ldclient/client.py b/ldclient/client.py
index d1759f6f..32dae0ae 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -13,6 +13,7 @@
 from ldclient.feature_store import _FeatureStoreDataSetSorter
 from ldclient.flag import EvaluationDetail, evaluate, error_reason
 from ldclient.flags_state import FeatureFlagsState
+from ldclient.impl.event_factory import _EventFactory
 from ldclient.interfaces import FeatureStore
 from ldclient.polling import PollingUpdateProcessor
 from ldclient.streaming import StreamingUpdateProcessor
@@ -90,6 +91,8 @@ def __init__(self, sdk_key=None, config=None, start_wait=5):
         self._event_processor = None
         self._lock = Lock()
+        self._event_factory_default = _EventFactory(False)
+        self._event_factory_with_reasons = _EventFactory(True)
 
         self._store = _FeatureStoreClientWrapper(self._config.feature_store)
         """ :type: FeatureStore """
@@ -241,7 +244,7 @@ def variation(self, key, user, default):
             available from LaunchDarkly
         :return: one of the flag's variation values, or the default value
         """
-        return self._evaluate_internal(key, user, default, False).value
+        return self._evaluate_internal(key, user, default, self._event_factory_default).value
 
     def variation_detail(self, key, user, default):
         """Determines the variation of a feature flag for a user, like :func:`variation()`, but also
@@ -258,9 +261,9 @@ def variation_detail(self, key, user, default):
         :return: an object describing the result
         :rtype: EvaluationDetail
         """
-        return self._evaluate_internal(key, user, default, True)
+        return self._evaluate_internal(key, user, default, self._event_factory_with_reasons)
 
-    def _evaluate_internal(self, key, user, default, include_reasons_in_events):
+    def _evaluate_internal(self, key, user, default, event_factory):
         default = self._config.get_default(key, default)
 
         if self._config.offline:
@@ -269,14 +272,6 @@ def _evaluate_internal(self, key, user, default, include_reasons_in_events):
         if user is not None:
             self._sanitize_user(user)
 
-        def send_event(value, variation=None, flag=None, reason=None):
-            self._send_event({'kind': 'feature', 'key': key, 'user': user,
-                              'value': value, 'variation': variation, 'default': default,
-                              'version': flag.get('version') if flag else None,
-                              'trackEvents': flag.get('trackEvents') if flag else None,
-                              'debugEventsUntilDate': flag.get('debugEventsUntilDate') if flag else None,
-                              'reason': reason if include_reasons_in_events else None})
-
         if not self.is_initialized():
             if self._store.initialized:
                 log.warn("Feature Flag evaluation attempted before client has initialized - using last known values from feature store for feature key: " + key)
@@ -284,7 +279,7 @@ def send_event(value, variation=None, flag=None, reason=None):
                 log.warn("Feature Flag evaluation attempted before client has initialized! Feature store unavailable - returning default: "
                          + str(default) + " for feature key: " + key)
                 reason = error_reason('CLIENT_NOT_READY')
-                send_event(default, None, None, reason)
+                self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason))
                 return EvaluationDetail(default, None, reason)
 
         if user is not None and user.get('key', "") == "":
@@ -296,32 +291,32 @@ def send_event(value, variation=None, flag=None, reason=None):
             log.error("Unexpected error while retrieving feature flag \"%s\": %s" % (key, repr(e)))
             log.debug(traceback.format_exc())
             reason = error_reason('EXCEPTION')
-            send_event(default, None, None, reason)
+            self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason))
             return EvaluationDetail(default, None, reason)
         if not flag:
             reason = error_reason('FLAG_NOT_FOUND')
-            send_event(default, None, None, reason)
+            self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason))
            return EvaluationDetail(default, None, reason)
         else:
             if user is None or user.get('key') is None:
                 reason = error_reason('USER_NOT_SPECIFIED')
-                send_event(default, None, flag, reason)
+                self._send_event(event_factory.new_default_event(flag, user, default, reason))
                 return EvaluationDetail(default, None, reason)
 
             try:
-                result = evaluate(flag, user, self._store, include_reasons_in_events)
+                result = evaluate(flag, user, self._store, event_factory)
                 for event in result.events or []:
                     self._send_event(event)
                 detail = result.detail
                 if detail.is_default_value():
                     detail = EvaluationDetail(default, None, detail.reason)
-                send_event(detail.value, detail.variation_index, flag, detail.reason)
+                self._send_event(event_factory.new_eval_event(flag, user, detail, default))
                 return detail
             except Exception as e:
                 log.error("Unexpected error while evaluating feature flag \"%s\": %s" % (key, repr(e)))
                 log.debug(traceback.format_exc())
                 reason = error_reason('EXCEPTION')
-                send_event(default, None, flag, reason)
+                self._send_event(event_factory.new_default_event(flag, user, default, reason))
                 return EvaluationDetail(default, None, reason)
 
     def all_flags(self, user):
diff --git a/ldclient/flag.py b/ldclient/flag.py
index 88739ba0..65f2812a 100644
--- a/ldclient/flag.py
+++ b/ldclient/flag.py
@@ -105,16 +105,16 @@ def error_reason(error_kind):
     return {'kind': 'ERROR', 'errorKind': error_kind}
 
-def evaluate(flag, user, store, include_reasons_in_events = False):
+def evaluate(flag, user, store, event_factory):
     prereq_events = []
-    detail = _evaluate(flag, user, store, prereq_events, include_reasons_in_events)
+    detail = _evaluate(flag, user, store, prereq_events, event_factory)
     return EvalResult(detail = detail, events = prereq_events)
 
-def _evaluate(flag, user, store, prereq_events, include_reasons_in_events):
+def _evaluate(flag, user, store, prereq_events, event_factory):
     if not flag.get('on', False):
         return _get_off_value(flag, {'kind': 'OFF'})
 
-    prereq_failure_reason = _check_prerequisites(flag, user, store, prereq_events, include_reasons_in_events)
+    prereq_failure_reason = _check_prerequisites(flag, user, store, prereq_events, event_factory)
     if prereq_failure_reason is not None:
         return _get_off_value(flag, prereq_failure_reason)
 
@@ -135,7 +135,7 @@ def _evaluate(flag, user, store, prereq_events, event_factory):
     return _get_value_for_variation_or_rollout(flag, flag['fallthrough'], user, {'kind': 'FALLTHROUGH'})
 
-def _check_prerequisites(flag, user, store, events, include_reasons_in_events):
+def _check_prerequisites(flag, user, store, events, event_factory):
     failed_prereq = None
     prereq_res = None
     for prereq in flag.get('prerequisites') or []:
@@ -144,17 +144,12 @@ def _check_prerequisites(flag, user, store, events, event_factory):
             log.warn("Missing prereq flag: " + prereq.get('key'))
             failed_prereq = prereq
         else:
-            prereq_res = _evaluate(prereq_flag, user, store, events, include_reasons_in_events)
+            prereq_res = _evaluate(prereq_flag, user, store, events, event_factory)
             # Note that if the prerequisite flag is off, we don't consider it a match no matter what its
             # off variation was. But we still need to evaluate it in order to generate an event.
             if (not prereq_flag.get('on', False)) or prereq_res.variation_index != prereq.get('variation'):
                 failed_prereq = prereq
-            event = {'kind': 'feature', 'key': prereq.get('key'), 'user': user,
-                     'variation': prereq_res.variation_index, 'value': prereq_res.value,
-                     'version': prereq_flag.get('version'), 'prereqOf': flag.get('key'),
-                     'trackEvents': prereq_flag.get('trackEvents'),
-                     'debugEventsUntilDate': prereq_flag.get('debugEventsUntilDate'),
-                     'reason': prereq_res.reason if prereq_res and include_reasons_in_events else None}
+            event = event_factory.new_eval_event(prereq_flag, user, prereq_res, None, flag)
             events.append(event)
     if failed_prereq:
         return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.get('key')}
diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py
new file mode 100644
index 00000000..0c6fcc8c
--- /dev/null
+++ b/ldclient/impl/event_factory.py
@@ -0,0 +1,71 @@
+
+# Event constructors are centralized here to avoid mistakes and repetitive logic.
+# The LDClient owns two instances of _EventFactory: one that always embeds evaluation reasons
+# in the events (for when variation_detail is called) and one that doesn't.
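# To make the factory's output concrete before reading the constructors below:
# for a flag with trackEvents set, the non-reasons factory would produce an event
# shaped roughly like this (a sketch only -- the flag key, user, and values here
# are invented for illustration):
#
#     {
#         'kind': 'feature',
#         'key': 'example-flag',            # hypothetical flag key
#         'user': { 'key': 'userkey' },
#         'value': True,
#         'variation': 0,
#         'default': False,
#         'version': 7,
#         'trackEvents': True               # only present when tracking applies
#     }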
+class _EventFactory(object):
+    def __init__(self, with_reasons):
+        self._with_reasons = with_reasons
+
+    def new_eval_event(self, flag, user, detail, default_value, prereq_of_flag = None):
+        add_experiment_data = self._is_experiment(flag, detail.reason)
+        e = {
+            'kind': 'feature',
+            'key': flag.get('key'),
+            'user': user,
+            'value': detail.value,
+            'variation': detail.variation_index,
+            'default': default_value,
+            'version': flag.get('version')
+        }
+        # the following properties are handled separately so we don't waste bandwidth on unused keys
+        if add_experiment_data or flag.get('trackEvents', False):
+            e['trackEvents'] = True
+        if flag.get('debugEventsUntilDate', None):
+            e['debugEventsUntilDate'] = flag.get('debugEventsUntilDate')
+        if prereq_of_flag is not None:
+            e['prereqOf'] = prereq_of_flag.get('key')
+        if add_experiment_data or self._with_reasons:
+            e['reason'] = detail.reason
+        return e
+
+    def new_default_event(self, flag, user, default_value, reason):
+        add_experiment_data = self._is_experiment(flag, reason)
+        e = {
+            'kind': 'feature',
+            'key': flag.get('key'),
+            'user': user,
+            'value': default_value,
+            'default': default_value,
+            'version': flag.get('version')
+        }
+        # the following properties are handled separately so we don't waste bandwidth on unused keys
+        if add_experiment_data or flag.get('trackEvents', False):
+            e['trackEvents'] = True
+        if flag.get('debugEventsUntilDate', None):
+            e['debugEventsUntilDate'] = flag.get('debugEventsUntilDate')
+        if add_experiment_data or self._with_reasons:
+            e['reason'] = reason
+        return e
+
+    def new_unknown_flag_event(self, key, user, default_value, reason):
+        e = {
+            'kind': 'feature',
+            'key': key,
+            'user': user,
+            'value': default_value,
+            'default': default_value
+        }
+        if self._with_reasons:
+            e['reason'] = reason
+        return e
+
+    def _is_experiment(self, flag, reason):
+        if reason is not None:
+            kind = reason['kind']
+            if kind == 'RULE_MATCH':
+                index = reason['ruleIndex']
+                rules = flag.get('rules') or []
+                return index >= 0 and index < len(rules) and rules[index].get('trackEvents', False)
+            elif kind == 'FALLTHROUGH':
+                return flag.get('trackEventsFallthrough', False)
+        return False
diff --git a/testing/test_flag.py b/testing/test_flag.py
index 97f64af0..9ebd56b5 100644
--- a/testing/test_flag.py
+++ b/testing/test_flag.py
@@ -1,10 +1,12 @@
 import pytest
 from ldclient.feature_store import InMemoryFeatureStore
 from ldclient.flag import EvaluationDetail, EvalResult, _bucket_user, evaluate
+from ldclient.impl.event_factory import _EventFactory
 from ldclient.versioned_data_kind import FEATURES, SEGMENTS
 
 
 empty_store = InMemoryFeatureStore()
+event_factory = _EventFactory(False)
 
 
 def make_boolean_flag_with_rules(rules):
@@ -27,7 +29,7 @@ def test_flag_returns_off_variation_if_flag_is_off():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail('b', 1, {'kind': 'OFF'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_none_if_flag_is_off_and_off_variation_is_unspecified():
     flag = {
@@ -37,7 +39,7 @@ def test_flag_returns_none_if_flag_is_off_and_off_variation_is_unspecified():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail(None, None, {'kind': 'OFF'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_off_variation_is_too_high():
     flag = {
@@ -48,7 +50,7 @@ def test_flag_returns_error_if_off_variation_is_too_high():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_off_variation_is_negative():
     flag = {
@@ -59,7 +61,7 @@ def test_flag_returns_error_if_off_variation_is_negative():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_off_variation_if_prerequisite_not_found():
     flag = {
@@ -72,7 +74,7 @@ def test_flag_returns_off_variation_if_prerequisite_not_found():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_off_variation_and_event_if_prerequisite_is_off():
     store = InMemoryFeatureStore()
@@ -98,9 +100,9 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_off():
     store.upsert(FEATURES, flag1)
     user = { 'key': 'x' }
     detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'})
-    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e',
-        'version': 2, 'user': user, 'prereqOf': 'feature0', 'trackEvents': False, 'debugEventsUntilDate': None, 'reason': None}]
-    assert evaluate(flag, user, store) == EvalResult(detail, events_should_be)
+    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None,
+        'version': 2, 'user': user, 'prereqOf': 'feature0'}]
+    assert evaluate(flag, user, store, event_factory) == EvalResult(detail, events_should_be)
 
 def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met():
     store = InMemoryFeatureStore()
@@ -124,9 +126,9 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met():
     store.upsert(FEATURES, flag1)
     user = { 'key': 'x' }
     detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'})
-    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd',
-        'version': 2, 'user': user, 'prereqOf': 'feature0', 'trackEvents': False, 'debugEventsUntilDate': None, 'reason': None}]
-    assert evaluate(flag, user, store) == EvalResult(detail, events_should_be)
+    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None,
+        'version': 2, 'user': user, 'prereqOf': 'feature0'}]
+    assert evaluate(flag, user, store, event_factory) == EvalResult(detail, events_should_be)
 
 def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules():
     store = InMemoryFeatureStore()
@@ -150,9 +152,9 @@ def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_ru
     store.upsert(FEATURES, flag1)
     user = { 'key': 'x' }
     detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'})
-    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e',
-        'version': 2, 'user': user, 'prereqOf': 'feature0', 'trackEvents': False, 'debugEventsUntilDate': None, 'reason': None}]
-    assert evaluate(flag, user, store) == EvalResult(detail, events_should_be)
+    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None,
+        'version': 2, 'user': user, 'prereqOf': 'feature0'}]
+    assert evaluate(flag, user, store, event_factory) == EvalResult(detail, events_should_be)
 
 def test_flag_returns_error_if_fallthrough_variation_is_too_high():
     flag = {
@@ -163,7 +165,7 @@ def test_flag_returns_error_if_fallthrough_variation_is_too_high():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_fallthrough_variation_is_negative():
     flag = {
@@ -174,7 +176,7 @@ def test_flag_returns_error_if_fallthrough_variation_is_negative():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_fallthrough_has_no_variation_or_rollout():
     flag = {
@@ -185,7 +187,7 @@ def test_flag_returns_error_if_fallthrough_has_no_variation_or_rollout():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_fallthrough_has_rollout_with_no_variations():
     flag = {
@@ -197,7 +199,7 @@ def test_flag_returns_error_if_fallthrough_has_rollout_with_no_variations():
     }
     user = { 'key': 'x' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_matches_user_from_targets():
     flag = {
@@ -210,35 +212,35 @@ def test_flag_matches_user_from_targets():
     }
     user = { 'key': 'userkey' }
     detail = EvaluationDetail('c', 2, {'kind': 'TARGET_MATCH'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_matches_user_from_rules():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 1}
     flag = make_boolean_flag_with_rules([rule])
     user = { 'key': 'userkey' }
     detail = EvaluationDetail(True, 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'id'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_rule_variation_is_too_high():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 999}
     flag = make_boolean_flag_with_rules([rule])
     user = { 'key': 'userkey' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_rule_variation_is_negative():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': -1}
     flag = make_boolean_flag_with_rules([rule])
     user = { 'key': 'userkey' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_rule_has_no_variation_or_rollout():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}]}
     flag = make_boolean_flag_with_rules([rule])
     user = { 'key': 'userkey' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_flag_returns_error_if_rule_has_rollout_with_no_variations():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}],
@@ -246,7 +248,7 @@ def test_flag_returns_error_if_rule_has_rollout_with_no_variations():
     flag = make_boolean_flag_with_rules([rule])
     user = { 'key': 'userkey' }
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
-    assert evaluate(flag, user, empty_store) == EvalResult(detail, [])
+    assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, [])
 
 def test_segment_match_clause_retrieves_segment_from_store():
     store = InMemoryFeatureStore()
@@ -277,7 +279,7 @@ def test_segment_match_clause_retrieves_segment_from_store():
         ]
     }
 
-    assert evaluate(flag, user, store).detail.value == True
+    assert evaluate(flag, user, store, event_factory).detail.value == True
 
 def test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found():
     user = { "key": "foo" }
@@ -300,7 +302,7 @@ def test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found(
         ]
     }
 
-    assert evaluate(flag, user, empty_store).detail.value == False
+    assert evaluate(flag, user, empty_store, event_factory).detail.value == False
 
 def test_clause_matches_builtin_attribute():
     clause = {
@@ -310,7 +312,7 @@ def test_clause_matches_builtin_attribute():
     }
     user = { 'key': 'x', 'name': 'Bob' }
     flag = _make_bool_flag_from_clause(clause)
-    assert evaluate(flag, user, empty_store).detail.value == True
+    assert evaluate(flag, user, empty_store, event_factory).detail.value == True
 
 def test_clause_matches_custom_attribute():
     clause = {
@@ -320,7 +322,7 @@ def test_clause_matches_custom_attribute():
     }
     user = { 'key': 'x', 'name': 'Bob', 'custom': { 'legs': 4 } }
     flag = _make_bool_flag_from_clause(clause)
-    assert evaluate(flag, user, empty_store).detail.value == True
+    assert evaluate(flag, user, empty_store, event_factory).detail.value == True
 
 def test_clause_returns_false_for_missing_attribute():
     clause = {
@@ -330,7 +332,7 @@ def test_clause_returns_false_for_missing_attribute():
     }
     user = { 'key': 'x', 'name': 'Bob' }
     flag = _make_bool_flag_from_clause(clause)
-    assert evaluate(flag, user, empty_store).detail.value == False
+    assert evaluate(flag, user, empty_store, event_factory).detail.value == False
 
 def test_clause_can_be_negated():
     clause = {
@@ -341,7 +343,7 @@ def test_clause_can_be_negated():
     }
     user = { 'key': 'x', 'name': 'Bob' }
     flag = _make_bool_flag_from_clause(clause)
-    assert evaluate(flag, user, empty_store).detail.value == False
+    assert evaluate(flag, user, empty_store, event_factory).detail.value == False
 
 
 def _make_bool_flag_from_clause(clause):
@@ -374,7 +376,6 @@ def test_bucket_by_user_key():
     assert bucket == pytest.approx(0.10343106)
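# The 0.10343106 expectation above follows from the SHA1-based bucketing scheme.
# A self-contained sketch of that scheme (the real implementation is _bucket_user
# in ldclient/flag.py, which also handles 'secondary' keys and custom bucket-by
# attributes not shown here):
import hashlib

def bucket_user_sketch(user_key, flag_key, salt):
    # hash "flagkey.salt.userkey", keep the first 15 hex digits, then scale
    # into [0, 1); 0xFFFFFFFFFFFFFFF matches __LONG_SCALE__ in ldclient/flag.py
    hash_input = '%s.%s.%s' % (flag_key, salt, user_key)
    hash_val = int(hashlib.sha1(hash_input.encode('utf-8')).hexdigest()[:15], 16)
    return hash_val / float(0xFFFFFFFFFFFFFFF)

# bucket_user_sketch('userKey', 'hashKey', 'saltyA') is approximately 0.10343106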
def test_bucket_by_int_attr():
-
     feature = { u'key': u'hashKey', u'salt': u'saltyA' }
     user = {
         u'key': u'userKey',
         u'custom': {
@@ -388,7 +389,6 @@ def test_bucket_by_int_attr():
     assert bucket2 == bucket
 
 def test_bucket_by_float_attr_not_allowed():
-
     feature = { u'key': u'hashKey', u'salt': u'saltyA' }
     user = {
         u'key': u'userKey',
         u'custom': {
diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py
index 0e6c33a2..900d5947 100644
--- a/testing/test_ldclient.py
+++ b/testing/test_ldclient.py
@@ -200,6 +200,69 @@ def test_event_for_existing_feature_with_reason():
             e['debugEventsUntilDate'] == 1000)
 
 
+def test_event_for_existing_feature_with_tracked_rule():
+    feature = {
+        'key': 'feature.key',
+        'version': 100,
+        'salt': u'',
+        'on': True,
+        'rules': [
+            {
+                'clauses': [
+                    { 'attribute': 'key', 'op': 'in', 'values': [ user['key'] ] }
+                ],
+                'variation': 0,
+                'trackEvents': True,
+                'id': 'rule_id'
+            }
+        ],
+        'variations': [ 'value' ]
+    }
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {feature['key']: feature}})
+    client = make_client(store)
+    assert 'value' == client.variation(feature['key'], user, default='default')
+    e = get_first_event(client)
+    assert (e['kind'] == 'feature' and
+            e['key'] == feature['key'] and
+            e['user'] == user and
+            e['version'] == feature['version'] and
+            e['value'] == 'value' and
+            e['variation'] == 0 and
+            e['reason'] == { 'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'rule_id' } and
+            e['default'] == 'default' and
+            e['trackEvents'] == True and
+            e.get('debugEventsUntilDate') is None)
+
+
+def test_event_for_existing_feature_with_tracked_fallthrough():
+    feature = {
+        'key': 'feature.key',
+        'version': 100,
+        'salt': u'',
+        'on': True,
+        'rules': [],
+        'fallthrough': { 'variation': 0 },
+        'variations': [ 'value' ],
+        'trackEventsFallthrough': True
+    }
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {feature['key']: feature}})
+    client = make_client(store)
+    assert 'value' == client.variation(feature['key'], user, default='default')
+    e = get_first_event(client)
+    assert (e['kind'] == 'feature' and
+            e['key'] == feature['key'] and
+            e['user'] == user and
+            e['version'] == feature['version'] and
+            e['value'] == 'value' and
+            e['variation'] == 0 and
+            e['reason'] == { 'kind': 'FALLTHROUGH' } and
+            e['default'] == 'default' and
+            e['trackEvents'] == True and
+            e.get('debugEventsUntilDate') is None)
+
+
 def test_event_for_unknown_feature():
     store = InMemoryFeatureStore()
     store.init({FEATURES: {}})
@@ -210,7 +273,7 @@ def test_event_for_unknown_feature():
             e['key'] == 'feature.key' and
             e['user'] == user and
             e['value'] == 'default' and
-            e['variation'] == None and
+            e.get('variation') is None and
             e['default'] == 'default')
 
 
@@ -228,7 +291,7 @@ def test_event_for_existing_feature_with_no_user():
             e['user'] == None and
             e['version'] == feature['version'] and
             e['value'] == 'default' and
-            e['variation'] == None and
+            e.get('variation') is None and
             e['default'] == 'default' and
             e['trackEvents'] == True and
             e['debugEventsUntilDate'] == 1000)
@@ -249,7 +312,7 @@ def test_event_for_existing_feature_with_no_user_key():
             e['user'] == bad_user and
             e['version'] == feature['version'] and
             e['value'] == 'default' and
-            e['variation'] == None and
+            e.get('variation') is None and
             e['default'] == 'default' and
             e['trackEvents'] == True and
             e['debugEventsUntilDate'] == 1000)
diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py
index be925a5c..f716c5de 100644
--- a/testing/test_ldclient_evaluation.py
+++ b/testing/test_ldclient_evaluation.py
@@ -123,7 +123,7 @@ def test_variation_detail_when_user_is_none():
     expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'})
     assert expected == client.variation_detail('feature.key', None, default='default')
 
-def test_variation_when_user_has_no_key():
+def test_variation_detail_when_user_has_no_key():
     feature = make_off_flag_with_value('feature.key', 'value')
     store = InMemoryFeatureStore()
     store.init({FEATURES: {'feature.key': feature}})

From 6846ba16dbad54ebdbe51039ce0d2e69005bf101 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Mon, 25 Feb 2019 15:44:01 -0800
Subject: [PATCH 086/356] a little more test coverage

---
 testing/test_ldclient.py | 63 ++++++++++++++++++++++++++++++++++++
 1 file changed, 63 insertions(+)

diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py
index 900d5947..1293d19a 100644
--- a/testing/test_ldclient.py
+++ b/testing/test_ldclient.py
@@ -235,6 +235,41 @@ def test_event_for_existing_feature_with_tracked_rule():
             e.get('debugEventsUntilDate') is None)
 
 
+def test_event_for_existing_feature_with_untracked_rule():
+    feature = {
+        'key': 'feature.key',
+        'version': 100,
+        'salt': u'',
+        'on': True,
+        'rules': [
+            {
+                'clauses': [
+                    { 'attribute': 'key', 'op': 'in', 'values': [ user['key'] ] }
+                ],
+                'variation': 0,
+                'trackEvents': False,
+                'id': 'rule_id'
+            }
+        ],
+        'variations': [ 'value' ]
+    }
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {feature['key']: feature}})
+    client = make_client(store)
+    assert 'value' == client.variation(feature['key'], user, default='default')
+    e = get_first_event(client)
+    assert (e['kind'] == 'feature' and
+            e['key'] == feature['key'] and
+            e['user'] == user and
+            e['version'] == feature['version'] and
+            e['value'] == 'value' and
+            e['variation'] == 0 and
+            e.get('reason') is None and
+            e['default'] == 'default' and
+            e.get('trackEvents', False) == False and
+            e.get('debugEventsUntilDate') is None)
+
+
 def test_event_for_existing_feature_with_tracked_fallthrough():
     feature = {
         'key': 'feature.key',
@@ -263,6 +298,34 @@ def test_event_for_existing_feature_with_tracked_fallthrough():
             e.get('debugEventsUntilDate') is None)
 
 
+def test_event_for_existing_feature_with_untracked_fallthrough():
+    feature = {
+        'key': 'feature.key',
+        'version': 100,
+        'salt': u'',
+        'on': True,
+        'rules': [],
+        'fallthrough': { 'variation': 0 },
+        'variations': [ 'value' ],
+        'trackEventsFallthrough': False
+    }
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {feature['key']: feature}})
+    client = make_client(store)
+    assert 'value' == client.variation(feature['key'], user, default='default')
+    e = get_first_event(client)
+    assert (e['kind'] == 'feature' and
+            e['key'] == feature['key'] and
+            e['user'] == user and
+            e['version'] == feature['version'] and
+            e['value'] == 'value' and
+            e['variation'] == 0 and
+            e.get('reason') is None and
+            e['default'] == 'default' and
+            e.get('trackEvents', False) == False and
+            e.get('debugEventsUntilDate') is None)
+
+
 def test_event_for_unknown_feature():
     store = InMemoryFeatureStore()
     store.init({FEATURES: {}})

From c514216e4c97ce19fe38422dc302448ad1b3d7b6 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Mon, 25 Feb 2019 16:14:20 -0800
Subject: [PATCH 087/356] rm unnecessary logic

---
 ldclient/impl/event_factory.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py
index 0c6fcc8c..f738fd69 100644
--- a/ldclient/impl/event_factory.py
+++ b/ldclient/impl/event_factory.py
@@ -29,7 +29,6 @@ def new_eval_event(self, flag, user, detail, default_value, prereq_of_flag = Non
         return e
 
     def new_default_event(self, flag, user, default_value, reason):
-        add_experiment_data = self._is_experiment(flag, reason)
         e = {
             'kind': 'feature',
             'key': flag.get('key'),
@@ -39,11 +38,11 @@ def new_default_event(self, flag, user, default_value, reason):
             'version': flag.get('version')
         }
         # the following properties are handled separately so we don't waste bandwidth on unused keys
-        if add_experiment_data or flag.get('trackEvents', False):
+        if flag.get('trackEvents', False):
             e['trackEvents'] = True
         if flag.get('debugEventsUntilDate', None):
             e['debugEventsUntilDate'] = flag.get('debugEventsUntilDate')
-        if add_experiment_data or self._with_reasons:
+        if self._with_reasons:
             e['reason'] = reason
         return e

From afab05deea1575064c637b37e004d5bb555a1c97 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 26 Feb 2019 15:14:04 -0800
Subject: [PATCH 088/356] more factory methods

---
 ldclient/client.py             |  4 ++--
 ldclient/impl/event_factory.py | 19 +++++++++++++++++++
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/ldclient/client.py b/ldclient/client.py
index 32dae0ae..cff6f1d6 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -180,7 +180,7 @@ def track(self, event_name, user, data=None):
         self._sanitize_user(user)
         if user is None or user.get('key') is None:
             log.warn("Missing user or user key when calling track().")
-        self._send_event({'kind': 'custom', 'key': event_name, 'user': user, 'data': data})
+        self._send_event(self._event_factory_default.new_custom_event(event_name, user, data))
 
     def identify(self, user):
         """Registers the user.
@@ -194,7 +194,7 @@ def identify(self, user):
         self._sanitize_user(user)
         if user is None or user.get('key') is None:
             log.warn("Missing user or user key when calling identify().")
-        self._send_event({'kind': 'identify', 'key': user.get('key'), 'user': user})
+        self._send_event(self._event_factory_default.new_identify_event(user))
 
     def is_offline(self):
         """Returns true if the client is in offline mode.
diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py
index f738fd69..7b8b725f 100644
--- a/ldclient/impl/event_factory.py
+++ b/ldclient/impl/event_factory.py
@@ -2,6 +2,10 @@
 # Event constructors are centralized here to avoid mistakes and repetitive logic.
 # The LDClient owns two instances of _EventFactory: one that always embeds evaluation reasons
 # in the events (for when variation_detail is called) and one that doesn't.
+#
+# Note that none of these methods fill in the "creationDate" property, because in the Python
+# client, that is done by DefaultEventProcessor.send_event().
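# As a rough illustration of the stamping described in the comment above (a
# sketch only; the actual code lives in DefaultEventProcessor.send_event and is
# not shown in this diff):
#
#     import time
#     event['creationDate'] = int(time.time() * 1000)  # epoch milliseconds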
+
 class _EventFactory(object):
     def __init__(self, with_reasons):
         self._with_reasons = with_reasons
@@ -58,6 +62,21 @@ def new_unknown_flag_event(self, key, user, default_value, reason):
             e['reason'] = reason
         return e
 
+    def new_identify_event(self, user):
+        return {
+            'kind': 'identify',
+            'key': user.get('key'),
+            'user': user
+        }
+
+    def new_custom_event(self, event_name, user, data):
+        return {
+            'kind': 'custom',
+            'key': event_name,
+            'user': user,
+            'data': data
+        }
+
     def _is_experiment(self, flag, reason):
         if reason is not None:
             kind = reason['kind']

From 84198a3b03b15c49bfaa0fb1604d3702e71422a8 Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Fri, 1 Mar 2019 23:16:23 +0000
Subject: [PATCH 089/356] try python -m instead of pytest directly

---
 azure-pipelines.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index b7f19ff3..a09727ec 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -49,4 +49,4 @@ jobs:
         pip install -r consul-requirements.txt
         python setup.py install
         mkdir test-reports
-        pytest -s --junitxml=test-reports/junit.xml testing;
+        python -m pytest -s --junitxml=test-reports/junit.xml testing;

From 80411dd8054bbcdf7e1aa6eb15e346f9988e58b7 Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Fri, 1 Mar 2019 23:23:49 +0000
Subject: [PATCH 090/356] add setuptools

---
 azure-pipelines.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index a09727ec..c3d5980f 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -45,6 +45,7 @@ jobs:
       script: |
         python --version
+        pip install setuptools
         pip install -r test-requirements.txt
         pip install -r consul-requirements.txt
         python setup.py install

From 52c0a195337d009502783dd11c0796436231017f Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Fri, 1 Mar 2019 23:28:29 +0000
Subject: [PATCH 091/356] use python -m for all of pip

---
 azure-pipelines.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index c3d5980f..68418351 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -45,9 +45,9 @@ jobs:
       script: |
         python --version
-        pip install setuptools
-        pip install -r test-requirements.txt
-        pip install -r consul-requirements.txt
+        python -m pip install setuptools
+        python -m pip install -r test-requirements.txt
+        python -m pip install -r consul-requirements.txt
         python setup.py install
         mkdir test-reports
         python -m pytest -s --junitxml=test-reports/junit.xml testing;

From 5bdea5f7dd7d3210eaca482e16fce3e857044e5f Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Fri, 1 Mar 2019 23:35:48 +0000
Subject: [PATCH 092/356] add UsePythonVersion task

---
 azure-pipelines.yml | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 68418351..27ab27d2 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -38,6 +38,10 @@ jobs:
         cd redis
         ./redis-server --service-install
         ./redis-server --service-start
+  - task: UsePythonVersion@0
+    inputs:
+      versionSpec: '2.7'
+      addToPath: true
   - task: PowerShell@2
     displayName: 'Setup SDK and Test'
     inputs:
      targetType: inline
      workingDirectory: $(System.DefaultWorkingDirectory)
      script: |
        python --version
-        python -m pip install setuptools
-        python -m pip install -r test-requirements.txt
-        python -m pip install -r consul-requirements.txt
+        pip install setuptools
+        pip install -r test-requirements.txt
+        pip install -r consul-requirements.txt
        python setup.py install
        mkdir test-reports
        python -m pytest -s --junitxml=test-reports/junit.xml testing;

From 60a66a8c1ad1ffbc7b5c6f52f08edd15bc5cb5ce Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Fri, 1 Mar 2019 23:38:56 +0000
Subject: [PATCH 093/356] fix indent

---
 azure-pipelines.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 27ab27d2..f27c0d01 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -40,8 +40,8 @@ jobs:
         ./redis-server --service-start
   - task: UsePythonVersion@0
     inputs:
-    versionSpec: '2.7'
-    addToPath: true
+      versionSpec: '2.7'
+      addToPath: true
   - task: PowerShell@2

From 1907d75eb9da888ca06e5a3af6984cebe6b0490d Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Fri, 1 Mar 2019 23:42:52 +0000
Subject: [PATCH 094/356] remove manually adding setuptools

---
 azure-pipelines.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index f27c0d01..be768073 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -49,7 +49,6 @@ jobs:
       script: |
         python --version
-        pip install setuptools
         pip install -r test-requirements.txt
         pip install -r consul-requirements.txt
         python setup.py install

From 7cdf9fcf517f48f88c5adbc72879f4b4513f3c53 Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Sat, 2 Mar 2019 00:07:07 +0000
Subject: [PATCH 095/356] add on 3.7 stages

---
 azure-pipelines.yml | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index be768073..a1f89bf3 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -43,7 +43,7 @@ jobs:
       versionSpec: '2.7'
       addToPath: true
   - task: PowerShell@2
-    displayName: 'Setup SDK and Test'
+    displayName: 'Setup SDK and Test 2.7'
     inputs:
       targetType: inline
       workingDirectory: $(System.DefaultWorkingDirectory)
@@ -53,4 +53,20 @@ jobs:
         pip install -r consul-requirements.txt
         python setup.py install
         mkdir test-reports
-        python -m pytest -s --junitxml=test-reports/junit.xml testing;
+        python -m pytest -s --junitxml=test-reports27/junit.xml testing;
+  - task: UsePythonVersion@0
+    inputs:
+      versionSpec: '3.7'
+      addToPath: true
+  - task: PowerShell@2
+    displayName: 'Setup SDK and Test 3.7'
+    inputs:
+      targetType: inline
+      workingDirectory: $(System.DefaultWorkingDirectory)
+      script: |
+        python --version
+        pip install -r test-requirements.txt
+        pip install -r consul-requirements.txt
+        python setup.py install
+        mkdir test-reports
+        python -m pytest -s --junitxml=test-reports37/junit.xml testing;

From 1023d45a0631699698b6f240c45d1105be52f448 Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Sat, 2 Mar 2019 00:12:22 +0000
Subject: [PATCH 096/356] fix mkdir for reports

---
 azure-pipelines.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index a1f89bf3..126e5bf3 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -52,7 +52,7 @@ jobs:
         pip install -r test-requirements.txt
         pip install -r consul-requirements.txt
         python setup.py install
-        mkdir test-reports
+        mkdir test-reports27
         python -m pytest -s --junitxml=test-reports27/junit.xml testing;
   - task: UsePythonVersion@0
     inputs:
@@ -68,5 +68,5 @@ jobs:
         pip install -r test-requirements.txt
         pip install -r consul-requirements.txt
         python setup.py install
-        mkdir test-reports
+        mkdir test-reports37
         python -m pytest -s --junitxml=test-reports37/junit.xml testing;

From b9778b68b74d486e37d1770d1cfe02853c2d1fc5 Mon Sep 17 00:00:00 2001
From: Harpo Roeder
Date: Sat, 2 Mar 2019 00:17:37 +0000
Subject: [PATCH 097/356] upload test artifacts

---
 azure-pipelines.yml | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 126e5bf3..af1f3342 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -70,3 +70,15 @@ jobs:
         python setup.py install
         mkdir test-reports37
         python -m pytest -s --junitxml=test-reports37/junit.xml testing;
+  - task: CopyFiles@2
+    inputs:
+      targetFolder: $(Build.ArtifactStagingDirectory)/test-reports27
+      sourceFolder: $(System.DefaultWorkingDirectory)/test-reports27
+  - task: CopyFiles@2
+    inputs:
+      targetFolder: $(Build.ArtifactStagingDirectory)/test-reports37
+      sourceFolder: $(System.DefaultWorkingDirectory)/test-reports37
+  - task: PublishBuildArtifacts@1
+    inputs:
+      pathtoPublish: '$(Build.ArtifactStagingDirectory)'
+      artifactName: reports

From e5d5e4135bbc525e1996e6af81363e0f5cd7ecd1 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 26 Mar 2019 11:18:15 -0700
Subject: [PATCH 098/356] skip trying to load pyyaml in Python 3.3

---
 .circleci/config.yml             | 3 +++
 test-requirements.txt            | 1 -
 test-yaml-requirements.txt       | 1 +
 testing/test_file_data_source.py | 9 +++++++++
 4 files changed, 13 insertions(+), 1 deletion(-)
 create mode 100644 test-yaml-requirements.txt

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 603bbf54..a0d2c45f 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -17,6 +17,9 @@ test-template: &test-template
       command: |
         sudo pip install --upgrade pip virtualenv;
         sudo pip install -r test-requirements.txt;
+        if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then
+          sudo pip install -r test-yaml-requirements.txt;
+        fi;
         if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then
          sudo pip install -r consul-requirements.txt;
         fi;
diff --git a/test-requirements.txt b/test-requirements.txt
index 3bc09d90..ccde3818 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,5 +7,4 @@ jsonpickle==0.9.3
 pytest-capturelog>=0.7
 pytest-cov>=2.4.0
 codeclimate-test-reporter>=0.2.1
-pyyaml>=3.0
 watchdog>=0.9
diff --git a/test-yaml-requirements.txt b/test-yaml-requirements.txt
new file mode 100644
index 00000000..fb5e7f76
--- /dev/null
+++ b/test-yaml-requirements.txt
@@ -0,0 +1 @@
+pyyaml>=3.0
diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py
index 2e232ec8..277c9312 100644
--- a/testing/test_file_data_source.py
+++ b/testing/test_file_data_source.py
@@ -12,6 +12,13 @@
 from ldclient.integrations import Files
 from ldclient.versioned_data_kind import FEATURES, SEGMENTS
 
+have_yaml = False
+try:
+    import yaml
+    have_yaml = True
+except ImportError:
+    pass
+
 all_flag_keys = [ 'flag1', 'flag2' ]
 
 all_properties_json = '''
@@ -128,6 +135,8 @@ def test_loads_flags_on_start_from_json():
         os.remove(path)
 
 def test_loads_flags_on_start_from_yaml():
+    if not have_yaml:
+        return
     path = make_temp_file(all_properties_yaml)
     try:
         source = make_data_source(paths = path)

From fd883cdeef56ae05e1958392d2da0da2cae3ca28 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 26 Mar 2019 12:24:03 -0700
Subject: [PATCH 099/356] can't use watchdog in Python 3.3

---
 .circleci/config.yml                      | 2 +-
 test-filesource-optional-requirements.txt | 2 ++
 test-requirements.txt                     | 1 -
 test-yaml-requirements.txt                | 1 -
 4 files changed, 3 insertions(+), 3 deletions(-)
 create mode 100644 test-filesource-optional-requirements.txt
 delete mode 100644 test-yaml-requirements.txt

diff --git a/.circleci/config.yml b/.circleci/config.yml
index a0d2c45f..46e2166e 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -18,7 +18,7 @@ test-template: &test-template
         sudo pip install --upgrade pip virtualenv;
         sudo pip install -r test-requirements.txt;
         if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then
-          sudo pip install -r test-yaml-requirements.txt;
+          sudo pip install -r test-filesource-optional-requirements.txt;
         fi;
         if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then
           sudo pip install -r consul-requirements.txt;
         fi;
diff --git a/test-filesource-optional-requirements.txt b/test-filesource-optional-requirements.txt
new file mode 100644
index 00000000..e0a0e284
--- /dev/null
+++ b/test-filesource-optional-requirements.txt
@@ -0,0 +1,2 @@
+pyyaml>=3.0
+watchdog>=0.9
diff --git a/test-requirements.txt b/test-requirements.txt
index ccde3818..bc5b43f2 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,4 +7,3 @@ jsonpickle==0.9.3
 pytest-capturelog>=0.7
 pytest-cov>=2.4.0
 codeclimate-test-reporter>=0.2.1
-watchdog>=0.9
diff --git a/test-yaml-requirements.txt b/test-yaml-requirements.txt
deleted file mode 100644
index fb5e7f76..00000000
--- a/test-yaml-requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-pyyaml>=3.0

From b3dc4c4f1f91bbbf87a0739525c052b3d57d37cc Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 26 Mar 2019 14:41:22 -0700
Subject: [PATCH 100/356] mark test as skipped

---
 testing/test_file_data_source.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py
index 277c9312..78ab5359 100644
--- a/testing/test_file_data_source.py
+++ b/testing/test_file_data_source.py
@@ -136,7 +136,7 @@ def test_loads_flags_on_start_from_json():
 
 def test_loads_flags_on_start_from_yaml():
     if not have_yaml:
-        return
+        pytest.skip("skipping file source test with YAML because pyyaml isn't available")
     path = make_temp_file(all_properties_yaml)
     try:
         source = make_data_source(paths = path)

From 803a79466dbf7be881aa98adac596241ce0e23de Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Thu, 28 Mar 2019 17:21:44 -0700
Subject: [PATCH 101/356] coerce user attributes into strings when necessary,
 don't send events without valid users

---
 ldclient/client.py              |  8 +--
 ldclient/event_processor.py     | 25 +++++++---
 ldclient/flag.py                |  8 ++-
 ldclient/util.py                | 14 ++++++
 testing/test_event_processor.py | 86 ++++++++++++++++++++++++++++++++-
 testing/test_flag.py            | 23 +++++++++
 testing/test_ldclient.py        | 39 ---------------
 7 files changed, 150 insertions(+), 53 deletions(-)

diff --git a/ldclient/client.py b/ldclient/client.py
index d1759f6f..edb9f28f 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -174,10 +174,10 @@ def track(self, event_name, user, data=None):
         :param dict user: the attributes of the user
         :param data: optional additional data associated with the event
         """
-        self._sanitize_user(user)
         if user is None or user.get('key') is None:
             log.warn("Missing user or user key when calling track().")
-        self._send_event({'kind': 'custom', 'key': event_name, 'user': user, 'data': data})
+        else:
+            self._send_event({'kind': 'custom', 'key': event_name, 'user': user, 'data': data})
 
     def identify(self, user):
         """Registers the user.
@@ -188,10 +188,10 @@ def identify(self, user):
 
         :param dict user: attributes of the user to register
         """
-        self._sanitize_user(user)
         if user is None or user.get('key') is None:
             log.warn("Missing user or user key when calling identify().")
-        self._send_event({'kind': 'identify', 'key': user.get('key'), 'user': user})
+        else:
+            self._send_event({'kind': 'identify', 'key': str(user.get('key')), 'user': user})
 
     def is_offline(self):
         """Returns true if the client is in offline mode.
diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py
index 30619298..b5b0e370 100644
--- a/ldclient/event_processor.py
+++ b/ldclient/event_processor.py
@@ -29,11 +29,13 @@
 from ldclient.util import _headers
 from ldclient.util import create_http_pool_manager
 from ldclient.util import log
-from ldclient.util import http_error_message, is_http_error_recoverable, throw_if_unsuccessful_response
+from ldclient.util import http_error_message, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response
 
 __MAX_FLUSH_THREADS__ = 5
 __CURRENT_EVENT_SCHEMA__ = 3
 
+__USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__ = [ "key", "secondary", "ip", "country", "email", "firstName", "lastName", "avatar", "name" ]
+
 
 class NullEventProcessor(EventProcessor):
     def __init__(self):
@@ -84,9 +86,9 @@ def make_output_event(self, e):
                 'prereqOf': e.get('prereqOf')
             }
             if self._inline_users or is_debug:
-                out['user'] = self._user_filter.filter_user_props(e['user'])
+                out['user'] = self._process_user(e)
             else:
-                out['userKey'] = e['user'].get('key')
+                out['userKey'] = self._get_userkey(e)
             if e.get('reason'):
                 out['reason'] = e.get('reason')
             return out
@@ -94,8 +96,8 @@ def make_output_event(self, e):
             return {
                 'kind': 'identify',
                 'creationDate': e['creationDate'],
-                'key': e['user'].get('key'),
-                'user': self._user_filter.filter_user_props(e['user'])
+                'key': self._get_userkey(e),
+                'user': self._process_user(e)
             }
         elif kind == 'custom':
             out = {
@@ -105,15 +107,15 @@ def make_output_event(self, e):
                 'data': e.get('data')
             }
             if self._inline_users:
-                out['user'] = self._user_filter.filter_user_props(e['user'])
+                out['user'] = self._process_user(e)
             else:
-                out['userKey'] = e['user'].get('key')
+                out['userKey'] = self._get_userkey(e)
             return out
         elif kind == 'index':
             return {
                 'kind': 'index',
                 'creationDate': e['creationDate'],
-                'user': self._user_filter.filter_user_props(e['user'])
+                'user': self._process_user(e)
             }
         else:
            return e
@@ -146,6 +148,13 @@ def make_summary_event(self, summary):
             'endDate': summary.end_date,
             'features': flags_out
         }
+
+    def _process_user(self, event):
+        filtered = self._user_filter.filter_user_props(event['user'])
+        return stringify_attrs(filtered, __USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__)
+
+    def _get_userkey(self, event):
+        return str(event['user'].get('key'))
diff --git a/ldclient/flag.py b/ldclient/flag.py
index 88739ba0..dceb699c 100644
--- a/ldclient/flag.py
+++ b/ldclient/flag.py
@@ -10,6 +10,7 @@
 import sys
 
 from ldclient import operators
+from ldclient.util import stringify_attrs
 from ldclient.versioned_data_kind import FEATURES, SEGMENTS
 
 __LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF)
@@ -17,6 +18,10 @@
 __BUILTINS__ = ["key", "ip", "country", "email",
                 "firstName", "lastName", "avatar", "name", "anonymous"]
 
+__USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__ = [ "key", "secondary" ]
+# Currently we are not stringifying the rest of the built-in attributes prior to evaluation, only for events.
+# This is because it could affect evaluation results for existing users (ch35206).
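# For reference, the stringify_attrs helper added to ldclient/util.py below
# behaves like this (a sketch with invented values):
#
#     user = { 'key': 123, 'secondary': 456, 'custom': { 'age': 99 } }
#     stringify_attrs(user, [ 'key', 'secondary' ])
#     # -> { 'key': '123', 'secondary': '456', 'custom': { 'age': 99 } }
#
# Only the named attributes are coerced, and the input dict is copied rather
# than mutated when a coercion is needed.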
+ log = logging.getLogger(sys.modules[__name__].__name__) @@ -106,8 +111,9 @@ def error_reason(error_kind): def evaluate(flag, user, store, include_reasons_in_events = False): + sanitized_user = stringify_attrs(user, __USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__) prereq_events = [] - detail = _evaluate(flag, user, store, prereq_events, include_reasons_in_events) + detail = _evaluate(flag, sanitized_user, store, prereq_events, include_reasons_in_events) return EvalResult(detail = detail, events = prereq_events) def _evaluate(flag, user, store, prereq_events, include_reasons_in_events): diff --git a/ldclient/util.py b/ldclient/util.py index b1d533a2..229030b8 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -5,6 +5,7 @@ import certifi import logging +import six import sys import urllib3 @@ -111,3 +112,16 @@ def http_error_message(status, context, retryable_message = "will retry"): context, retryable_message if is_http_error_recoverable(status) else "giving up permanently" ) + + +def stringify_attrs(attrdict, attrs): + if attrdict is None: + return None + newdict = None + for attr in attrs: + val = attrdict.get(attr) + if val is not None and not isinstance(val, six.string_types): + if newdict is None: + newdict = attrdict.copy() + newdict[attr] = str(val) + return attrdict if newdict is None else newdict diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index f4ad9ab8..a2e110b2 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -17,6 +17,36 @@ 'key': 'userkey', 'privateAttrs': [ 'name' ] } +numeric_user = { + 'key': 1, + 'secondary': 2, + 'ip': 3, + 'country': 4, + 'email': 5, + 'firstName': 6, + 'lastName': 7, + 'avatar': 8, + 'name': 9, + 'anonymous': False, + 'custom': { + 'age': 99 + } +} +stringified_numeric_user = { + 'key': '1', + 'secondary': '2', + 'ip': '3', + 'country': '4', + 'email': '5', + 'firstName': '6', + 'lastName': '7', + 'avatar': '8', + 'name': '9', + 'anonymous': False, + 'custom': { + 'age': 99 + } +} ep = None mock_http = None @@ -65,6 +95,21 @@ def test_user_is_filtered_in_identify_event(): 'user': filtered_user }] +def test_user_attrs_are_stringified_in_identify_event(): + setup_processor(Config()) + + e = { 'kind': 'identify', 'user': numeric_user } + ep.send_event(e) + + output = flush_and_get_events() + assert len(output) == 1 + assert output == [{ + 'kind': 'identify', + 'creationDate': e['creationDate'], + 'key': stringified_numeric_user['key'], + 'user': stringified_numeric_user + }] + def test_individual_feature_event_is_queued_with_index_event(): setup_processor(Config()) @@ -95,6 +140,21 @@ def test_user_is_filtered_in_index_event(): check_feature_event(output[1], e, False, None) check_summary_event(output[2]) +def test_user_attrs_are_stringified_in_index_event(): + setup_processor(Config()) + + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) + + output = flush_and_get_events() + assert len(output) == 3 + check_index_event(output[0], e, stringified_numeric_user) + check_feature_event(output[1], e, False, None) + check_summary_event(output[2]) + def test_feature_event_can_contain_inline_user(): setup_processor(Config(inline_users_in_events = True)) @@ -123,6 +183,20 @@ def test_user_is_filtered_in_feature_event(): check_feature_event(output[0], e, False, filtered_user) check_summary_event(output[1]) +def test_user_attrs_are_stringified_in_feature_event(): + 
setup_processor(Config(inline_users_in_events = True)) + + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) + + output = flush_and_get_events() + assert len(output) == 2 + check_feature_event(output[0], e, False, stringified_numeric_user) + check_summary_event(output[1]) + def test_index_event_is_still_generated_if_inline_users_is_true_but_feature_event_is_not_tracked(): setup_processor(Config(inline_users_in_events = True)) @@ -346,6 +420,16 @@ def test_user_is_filtered_in_custom_event(): assert len(output) == 1 check_custom_event(output[0], e, filtered_user) +def test_user_attrs_are_stringified_in_custom_event(): + setup_processor(Config(inline_users_in_events = True)) + + e = { 'kind': 'custom', 'key': 'eventkey', 'user': numeric_user, 'data': { 'thing': 'stuff '} } + ep.send_event(e) + + output = flush_and_get_events() + assert len(output) == 1 + check_custom_event(output[0], e, stringified_numeric_user) + def test_nothing_is_sent_if_there_are_no_events(): setup_processor(Config()) ep.flush() @@ -426,7 +510,7 @@ def check_feature_event(data, source, debug, inline_user): assert data.get('value') == source.get('value') assert data.get('default') == source.get('default') if inline_user is None: - assert data['userKey'] == source['user']['key'] + assert data['userKey'] == str(source['user']['key']) else: assert data['user'] == inline_user diff --git a/testing/test_flag.py b/testing/test_flag.py index 97f64af0..9ca4b05a 100644 --- a/testing/test_flag.py +++ b/testing/test_flag.py @@ -248,6 +248,29 @@ def test_flag_returns_error_if_rule_has_rollout_with_no_variations(): detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert evaluate(flag, user, empty_store) == EvalResult(detail, []) +def test_user_key_is_coerced_to_string_for_evaluation(): + clause = { 'attribute': 'key', 'op': 'in', 'values': [ '999' ] } + flag = _make_bool_flag_from_clause(clause) + user = { 'key': 999 } + assert evaluate(flag, user, empty_store).detail.value == True + +def test_secondary_key_is_coerced_to_string_for_evaluation(): + # We can't really verify that the rollout calculation works correctly, but we can at least + # make sure it doesn't error out if there's a non-string secondary value (ch35189) + rule = { + 'id': 'ruleid', + 'clauses': [ + { 'attribute': 'key', 'op': 'in', 'values': [ 'userkey' ] } + ], + 'rollout': { + 'salt': '', + 'variations': [ { 'weight': 100000, 'variation': 1 } ] + } + } + flag = make_boolean_flag_with_rules([rule]) + user = { 'key': 'userkey', 'secondary': 999 } + assert evaluate(flag, user, empty_store).detail.value == True + def test_segment_match_clause_retrieves_segment_from_store(): store = InMemoryFeatureStore() segment = { diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 0e6c33a2..90bdeb4c 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -29,25 +29,6 @@ } } -numeric_key_user = {} - -sanitized_numeric_key_user = { - u'key': '33', - u'custom': { - u'bizzle': u'def' - } -} - - -def setup_function(function): - global numeric_key_user - numeric_key_user = { - u'key': 33, - u'custom': { - u'bizzle': u'def' - } - } - def make_client(store): return LDClient(config=Config(sdk_key = 'SDK_KEY', @@ -90,11 +71,6 @@ def test_toggle_offline(): assert offline_client.variation('feature.key', user, default=None) is None -def test_sanitize_user(): - 
client._sanitize_user(numeric_key_user) - assert numeric_key_user == sanitized_numeric_key_user - - def test_identify(): client.identify(user) @@ -102,13 +78,6 @@ def test_identify(): assert e['kind'] == 'identify' and e['key'] == u'xyz' and e['user'] == user -def test_identify_numeric_key_user(): - client.identify(numeric_key_user) - - e = get_first_event(client) - assert e['kind'] == 'identify' and e['key'] == '33' and e['user'] == sanitized_numeric_key_user - - def test_track(): client.track('my_event', user, 42) @@ -116,14 +85,6 @@ def test_track(): assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 -def test_track_numeric_key_user(): - client.track('my_event', numeric_key_user, 42) - - e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == sanitized_numeric_key_user \ - and e['data'] == 42 - - def test_defaults(): my_client = LDClient(config=Config(base_uri="http://localhost:3000", defaults={"foo": "bar"}, From b7035a567c42f5d25d8cfd4f660fb533fbedd805 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 28 Mar 2019 17:50:35 -0700 Subject: [PATCH 102/356] more unit tests --- testing/test_ldclient.py | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 90bdeb4c..12746857 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -50,7 +50,15 @@ def make_off_flag_with_value(key, value): def get_first_event(c): - return c._event_processor._events.pop(0) + e = c._event_processor._events.pop(0) + c._event_processor._events = [] + return e + + +def count_events(c): + n = len(c._event_processor._events) + c._event_processor._events = [] + return n def test_ctor_both_sdk_keys_set(): @@ -78,6 +86,16 @@ def test_identify(): assert e['kind'] == 'identify' and e['key'] == u'xyz' and e['user'] == user +def test_identify_no_user(): + client.identify(None) + assert count_events(client) == 0 + + +def test_identify_no_user_key(): + client.identify({ 'name': 'nokey' }) + assert count_events(client) == 0 + + def test_track(): client.track('my_event', user, 42) @@ -85,6 +103,16 @@ def test_track(): assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 +def test_track_no_user(): + client.track('my_event', None) + assert count_events(client) == 0 + + +def test_track_no_user_key(): + client.track('my_event', { 'name': 'nokey' }) + assert count_events(client) == 0 + + def test_defaults(): my_client = LDClient(config=Config(base_uri="http://localhost:3000", defaults={"foo": "bar"}, From 44101b236b756dd32257ffca7f31a637235a4a99 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 28 Mar 2019 18:07:36 -0700 Subject: [PATCH 103/356] remove redundant sanitize step --- ldclient/client.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index edb9f28f..f0b973a2 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -266,9 +266,6 @@ def _evaluate_internal(self, key, user, default, include_reasons_in_events): if self._config.offline: return EvaluationDetail(default, None, error_reason('CLIENT_NOT_READY')) - if user is not None: - self._sanitize_user(user) - def send_event(value, variation=None, flag=None, reason=None): self._send_event({'kind': 'feature', 'key': key, 'user': user, 'value': value, 'variation': variation, 'default': default, @@ -423,10 +420,5 @@ def secure_mode_hash(self, user): return "" return 
hmac.new(self._config.sdk_key.encode(), user.get('key').encode(), hashlib.sha256).hexdigest() - @staticmethod - def _sanitize_user(user): - if 'key' in user: - user['key'] = str(user['key']) - __all__ = ['LDClient', 'Config'] From ddfb3c2a910878d8d36fa5bb6b11cd197c0a7bc5 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 9 Apr 2019 11:57:17 -0700 Subject: [PATCH 104/356] ensure that client components are cleaned up correctly in every configuration --- ldclient/client.py | 81 ++++++------ ldclient/event_processor.py | 20 --- ldclient/impl/stubs.py | 39 ++++++ testing/test_ldclient.py | 246 ++++++++++++++++++++++-------------- 4 files changed, 232 insertions(+), 154 deletions(-) create mode 100644 ldclient/impl/stubs.py diff --git a/ldclient/client.py b/ldclient/client.py index f0b973a2..a16cce12 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -8,11 +8,11 @@ import traceback from ldclient.config import Config as Config -from ldclient.event_processor import NullEventProcessor from ldclient.feature_requester import FeatureRequesterImpl from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.flag import EvaluationDetail, evaluate, error_reason from ldclient.flags_state import FeatureFlagsState +from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor from ldclient.interfaces import FeatureStore from ldclient.polling import PollingUpdateProcessor from ldclient.streaming import StreamingUpdateProcessor @@ -94,45 +94,21 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): self._store = _FeatureStoreClientWrapper(self._config.feature_store) """ :type: FeatureStore """ - if self._config.offline or not self._config.send_events: - self._event_processor = NullEventProcessor() - else: - self._event_processor = self._config.event_processor_class(self._config) - if self._config.offline: log.info("Started LaunchDarkly Client in offline mode") - return if self._config.use_ldd: log.info("Started LaunchDarkly Client in LDD mode") - return - update_processor_ready = threading.Event() - - if self._config.update_processor_class: - log.info("Using user-specified update processor: " + str(self._config.update_processor_class)) - self._update_processor = self._config.update_processor_class( - self._config, self._store, update_processor_ready) - else: - if self._config.feature_requester_class: - feature_requester = self._config.feature_requester_class(self._config) - else: - feature_requester = FeatureRequesterImpl(self._config) - """ :type: FeatureRequester """ - - if self._config.stream: - self._update_processor = StreamingUpdateProcessor( - self._config, feature_requester, self._store, update_processor_ready) - else: - log.info("Disabling streaming API") - log.warn("You should only disable the streaming API if instructed to do so by LaunchDarkly support") - self._update_processor = PollingUpdateProcessor( - self._config, feature_requester, self._store, update_processor_ready) - """ :type: UpdateProcessor """ + self._event_processor = self._make_event_processor(self._config) + update_processor_ready = threading.Event() + self._update_processor = self._make_update_processor(self._config, self._store, update_processor_ready) self._update_processor.start() - log.info("Waiting up to " + str(start_wait) + " seconds for LaunchDarkly client to initialize...") - update_processor_ready.wait(start_wait) + + if start_wait > 0 and not self._config.offline and not self._config.use_ldd: + log.info("Waiting up to " + str(start_wait) + " seconds for LaunchDarkly client to
initialize...") + update_processor_ready.wait(start_wait) if self._update_processor.initialized() is True: log.info("Started LaunchDarkly Client: OK") @@ -140,6 +116,32 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): log.warn("Initialization timeout exceeded for LaunchDarkly Client or an error occurred. " "Feature Flags may not yet be available.") + def _make_event_processor(self, config): + if config.offline or not config.send_events: + return NullEventProcessor() + return config.event_processor_class(config) + + def _make_update_processor(self, config, store, ready): + if config.update_processor_class: + log.info("Using user-specified update processor: " + str(config.update_processor_class)) + return self._config.update_processor_class(config, store, ready) + + if config.offline or config.use_ldd: + return NullUpdateProcessor(config, store, ready) + + if config.feature_requester_class: + feature_requester = config.feature_requester_class(config) + else: + feature_requester = FeatureRequesterImpl(config) + """ :type: FeatureRequester """ + + if config.stream: + return StreamingUpdateProcessor(config, feature_requester, store, ready) + + log.info("Disabling streaming API") + log.warn("You should only disable the streaming API if instructed to do so by LaunchDarkly support") + return PollingUpdateProcessor(config, feature_requester, store, ready) + def get_sdk_key(self): """Returns the configured SDK key. @@ -153,13 +155,16 @@ def close(self): Do not attempt to use the client after calling this method. """ log.info("Closing LaunchDarkly client..") - if self.is_offline(): - return - if self._event_processor: - self._event_processor.stop() - if self._update_processor and self._update_processor.is_alive(): - self._update_processor.stop() + self._event_processor.stop() + self._update_processor.stop() + # These magic methods allow a client object to be automatically cleaned up by the "with" scope operator + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + def _send_event(self, event): self._event_processor.send_event(event) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index b5b0e370..2bd4f322 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -37,26 +37,6 @@ __USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__ = [ "key", "secondary", "ip", "country", "email", "firstName", "lastName", "avatar", "name" ] -class NullEventProcessor(EventProcessor): - def __init__(self): - pass - - def start(self): - pass - - def stop(self): - pass - - def is_alive(self): - return False - - def send_event(self, event): - pass - - def flush(self): - pass - - EventProcessorMessage = namedtuple('EventProcessorMessage', ['type', 'param']) diff --git a/ldclient/impl/stubs.py b/ldclient/impl/stubs.py new file mode 100644 index 00000000..30d0eea8 --- /dev/null +++ b/ldclient/impl/stubs.py @@ -0,0 +1,39 @@ + +from ldclient.interfaces import EventProcessor, UpdateProcessor + + +class NullEventProcessor(EventProcessor): + def __init__(self): + pass + + def start(self): + pass + + def stop(self): + pass + + def is_alive(self): + return False + + def send_event(self, event): + pass + + def flush(self): + pass + + +class NullUpdateProcessor(UpdateProcessor): + def __init__(self, config, store, ready): + self._ready = ready + + def start(self): + self._ready.set() + + def stop(self): + pass + + def is_alive(self): + return False + + def initialized(self): + return True diff --git a/testing/test_ldclient.py 
b/testing/test_ldclient.py index 12746857..1c19fe16 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -1,8 +1,13 @@ from ldclient.client import LDClient, Config -from ldclient.event_processor import NullEventProcessor +from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_store import InMemoryFeatureStore +from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor from ldclient.interfaces import UpdateProcessor +from ldclient.polling import PollingUpdateProcessor +from ldclient.streaming import StreamingUpdateProcessor from ldclient.versioned_data_kind import FEATURES, SEGMENTS + +import logging import pytest from testing.stub_util import CapturingFeatureStore, MockEventProcessor, MockUpdateProcessor from testing.sync_util import wait_until @@ -13,14 +18,8 @@ import Queue as queue -client = LDClient(config=Config(base_uri="http://localhost:3000", - event_processor_class = MockEventProcessor, update_processor_class = MockUpdateProcessor)) -offline_client = LDClient(config= - Config(sdk_key="secret", base_uri="http://localhost:3000", - offline=True)) -no_send_events_client = LDClient(config= - Config(sdk_key="secret", base_uri="http://localhost:3000", - update_processor_class = MockUpdateProcessor, send_events=False)) +unreachable_uri="http://fake" + user = { u'key': u'xyz', @@ -30,14 +29,32 @@ } -def make_client(store): +def make_client(store = InMemoryFeatureStore()): return LDClient(config=Config(sdk_key = 'SDK_KEY', - base_uri="http://localhost:3000", + base_uri=unreachable_uri, + events_uri=unreachable_uri, + stream_uri=unreachable_uri, event_processor_class=MockEventProcessor, update_processor_class=MockUpdateProcessor, feature_store=store)) +def make_offline_client(): + return LDClient(config=Config(sdk_key="secret", + offline=True, + base_uri=unreachable_uri, + events_uri=unreachable_uri, + stream_uri=unreachable_uri)) + + +def make_ldd_client(): + return LDClient(config=Config(sdk_key="secret", + use_ldd=True, + base_uri=unreachable_uri, + events_uri=unreachable_uri, + stream_uri=unreachable_uri)) + + def make_off_flag_with_value(key, value): return { u'key': key, @@ -68,56 +85,91 @@ def test_ctor_both_sdk_keys_set(): def test_client_has_null_event_processor_if_offline(): - assert isinstance(offline_client._event_processor, NullEventProcessor) + with make_offline_client() as client: + assert isinstance(client._event_processor, NullEventProcessor) def test_client_has_null_event_processor_if_send_events_off(): - assert isinstance(no_send_events_client._event_processor, NullEventProcessor) + config = Config(sdk_key="secret", base_uri=unreachable_uri, + update_processor_class = MockUpdateProcessor, send_events=False) + with LDClient(config=config) as client: + assert isinstance(client._event_processor, NullEventProcessor) + + +def test_client_has_normal_event_processor_in_ldd_mode(): + with make_ldd_client() as client: + assert isinstance(client._event_processor, DefaultEventProcessor) + + +def test_client_has_null_update_processor_in_offline_mode(): + with make_offline_client() as client: + assert isinstance(client._update_processor, NullUpdateProcessor) + + +def test_client_has_null_update_processor_in_ldd_mode(): + with make_ldd_client() as client: + assert isinstance(client._update_processor, NullUpdateProcessor) + + +def test_client_has_streaming_processor_by_default(): + config = Config(sdk_key="secret", base_uri=unreachable_uri, stream_uri=unreachable_uri, send_events=False) + with LDClient(config=config, start_wait=0) 
as client: + assert isinstance(client._update_processor, StreamingUpdateProcessor) + + +def test_client_has_polling_processor_if_streaming_is_disabled(): + config = Config(sdk_key="secret", stream=False, base_uri=unreachable_uri, stream_uri=unreachable_uri, send_events=False) + with LDClient(config=config, start_wait=0) as client: + assert isinstance(client._update_processor, PollingUpdateProcessor) def test_toggle_offline(): - assert offline_client.variation('feature.key', user, default=None) is None + with make_offline_client() as client: + assert client.variation('feature.key', user, default=None) is None def test_identify(): - client.identify(user) - - e = get_first_event(client) - assert e['kind'] == 'identify' and e['key'] == u'xyz' and e['user'] == user + with make_client() as client: + client.identify(user) + e = get_first_event(client) + assert e['kind'] == 'identify' and e['key'] == u'xyz' and e['user'] == user def test_identify_no_user(): - client.identify(None) - assert count_events(client) == 0 + with make_client() as client: + client.identify(None) + assert count_events(client) == 0 def test_identify_no_user_key(): - client.identify({ 'name': 'nokey' }) - assert count_events(client) == 0 + with make_client() as client: + client.identify({ 'name': 'nokey' }) + assert count_events(client) == 0 def test_track(): - client.track('my_event', user, 42) - - e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 + with make_client() as client: + client.track('my_event', user, 42) + e = get_first_event(client) + assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 def test_track_no_user(): - client.track('my_event', None) - assert count_events(client) == 0 + with make_client() as client: + client.track('my_event', None) + assert count_events(client) == 0 def test_track_no_user_key(): - client.track('my_event', { 'name': 'nokey' }) - assert count_events(client) == 0 + with make_client() as client: + client.track('my_event', { 'name': 'nokey' }) + assert count_events(client) == 0 def test_defaults(): - my_client = LDClient(config=Config(base_uri="http://localhost:3000", - defaults={"foo": "bar"}, - offline=True)) - assert "bar" == my_client.variation('foo', user, default=None) + config=Config(base_uri="http://localhost:3000", defaults={"foo": "bar"}, offline=True) + with LDClient(config=config) as client: + assert "bar" == client.variation('foo', user, default=None) def test_defaults_and_online(): @@ -144,7 +196,8 @@ def test_defaults_and_online_no_default(): def test_no_defaults(): - assert "bar" == offline_client.variation('foo', user, default="bar") + with make_offline_client() as client: + assert "bar" == client.variation('foo', user, default="bar") def test_event_for_existing_feature(): @@ -153,19 +206,19 @@ def test_event_for_existing_feature(): feature['debugEventsUntilDate'] = 1000 store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) - client = make_client(store) - assert 'value' == client.variation('feature.key', user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e.get('reason') is None and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000) + with make_client(store) as client: + assert 'value' == 
client.variation('feature.key', user, default='default') + e = get_first_event(client) + assert (e['kind'] == 'feature' and + e['key'] == 'feature.key' and + e['user'] == user and + e['version'] == feature['version'] and + e['value'] == 'value' and + e['variation'] == 0 and + e.get('reason') is None and + e['default'] == 'default' and + e['trackEvents'] == True and + e['debugEventsUntilDate'] == 1000) def test_event_for_existing_feature_with_reason(): @@ -174,33 +227,33 @@ def test_event_for_existing_feature_with_reason(): feature['debugEventsUntilDate'] = 1000 store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) - client = make_client(store) - assert 'value' == client.variation_detail('feature.key', user, default='default').value - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e['reason'] == {'kind': 'OFF'} and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000) + with make_client(store) as client: + assert 'value' == client.variation_detail('feature.key', user, default='default').value + e = get_first_event(client) + assert (e['kind'] == 'feature' and + e['key'] == 'feature.key' and + e['user'] == user and + e['version'] == feature['version'] and + e['value'] == 'value' and + e['variation'] == 0 and + e['reason'] == {'kind': 'OFF'} and + e['default'] == 'default' and + e['trackEvents'] == True and + e['debugEventsUntilDate'] == 1000) def test_event_for_unknown_feature(): store = InMemoryFeatureStore() store.init({FEATURES: {}}) - client = make_client(store) - assert 'default' == client.variation('feature.key', user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == user and - e['value'] == 'default' and - e['variation'] == None and - e['default'] == 'default') + with make_client(store) as client: + assert 'default' == client.variation('feature.key', user, default='default') + e = get_first_event(client) + assert (e['kind'] == 'feature' and + e['key'] == 'feature.key' and + e['user'] == user and + e['value'] == 'default' and + e['variation'] == None and + e['default'] == 'default') def test_event_for_existing_feature_with_no_user(): @@ -209,18 +262,18 @@ def test_event_for_existing_feature_with_no_user(): feature['debugEventsUntilDate'] = 1000 store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) - client = make_client(store) - assert 'default' == client.variation('feature.key', None, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == None and - e['version'] == feature['version'] and - e['value'] == 'default' and - e['variation'] == None and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000) + with make_client(store) as client: + assert 'default' == client.variation('feature.key', None, default='default') + e = get_first_event(client) + assert (e['kind'] == 'feature' and + e['key'] == 'feature.key' and + e['user'] == None and + e['version'] == feature['version'] and + e['value'] == 'default' and + e['variation'] == None and + e['default'] == 'default' and + e['trackEvents'] == True and + e['debugEventsUntilDate'] == 1000) def test_event_for_existing_feature_with_no_user_key(): @@ -229,24 +282,25 @@ def 
test_event_for_existing_feature_with_no_user_key(): feature['debugEventsUntilDate'] = 1000 store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) - client = make_client(store) - bad_user = { u'name': u'Bob' } - assert 'default' == client.variation('feature.key', bad_user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == bad_user and - e['version'] == feature['version'] and - e['value'] == 'default' and - e['variation'] == None and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000) + with make_client(store) as client: + bad_user = { u'name': u'Bob' } + assert 'default' == client.variation('feature.key', bad_user, default='default') + e = get_first_event(client) + assert (e['kind'] == 'feature' and + e['key'] == 'feature.key' and + e['user'] == bad_user and + e['version'] == feature['version'] and + e['value'] == 'default' and + e['variation'] == None and + e['default'] == 'default' and + e['trackEvents'] == True and + e['debugEventsUntilDate'] == 1000) def test_secure_mode_hash(): user = {'key': 'Message'} - assert offline_client.secure_mode_hash(user) == "aa747c502a898200f9e4fa21bac68136f886a0e27aec70ba06daf2e2a5cb5597" + with make_offline_client() as client: + assert client.secure_mode_hash(user) == "aa747c502a898200f9e4fa21bac68136f886a0e27aec70ba06daf2e2a5cb5597" dependency_ordering_test_data = { From 758568447c52d563f855b25f5fe0830fd12f264c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 9 Apr 2019 16:24:20 -0700 Subject: [PATCH 105/356] miscellaneous test fixes --- ldclient/client.py | 2 +- testing/test_ldclient.py | 2 ++ testing/test_ldclient_evaluation.py | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index a16cce12..16d91b0a 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -124,7 +124,7 @@ def _make_event_processor(self, config): def _make_update_processor(self, config, store, ready): if config.update_processor_class: log.info("Using user-specified update processor: " + str(config.update_processor_class)) - return self._config.update_processor_class(config, store, ready) + return config.update_processor_class(config, store, ready) if config.offline or config.use_ldd: return NullUpdateProcessor(config, store, ready) diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 1c19fe16..e1ee3910 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -111,12 +111,14 @@ def test_client_has_null_update_processor_in_ldd_mode(): assert isinstance(client._update_processor, NullUpdateProcessor) +@pytest.mark.skip("Can't currently use a live stream processor in tests because its error logging will disrupt other tests.") def test_client_has_streaming_processor_by_default(): config = Config(sdk_key="secret", base_uri=unreachable_uri, stream_uri=unreachable_uri, send_events=False) with LDClient(config=config, start_wait=0) as client: assert isinstance(client._update_processor, StreamingUpdateProcessor) +@pytest.mark.skip("Can't currently use a live polling processor in tests because its error logging will disrupt other tests.") def test_client_has_polling_processor_if_streaming_is_disabled(): config = Config(sdk_key="secret", stream=False, base_uri=unreachable_uri, stream_uri=unreachable_uri, send_events=False) with LDClient(config=config, start_wait=0) as client: diff --git a/testing/test_ldclient_evaluation.py 
b/testing/test_ldclient_evaluation.py index be925a5c..f716c5de 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -123,7 +123,7 @@ def test_variation_detail_when_user_is_none(): expected = EvaluationDetail('default', None, {'kind': 'ERROR', 'errorKind': 'USER_NOT_SPECIFIED'}) assert expected == client.variation_detail('feature.key', None, default='default') -def test_variation_when_user_has_no_key(): +def test_variation_detail_when_user_has_no_key(): feature = make_off_flag_with_value('feature.key', 'value') store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) From 3b16ebf1b8938c5a0a798d792d1845e4e46642c6 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 13 Apr 2019 16:40:52 -0700 Subject: [PATCH 106/356] support metric value with track() --- ldclient/client.py | 5 +++-- ldclient/event_processor.py | 7 +++++-- ldclient/impl/event_factory.py | 12 ++++++++---- testing/test_event_processor.py | 3 ++- testing/test_ldclient.py | 16 +++++++++++++++- 5 files changed, 33 insertions(+), 10 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 1d816d2d..ce17f5e4 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -171,7 +171,7 @@ def __exit__(self, type, value, traceback): def _send_event(self, event): self._event_processor.send_event(event) - def track(self, event_name, user, data=None): + def track(self, event_name, user, data=None, metric_value=None): """Tracks that a user performed an event. LaunchDarkly automatically tracks pageviews and clicks that are specified in the Goals @@ -181,11 +181,12 @@ def track(self, event_name, user, data=None): :param string event_name: the name of the event, which may correspond to a goal in A/B tests :param dict user: the attributes of the user :param data: optional additional data associated with the event + :param metric_value: optional numeric value that can be used in analytics """ if user is None or user.get('key') is None: log.warn("Missing user or user key when calling track().") else: - self._send_event(self._event_factory_default.new_custom_event(event_name, user, data)) + self._send_event(self._event_factory_default.new_custom_event(event_name, user, data, metric_value)) def identify(self, user): """Registers the user. 
diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 2bd4f322..d7f96af5 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -83,13 +83,16 @@ def make_output_event(self, e): out = { 'kind': 'custom', 'creationDate': e['creationDate'], - 'key': e['key'], - 'data': e.get('data') + 'key': e['key'] } if self._inline_users: out['user'] = self._process_user(e) else: out['userKey'] = self._get_userkey(e) + if e.get('data') is not None: + out['data'] = e['data'] + if e.get('metricValue') is not None: + out['metricValue'] = e['metricValue'] return out elif kind == 'index': return { diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index b3c559f2..d2a62ad8 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -69,13 +69,17 @@ def new_identify_event(self, user): 'user': user } - def new_custom_event(self, event_name, user, data): - return { + def new_custom_event(self, event_name, user, data, metric_value): + e = { 'kind': 'custom', 'key': event_name, - 'user': user, - 'data': data + 'user': user } + if data is not None: + e['data'] = data + if metric_value is not None: + e['metricValue'] = metric_value + return e def _is_experiment(self, flag, reason): if reason is not None: diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index a2e110b2..dfb4983f 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -392,7 +392,7 @@ def test_nontracked_events_are_summarized(): def test_custom_event_is_queued_with_user(): setup_processor(Config()) - e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } + e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '}, 'metricValue': 1.5 } ep.send_event(e) output = flush_and_get_events() @@ -523,6 +523,7 @@ def check_custom_event(data, source, inline_user): assert data['userKey'] == source['user']['key'] else: assert data['user'] == inline_user + assert data.get('metricValue') == source.get('metricValue') def check_summary_event(data): assert data['kind'] == 'summary' diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index d7177bcc..a6789e4d 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -150,10 +150,24 @@ def test_identify_no_user_key(): def test_track(): + with make_client() as client: + client.track('my_event', user) + e = get_first_event(client) + assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e.get('data') is None and e.get('metricValue') is None + + +def test_track_with_data(): with make_client() as client: client.track('my_event', user, 42) e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 + assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 and e.get('metricValue') is None + + +def test_track_with_metric_value(): + with make_client() as client: + client.track('my_event', user, 42, 1.5) + e = get_first_event(client) + assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 and e.get('metricValue') == 1.5 def test_track_no_user(): From 2f6961df61a14542d8a973ab5a89f7343acf4cab Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 16 Apr 2019 18:39:04 -0700 Subject: [PATCH 107/356] update method description --- ldclient/client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/ldclient/client.py b/ldclient/client.py index ce17f5e4..6e74ea3b 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -181,7 +181,9 @@ def track(self, event_name, user, data=None, metric_value=None): :param string event_name: the name of the event, which may correspond to a goal in A/B tests :param dict user: the attributes of the user :param data: optional additional data associated with the event - :param metric_value: optional numeric value that can be used in analytics + :param metric_value: a numeric value used by the LaunchDarkly experimentation feature in + numeric custom metrics. Can be omitted if this event is used by only non-numeric metrics. + This field will also be returned as part of the custom event for Data Export. """ if user is None or user.get('key') is None: log.warn("Missing user or user key when calling track().") From 902be0206056c64c6e4285863171d735f8f21413 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 25 Apr 2019 20:15:29 -0700 Subject: [PATCH 108/356] update readme format and repo links --- CONTRIBUTING.md | 52 +++++++++++------- README.md | 129 ++++++++------------------------------------- scripts/release.sh | 4 +- setup.py | 2 +- 4 files changed, 58 insertions(+), 129 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index af5083c2..91c39924 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,33 +1,47 @@ -Contributing ------------- +# Contributing to the LaunchDarkly Server-side SDK for Python -We encourage pull-requests and other contributions from the community. We've also published an [SDK contributor's guide](http://docs.launchdarkly.com/docs/sdk-contributors-guide) that provides a detailed explanation of how our SDKs work. +LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkly.com/docs/sdk-contributors-guide) that provides a detailed explanation of how our SDKs work. See below for additional information on how to contribute to this SDK. -Development information (for developing this module itself) ------------------------------------------------------------ +## Submitting bug reports and feature requests + +The LaunchDarkly SDK team monitors the issue tracker associated with the `launchdarkly/python-server-sdk` SDK repository. Bug reports and feature requests specific to this SDK should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days. -1. One-time setup: +## Submitting pull requests + +We encourage pull requests and other contributions from the community. Before submitting pull requests, ensure that all temporary or unintended code is removed. Don't worry about adding reviewers to the pull request; the LaunchDarkly SDK team will add themselves. The SDK team will acknowledge all pull requests within two business days. - mkvirtualenv python-client +## Build instructions -1. When working on the project be sure to activate the python-client virtualenv using the technique of your choosing. +### Setup -1. Install requirements (run-time & test): +It's advisable to use `virtualenv` to create a development environment within the project directory: - pip install -r requirements.txt - pip install -r test-requirements.txt +``` +mkvirtualenv python-client +source ./python-client/bin/activate +``` -1. When running unit tests, in order for `test_feature_store.py` to run, you'll need all of the supported databases (Redis, Consul, DynamoDB) running locally on their default ports. +To install the runtime and test requirements: -1. 
If you want integration tests to run, set the ```LD_SDK_KEY``` environment variable to a valid production SDK Key. +``` +pip install -r requirements.txt +pip install -r test-requirements.txt +``` -1. ```$ py.test testing``` +The additional requirements files `consul-requirements.txt`, `dynamodb-requirements.txt`, `redis-requirements.txt`, and `test-filesource-optional-requirements.txt` can also be installed if you need to test the corresponding features. -1. All code must be compatible with all supported Python versions as described in README. Most portability issues are addressed by using the `six` package. We are avoiding the use of `__future__` imports, since they can easily be omitted by mistake causing code in one file to behave differently from another; instead, whenever possible, use an explicit approach that makes it clear what the desired behavior is in all Python versions (e.g. if you want to do floor division, use `//`; if you want to divide as floats, explicitly cast to floats). +### Testing -Developing with different Python versions ------------------------------------------ +To run all unit tests: -Example for switching to Python 3: +``` +pytest +``` -```virtualenv -p `which python3` ~/.virtualenvs/python-client``` \ No newline at end of file +There are also integration tests that can be run against the LaunchDarkly service. To enable them, set the environment variable `LD_SDK_KEY` to a valid production SDK Key. + +### Portability + +Most portability issues are addressed by using the `six` package. We are avoiding the use of `__future__` imports, since they can easily be omitted by mistake causing code in one file to behave differently from another; instead, whenever possible, use an explicit approach that makes it clear what the desired behavior is in all Python versions (e.g. if you want to do floor division, use `//`; if you want to divide as floats, explicitly cast to floats). + +It is preferable to run tests against all supported minor versions of Python (as described in `README.md` under Requirements), or at least the lowest and highest versions, prior to submitting a pull request. However, LaunchDarkly's CI tests will run automatically against all supported versions. 
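The portability conventions described in the new CONTRIBUTING text are easier to see in code. The sketch below is illustrative only (the function names are not from this repository); it shows explicit `//` for floor division, an explicit float cast for true division, and `six.string_types` for text checks that differ between Python 2 and 3:

```python
import six

def midpoint(low, high):
    # Floor division is written explicitly with //, so the result is the
    # same on Python 2 and Python 3 without a __future__ import.
    return (low + high) // 2

def ratio(numerator, denominator):
    # For true division, cast explicitly instead of relying on
    # `from __future__ import division`, which is easy to omit in one file.
    return float(numerator) / float(denominator)

def is_text(value):
    # six.string_types is (str,) on Python 3 and (str, unicode) on Python 2.
    return isinstance(value, six.string_types)
```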
diff --git a/README.md b/README.md index 6013179f..74e2fb23 100644 --- a/README.md +++ b/README.md @@ -1,138 +1,53 @@ -LaunchDarkly SDK for Python -=========================== +# LaunchDarkly Server-side SDK for Python -[![Circle CI](https://img.shields.io/circleci/project/launchdarkly/python-client.png)](https://circleci.com/gh/launchdarkly/python-client) +[![Circle CI](https://img.shields.io/circleci/project/launchdarkly/python-server-sdk.png)](https://circleci.com/gh/launchdarkly/python-server-sdk) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Flaunchdarkly%2Fpython-client.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Flaunchdarkly%2Fpython-client?ref=badge_shield) +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Flaunchdarkly%2Fpython-server-sdk.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Flaunchdarkly%2Fpython-server-sdk?ref=badge_shield) [![PyPI](https://img.shields.io/pypi/v/ldclient-py.svg?maxAge=2592000)](https://pypi.python.org/pypi/ldclient-py) [![PyPI](https://img.shields.io/pypi/pyversions/ldclient-py.svg)](https://pypi.python.org/pypi/ldclient-py) -[![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly) - -Supported Python versions -------------------------- - -This version of the LaunchDarkly SDK is compatible with Python 2.7, and Python 3.3 through 3.7. - -Quick setup ------------ - -1. Install the Python SDK with `pip` - - pip install ldclient-py - -2. Configure the library with your sdk key: - - import ldclient - -3. Get the client: - - ldclient.set_sdk_key("your sdk key") - client = ldclient.get() - - -HTTPS proxy ------------- -Python's standard HTTP library provides built-in support for the use of a HTTPS proxy. If the HTTPS_PROXY environment variable is present then the SDK will proxy all network requests through the URL provided. +## LaunchDarkly overview -How to set the HTTPS_PROXY environment variable on Mac/Linux systems: -``` -export HTTPS_PROXY=https://web-proxy.domain.com:8080 -``` - - -How to set the HTTPS_PROXY environment variable on Windows systems: -``` -set HTTPS_PROXY=https://web-proxy.domain.com:8080 -``` - -Or it can be set from within python: -``` -os.environ["https_proxy"] = "https://web-proxy.domain.com:8080" -``` - -If your proxy requires authentication then you can prefix the URN with your login information: -``` -export HTTPS_PROXY=http://user:pass@web-proxy.domain.com:8080 -``` -or -``` -set HTTPS_PROXY=http://user:pass@web-proxy.domain.com:8080 -``` - - -Your first feature flag ------------------------ - -1. Create a new feature flag on your [dashboard](https://app.launchdarkly.com) -2. In your application code, use the feature's key to check whether the flag is on for each user: - - if client.variation("your.flag.key", {"key": "user@test.com"}, False): - # application code to show the feature - else: - # the code to run if the feature is off - -Supported Python versions -------------------------- - -The SDK is tested with the most recent patch releases of Python 2.7, 3.3, 3.4, 3.5, and 3.6. Python 2.6 is no longer supported. - -Database integrations ---------------------- +[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. 
[Get started](https://docs.launchdarkly.com/docs/getting-started) using LaunchDarkly today! + +[![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly) -Feature flag data can be kept in a persistent store using Consul, DynamoDB, or Redis. These adapters are implemented in the `Consul`, `DynamoDB` and `Redis` classes in `ldclient.integrations`; to use them, call the `new_feature_store` method in the appropriate class, and put the returned object in the `feature_store` property of your client configuration. See [`ldclient.integrations`](https://launchdarkly-python-sdk.readthedocs.io/en/latest/api-integrations.html#module-ldclient.integrations) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store) for more information. +## Supported Python versions -Note that Consul is not supported in Python 3.3 or 3.4. +This version of the LaunchDarkly SDK is compatible with Python 2.7 and 3.3 through 3.7. It is tested with the most recent patch releases of those versions. Python 2.6 is no longer supported. -Using flag data from a file ---------------------------- +## Getting started -For testing purposes, the SDK can be made to read feature flag state from a file or files instead of connecting to LaunchDarkly. See [`ldclient.integrations.Files`](https://launchdarkly-python-sdk.readthedocs.io/en/latest/api-integrations.html#ldclient.integrations.Files) and the [SDK reference guide](https://docs.launchdarkly.com/v2.0/docs/reading-flags-from-a-file) for more details. +Refer to the [SDK reference guide](https://docs.launchdarkly.com/docs/python-sdk-reference) for instructions on getting started with using the SDK. -Learn more ----------- +## Learn more Check out our [documentation](http://docs.launchdarkly.com) for in-depth instructions on configuring and using LaunchDarkly. You can also head straight to the [complete reference guide for this SDK](http://docs.launchdarkly.com/docs/python-sdk-reference). Generated API documentation is on [readthedocs.io](https://launchdarkly-python-sdk.readthedocs.io/en/latest/). -Testing -------- +## Testing We run integration tests for all our SDKs using a centralized test harness. This approach gives us the ability to test for consistency across SDKs, as well as test networking behavior in a long-running application. These tests cover each method in the SDK, and verify that event sending, flag evaluation, stream reconnection, and other aspects of the SDK all behave correctly. -[![Test Coverage](https://codeclimate.com/github/launchdarkly/python-client/badges/coverage.svg)](https://codeclimate.com/github/launchdarkly/python-client/coverage) The Code Climate coverage does not include the coverage provided by this integration test harness. +[![Test Coverage](https://codeclimate.com/github/launchdarkly/python-server-sdk/badges/coverage.svg)](https://codeclimate.com/github/launchdarkly/python-server-sdk/coverage) The Code Climate coverage does not include the coverage provided by this integration test harness. -Contributing ------------- +## Contributing -See [CONTRIBUTING](CONTRIBUTING.md) for more information. +We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this SDK. 
-About LaunchDarkly ------------------- +## About LaunchDarkly * LaunchDarkly is a continuous delivery platform that provides feature flags as a service and allows developers to iterate quickly and safely. We allow you to easily flag your features and manage them from the LaunchDarkly dashboard. With LaunchDarkly, you can: * Roll out a new feature to a subset of your users (like a group of users who opt-in to a beta tester group), gathering feedback and bug reports from real-world use cases. * Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?). * Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file. * Grant access to certain features based on user attributes, like payment plan (eg: users on the ‘gold’ plan get access to more features than users in the ‘silver’ plan). Disable parts of your application to facilitate maintenance, without taking everything offline. -* LaunchDarkly provides feature flag SDKs for - * [Java](http://docs.launchdarkly.com/docs/java-sdk-reference "LaunchDarkly Java SDK") - * [JavaScript](http://docs.launchdarkly.com/docs/js-sdk-reference "LaunchDarkly JavaScript SDK") - * [PHP](http://docs.launchdarkly.com/docs/php-sdk-reference "LaunchDarkly PHP SDK") - * [Python](http://docs.launchdarkly.com/docs/python-sdk-reference "LaunchDarkly Python SDK") - * [Go](http://docs.launchdarkly.com/docs/go-sdk-reference "LaunchDarkly Go SDK") - * [Node.JS](http://docs.launchdarkly.com/docs/node-sdk-reference "LaunchDarkly Node SDK") - * [Electron](http://docs.launchdarkly.com/docs/electron-sdk-reference "LaunchDarkly Electron SDK") - * [.NET](http://docs.launchdarkly.com/docs/dotnet-sdk-reference "LaunchDarkly .Net SDK") - * [Ruby](http://docs.launchdarkly.com/docs/ruby-sdk-reference "LaunchDarkly Ruby SDK") - * [iOS](http://docs.launchdarkly.com/docs/ios-sdk-reference "LaunchDarkly iOS SDK") - * [Android](http://docs.launchdarkly.com/docs/android-sdk-reference "LaunchDarkly Android SDK") - * [C/C++](http://docs.launchdarkly.com/docs/c-sdk-reference "LaunchDarkly C/C++ SDK") +* LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Check out [our documentation](https://docs.launchdarkly.com/docs) for a complete list. 
* Explore LaunchDarkly - * [launchdarkly.com](http://www.launchdarkly.com/ "LaunchDarkly Main Website") for more information - * [docs.launchdarkly.com](http://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDKs - * [apidocs.launchdarkly.com](http://apidocs.launchdarkly.com/ "LaunchDarkly API Documentation") for our API documentation - * [blog.launchdarkly.com](http://blog.launchdarkly.com/ "LaunchDarkly Blog Documentation") for the latest product updates + * [launchdarkly.com](https://www.launchdarkly.com/ "LaunchDarkly Main Website") for more information + * [docs.launchdarkly.com](https://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDK reference guides + * [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ "LaunchDarkly API Documentation") for our API documentation + * [blog.launchdarkly.com](https://blog.launchdarkly.com/ "LaunchDarkly Blog Documentation") for the latest product updates * [Feature Flagging Guide](https://github.com/launchdarkly/featureflags/ "Feature Flagging Guide") for best practices and strategies diff --git a/scripts/release.sh b/scripts/release.sh index 089dae25..0f1808b7 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -9,7 +9,7 @@ # When done you should commit and push the changes made. set -uxe -echo "Starting python-client release." +echo "Starting python-server-sdk release." VERSION=$1 @@ -28,4 +28,4 @@ python setup.py sdist pip install twine python -m twine upload dist/* -echo "Done with python-client release" +echo "Done with python-server-sdk release" diff --git a/setup.py b/setup.py index 012def24..ee3faef9 100644 --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ def run(self): author='LaunchDarkly', author_email='team@launchdarkly.com', packages=find_packages(), - url='https://github.com/launchdarkly/python-client', + url='https://github.com/launchdarkly/python-server-sdk', description='LaunchDarkly SDK for Python', long_description='LaunchDarkly SDK for Python', install_requires=reqs, From f41f2ccc210a872df2445825e5e837eb9d3cf5f7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 25 Apr 2019 20:28:37 -0700 Subject: [PATCH 109/356] allow unit tests to be run without databases --- CONTRIBUTING.md | 2 ++ testing/test_feature_store.py | 30 +++++++++++++++++------------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 91c39924..697a6753 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -38,6 +38,8 @@ To run all unit tests: pytest ``` +By default, the full unit test suite includes live tests of the integrations for Consul, DynamoDB, and Redis. Those tests expect you to have instances of all of those databases running locally. To skip them, set the environment variable `LD_SKIP_DATABASE_TESTS=1` before running the tests. + There are also integration tests that can be run against the LaunchDarkly service. To enable them, set the environment variable `LD_SDK_KEY` to a valid production SDK Key. 
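For reference, one way a test module can honor the `LD_SKIP_DATABASE_TESTS` switch described above is sketched below. The test name is an illustrative placeholder; the actual gating lives in `testing/test_feature_store.py`:

```python
import os
import pytest

# Same convention the test suite uses: any value other than '1' leaves
# the database-backed tests enabled.
skip_db_tests = os.environ.get('LD_SKIP_DATABASE_TESTS') == '1'

@pytest.mark.skipif(skip_db_tests, reason="skipping database tests")
def test_redis_round_trip():
    # Illustrative placeholder for a test that needs a local Redis instance.
    assert True
```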
### Portability diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index ce0150cf..04267c16 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -1,5 +1,6 @@ import boto3 import json +import os import pytest import redis import time @@ -165,19 +166,22 @@ def _clear_data(self): class TestFeatureStore: - params = [ - InMemoryTester(), - RedisTester(CacheConfig.default()), - RedisTester(CacheConfig.disabled()), - RedisWithDeprecatedConstructorTester(CacheConfig.default()), - RedisWithDeprecatedConstructorTester(CacheConfig.disabled()), - DynamoDBTester(CacheConfig.default()), - DynamoDBTester(CacheConfig.disabled()) - ] - - if have_consul: - params.append(ConsulTester(CacheConfig.default())) - params.append(ConsulTester(CacheConfig.disabled())) + if os.environ.get('LD_SKIP_DATABASE_TESTS') == '1': + params = [ + InMemoryTester() + ] + else: + params = [ + RedisTester(CacheConfig.default()), + RedisTester(CacheConfig.disabled()), + RedisWithDeprecatedConstructorTester(CacheConfig.default()), + RedisWithDeprecatedConstructorTester(CacheConfig.disabled()), + DynamoDBTester(CacheConfig.default()), + DynamoDBTester(CacheConfig.disabled()) + ] + if have_consul: + params.append(ConsulTester(CacheConfig.default())) + params.append(ConsulTester(CacheConfig.disabled())) @pytest.fixture(params=params) def tester(self, request): From d764fd8dee20f7b5ab9e3c10f55712b7baf3447d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 25 Apr 2019 20:29:40 -0700 Subject: [PATCH 110/356] add missing test --- testing/test_feature_store.py | 1 + 1 file changed, 1 insertion(+) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index 04267c16..d64a25f8 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -172,6 +172,7 @@ class TestFeatureStore: ] else: params = [ + InMemoryTester(), RedisTester(CacheConfig.default()), RedisTester(CacheConfig.disabled()), RedisWithDeprecatedConstructorTester(CacheConfig.default()), From ea5d8e8a24dddde4f4ca2636032ce338b53a69f8 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 26 Apr 2019 10:52:16 -0700 Subject: [PATCH 111/356] rm FOSSA link/badge --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 74e2fb23..5766279f 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,6 @@ [![Circle CI](https://img.shields.io/circleci/project/launchdarkly/python-server-sdk.png)](https://circleci.com/gh/launchdarkly/python-server-sdk) -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Flaunchdarkly%2Fpython-server-sdk.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Flaunchdarkly%2Fpython-server-sdk?ref=badge_shield) - [![PyPI](https://img.shields.io/pypi/v/ldclient-py.svg?maxAge=2592000)](https://pypi.python.org/pypi/ldclient-py) [![PyPI](https://img.shields.io/pypi/pyversions/ldclient-py.svg)](https://pypi.python.org/pypi/ldclient-py) From 6ed12f1aa46240dbc1bfcd6d1307a1ac2f5f1f54 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 26 Apr 2019 12:09:24 -0700 Subject: [PATCH 112/356] misc fixes --- CONTRIBUTING.md | 4 ++-- README.md | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 697a6753..2027062b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkl ## Submitting bug reports and feature requests -The LaunchDarkly SDK team monitors the issue 
tracker associated with the `launchdarkly/python-server-sdk` SDK repository. Bug reports and feature requests specific to this SDK should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.
+The LaunchDarkly SDK team monitors the [issue tracker](https://github.com/launchdarkly/python-server-sdk/issues) in the SDK repository. Bug reports and feature requests specific to this SDK should be filed in this issue tracker. The SDK team will respond to all newly filed issues within two business days.

## Submitting pull requests

@@ -14,7 +14,7 @@ We encourage pull requests and other contributions from the community. Before su

### Setup

-It's advisable to use `virtualenv` to create a development environment within the project directory:
+It's advisable to use [`virtualenv`](https://virtualenv.pypa.io/) to create a development environment within the project directory:

```
mkvirtualenv python-client
diff --git a/README.md b/README.md
index 5766279f..be38186a 100644
--- a/README.md
+++ b/README.md
@@ -29,8 +29,6 @@ Generated API documentation is on [readthedocs.io](https://launchdarkly-python-s

We run integration tests for all our SDKs using a centralized test harness. This approach gives us the ability to test for consistency across SDKs, as well as test networking behavior in a long-running application. These tests cover each method in the SDK, and verify that event sending, flag evaluation, stream reconnection, and other aspects of the SDK all behave correctly.

-[![Test Coverage](https://codeclimate.com/github/launchdarkly/python-server-sdk/badges/coverage.svg)](https://codeclimate.com/github/launchdarkly/python-server-sdk/coverage) The Code Climate coverage does not include the coverage provided by this integration test harness.
-
## Contributing

We encourage pull requests and other contributions from the community. Check out our [contributing guidelines](CONTRIBUTING.md) for instructions on how to contribute to this SDK.

From cbac044ea647870b5949ab0c704011e4f3c7ef56 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Fri, 26 Apr 2019 13:18:46 -0700
Subject: [PATCH 113/356] minor doc link fix

---
 docs/index.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/index.rst b/docs/index.rst
index 7a9d2c73..909ac27d 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -8,7 +8,7 @@ LaunchDarkly Python SDK

This is the API reference for the `LaunchDarkly `_ SDK for Python.

-The latest version of the SDK can be found on `PyPI <https://pypi.python.org/pypi/ldclient-py>`_, and the source code is on `GitHub <https://github.com/launchdarkly/python-client>`_.
+The latest version of the SDK can be found on `PyPI <https://pypi.python.org/pypi/ldclient-py>`_, and the source code is on `GitHub <https://github.com/launchdarkly/python-server-sdk>`_.

For more information, see LaunchDarkly's `Quickstart `_ and `SDK Reference Guide `_.
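A note on the test-gating idiom from patches 109 and 110 above (and the CONTRIBUTING.md paragraph that documents `LD_SKIP_DATABASE_TESTS`): the switch has to be read at module import time, because pytest expands fixture parameter lists during collection, before any test runs. The following is a minimal, self-contained sketch of that idiom; the tester classes here are hypothetical stand-ins, not the SDK's own.

```
import os
import pytest

# Read the switch once at import time; pytest builds fixture params during
# collection, so the decision must be made before tests are collected.
SKIP_DB_TESTS = os.environ.get('LD_SKIP_DATABASE_TESTS') == '1'

class InMemoryTester:
    requires_database = False

class RedisTester:
    requires_database = True

class TestStores:
    # Running `LD_SKIP_DATABASE_TESTS=1 pytest` leaves only the in-memory
    # variant in the parameter list.
    params = [InMemoryTester()] if SKIP_DB_TESTS else [InMemoryTester(), RedisTester()]

    @pytest.fixture(params=params)
    def tester(self, request):
        return request.param  # each test taking `tester` runs once per entry

    def test_tester_is_configured(self, tester):
        assert isinstance(tester.requires_database, bool)

# A whole class can be skipped with the same flag:
@pytest.mark.skipif(SKIP_DB_TESTS, reason="skipping database tests")
class TestDatabaseOnly:
    def test_placeholder(self):
        assert True
```

Patch 114, a little further on, applies exactly this `skipif` form to the Redis-specific test class.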
From 52c3b23649d59324618a6a1015af26a933e020d5 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 1 May 2019 11:41:26 -0700 Subject: [PATCH 114/356] fix skipping of database tests --- testing/test_feature_store.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index d64a25f8..90af57ca 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -19,6 +19,8 @@ from ldclient.redis_feature_store import RedisFeatureStore from ldclient.versioned_data_kind import FEATURES +skip_db_tests = os.environ.get('LD_SKIP_DATABASE_TESTS') == '1' + class InMemoryTester(object): def init_store(self): @@ -166,7 +168,7 @@ def _clear_data(self): class TestFeatureStore: - if os.environ.get('LD_SKIP_DATABASE_TESTS') == '1': + if skip_db_tests: params = [ InMemoryTester() ] @@ -321,6 +323,7 @@ def test_stores_with_different_prefixes_are_independent(self, tester): assert items == { 'flagB1': flag_b1, 'flagB2': flag_b2 } +@pytest.mark.skipif(skip_db_tests, reason="skipping database tests") class TestRedisFeatureStoreExtraTests: def test_upsert_race_condition_against_external_client_with_higher_version(self): other_client = redis.StrictRedis(host='localhost', port=6379, db=0) From 6161055385c842bbd234de20c6c6e45f76068057 Mon Sep 17 00:00:00 2001 From: Ben Woskow <48036130+bwoskow-ld@users.noreply.github.com> Date: Wed, 1 May 2019 13:21:36 -0700 Subject: [PATCH 115/356] renaming the package to launchdarkly-server-sdk (#108) --- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- docs/Makefile | 2 +- docs/conf.py | 12 ++++++------ docs/index.rst | 4 ++-- setup.py | 2 +- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2027062b..7d2a9b8a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -17,8 +17,8 @@ We encourage pull requests and other contributions from the community. Before su It's advisable to use [`virtualenv`](https://virtualenv.pypa.io/) to create a development environment within the project directory: ``` -mkvirtualenv python-client -source ./python-client/bin/activate +mkvirtualenv python-server-sdk +source ~/.virtualenvs/python-server-sdk/bin/activate ``` To install the runtime and test requirements: diff --git a/README.md b/README.md index be38186a..7858bbc9 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,8 @@ [![Circle CI](https://img.shields.io/circleci/project/launchdarkly/python-server-sdk.png)](https://circleci.com/gh/launchdarkly/python-server-sdk) -[![PyPI](https://img.shields.io/pypi/v/ldclient-py.svg?maxAge=2592000)](https://pypi.python.org/pypi/ldclient-py) -[![PyPI](https://img.shields.io/pypi/pyversions/ldclient-py.svg)](https://pypi.python.org/pypi/ldclient-py) +[![PyPI](https://img.shields.io/pypi/v/launchdarkly-server-sdk.svg?maxAge=2592000)](https://pypi.python.org/pypi/launchdarkly-server-sdk) +[![PyPI](https://img.shields.io/pypi/pyversions/launchdarkly-server-sdk.svg)](https://pypi.python.org/pypi/launchdarkly-server-sdk) ## LaunchDarkly overview diff --git a/docs/Makefile b/docs/Makefile index ebce0c0b..aea5aff6 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -5,7 +5,7 @@ SPHINXOPTS = SPHINXBUILD = sphinx-build -SPHINXPROJ = ldclient-py +SPHINXPROJ = launchdarkly-server-sdk SOURCEDIR = . 
BUILDDIR = build
diff --git a/docs/conf.py b/docs/conf.py
index 10f481f3..9e3db965 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -25,7 +25,7 @@
# -- Project information -----------------------------------------------------

-project = u'ldclient-py'
+project = u'launchdarkly-server-sdk'
copyright = u'2019, LaunchDarkly'
author = u'LaunchDarkly'

@@ -110,7 +110,7 @@
# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
-htmlhelp_basename = 'ldclient-pydoc'
+htmlhelp_basename = 'launchdarkly-server-sdk-doc'

# -- Options for LaTeX output ------------------------------------------------

@@ -137,7 +137,7 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'ldclient-py.tex', u'ldclient-py Documentation',
+ (master_doc, 'launchdarkly-server-sdk.tex', u'launchdarkly-server-sdk Documentation',
u'LaunchDarkly', 'manual'),
]

@@ -147,7 +147,7 @@
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- (master_doc, 'ldclient-py', u'ldclient-py Documentation',
+ (master_doc, 'launchdarkly-server-sdk', u'launchdarkly-server-sdk Documentation',
[author], 1)
]

@@ -158,8 +158,8 @@
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'ldclient-py', u'ldclient-py Documentation',
- author, 'ldclient-py', 'One line description of project.',
+ (master_doc, 'launchdarkly-server-sdk', u'launchdarkly-server-sdk Documentation',
+ author, 'launchdarkly-server-sdk', 'One line description of project.',
'Miscellaneous'),
]

diff --git a/docs/index.rst b/docs/index.rst
index 909ac27d..1be4daca 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,4 +1,4 @@
-.. ldclient-py documentation master file, created by
+.. launchdarkly-server-sdk documentation master file, created by
sphinx-quickstart on Mon Feb 4 13:16:49 2019.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
@@ -8,7 +8,7 @@ LaunchDarkly Python SDK

This is the API reference for the `LaunchDarkly `_ SDK for Python.

-The latest version of the SDK can be found on `PyPI <https://pypi.python.org/pypi/ldclient-py>`_, and the source code is on `GitHub <https://github.com/launchdarkly/python-server-sdk>`_.
+The latest version of the SDK can be found on `PyPI <https://pypi.python.org/pypi/launchdarkly-server-sdk>`_, and the source code is on `GitHub <https://github.com/launchdarkly/python-server-sdk>`_.

For more information, see LaunchDarkly's `Quickstart `_ and `SDK Reference Guide `_.
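The rename in patch 115 changes only the distribution name that packaging tools see (as the setup.py hunk just below shows); the importable package is still `ldclient`. A quick sketch of what that means for consumers, assuming the renamed distribution is published to PyPI:

```
# Before the rename:  pip install ldclient-py
# After the rename:   pip install launchdarkly-server-sdk
#
# The import name is unaffected in both cases:
import ldclient
from ldclient.version import VERSION

print(VERSION)  # the constant written into ldclient/version.py by scripts/release.sh
```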
diff --git a/setup.py b/setup.py index b69dece9..41ccf721 100644 --- a/setup.py +++ b/setup.py @@ -44,7 +44,7 @@ def run(self): raise SystemExit(errno) setup( - name='ldclient-py', + name='launchdarkly-server-sdk', version=ldclient_version, author='LaunchDarkly', author_email='team@launchdarkly.com', From 34b15f5b5ceb9243fc7a259322308c7f9466d02c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 11 Jun 2019 12:05:10 -0700 Subject: [PATCH 116/356] use log.warning(), not log.warn() or warnings.warn() --- ldclient/client.py | 26 +++++++++---------- ldclient/config.py | 2 +- ldclient/flag.py | 2 +- .../integrations/files/file_data_source.py | 2 +- ldclient/operators.py | 8 +++--- ldclient/sse_client.py | 4 +-- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 16d91b0a..5a65201a 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -82,7 +82,7 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): "Only one of either is expected") if sdk_key is not None: - log.warn("Deprecated sdk_key argument was passed to init. Use config object instead.") + log.warning("Deprecated sdk_key argument was passed to init. Use config object instead.") self._config = Config(sdk_key=sdk_key) else: self._config = config or Config.default() @@ -113,7 +113,7 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): if self._update_processor.initialized() is True: log.info("Started LaunchDarkly Client: OK") else: - log.warn("Initialization timeout exceeded for LaunchDarkly Client or an error occurred. " + log.warning("Initialization timeout exceeded for LaunchDarkly Client or an error occurred. " "Feature Flags may not yet be available.") def _make_event_processor(self, config): @@ -139,7 +139,7 @@ def _make_update_processor(self, config, store, ready): return StreamingUpdateProcessor(config, feature_requester, store, ready) log.info("Disabling streaming API") - log.warn("You should only disable the streaming API if instructed to do so by LaunchDarkly support") + log.warning("You should only disable the streaming API if instructed to do so by LaunchDarkly support") return PollingUpdateProcessor(config, feature_requester, store, ready) def get_sdk_key(self): @@ -180,7 +180,7 @@ def track(self, event_name, user, data=None): :param data: optional additional data associated with the event """ if user is None or user.get('key') is None: - log.warn("Missing user or user key when calling track().") + log.warning("Missing user or user key when calling track().") else: self._send_event({'kind': 'custom', 'key': event_name, 'user': user, 'data': data}) @@ -194,7 +194,7 @@ def identify(self, user): :param dict user: attributes of the user to register """ if user is None or user.get('key') is None: - log.warn("Missing user or user key when calling identify().") + log.warning("Missing user or user key when calling identify().") else: self._send_event({'kind': 'identify', 'key': str(user.get('key')), 'user': user}) @@ -234,7 +234,7 @@ def toggle(self, key, user, default): .. deprecated:: 2.0.0 """ - log.warn("Deprecated method: toggle() called. Use variation() instead.") + log.warning("Deprecated method: toggle() called. 
Use variation() instead.") return self.variation(key, user, default) def variation(self, key, user, default): @@ -281,16 +281,16 @@ def send_event(value, variation=None, flag=None, reason=None): if not self.is_initialized(): if self._store.initialized: - log.warn("Feature Flag evaluation attempted before client has initialized - using last known values from feature store for feature key: " + key) + log.warning("Feature Flag evaluation attempted before client has initialized - using last known values from feature store for feature key: " + key) else: - log.warn("Feature Flag evaluation attempted before client has initialized! Feature store unavailable - returning default: " + log.warning("Feature Flag evaluation attempted before client has initialized! Feature store unavailable - returning default: " + str(default) + " for feature key: " + key) reason = error_reason('CLIENT_NOT_READY') send_event(default, None, None, reason) return EvaluationDetail(default, None, reason) if user is not None and user.get('key', "") == "": - log.warn("User key is blank. Flag evaluation will proceed, but the user will not be stored in LaunchDarkly.") + log.warning("User key is blank. Flag evaluation will proceed, but the user will not be stored in LaunchDarkly.") try: flag = self._store.get(FEATURES, key, lambda x: x) @@ -369,18 +369,18 @@ def all_flags_state(self, user, **kwargs): :rtype: FeatureFlagsState """ if self._config.offline: - log.warn("all_flags_state() called, but client is in offline mode. Returning empty state") + log.warning("all_flags_state() called, but client is in offline mode. Returning empty state") return FeatureFlagsState(False) if not self.is_initialized(): if self._store.initialized: - log.warn("all_flags_state() called before client has finished initializing! Using last known values from feature store") + log.warning("all_flags_state() called before client has finished initializing! Using last known values from feature store") else: - log.warn("all_flags_state() called before client has finished initializing! Feature store unavailable - returning empty state") + log.warning("all_flags_state() called before client has finished initializing! Feature store unavailable - returning empty state") return FeatureFlagsState(False) if user is None or user.get('key') is None: - log.warn("User or user key is None when calling all_flags_state(). Returning empty state.") + log.warning("User or user key is None when calling all_flags_state(). 
Returning empty state.") return FeatureFlagsState(False) state = FeatureFlagsState(True) diff --git a/ldclient/config.py b/ldclient/config.py index f8ef61d0..b0283d95 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -280,4 +280,4 @@ def inline_users_in_events(self): def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key is '': - log.warn("Missing or blank sdk_key.") + log.warning("Missing or blank sdk_key.") diff --git a/ldclient/flag.py b/ldclient/flag.py index dceb699c..c7515e63 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -147,7 +147,7 @@ def _check_prerequisites(flag, user, store, events, include_reasons_in_events): for prereq in flag.get('prerequisites') or []: prereq_flag = store.get(FEATURES, prereq.get('key'), lambda x: x) if prereq_flag is None: - log.warn("Missing prereq flag: " + prereq.get('key')) + log.warning("Missing prereq flag: " + prereq.get('key')) failed_prereq = prereq else: prereq_res = _evaluate(prereq_flag, user, store, events, include_reasons_in_events) diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py index 9ba6e561..785a3851 100644 --- a/ldclient/impl/integrations/files/file_data_source.py +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -107,7 +107,7 @@ def _start_auto_updater(self): try: resolved_paths.append(os.path.realpath(path)) except: - log.warn('Cannot watch for changes to data file "%s" because it is an invalid path' % path) + log.warning('Cannot watch for changes to data file "%s" because it is an invalid path' % path) if have_watchdog and not self._force_polling: return _FileDataSource.WatchdogAutoUpdater(resolved_paths, self._load_all) else: diff --git a/ldclient/operators.py b/ldclient/operators.py index 253e8a8b..158455ca 100644 --- a/ldclient/operators.py +++ b/ldclient/operators.py @@ -27,7 +27,7 @@ def _string_operator(u, c, fn): def _numeric_operator(u, c, fn): # bool is a subtype of int, and we don't want to try and compare it as a number. if isinstance(input, bool): - log.warn("Got unexpected bool type when attempting to parse time") + log.warning("Got unexpected bool type when attempting to parse time") return None if isinstance(u, Number): @@ -44,7 +44,7 @@ def _parse_time(input): # bool is a subtype of int, and we don't want to try and compare it as a time. 
if isinstance(input, bool): - log.warn("Got unexpected bool type when attempting to parse time") + log.warning("Got unexpected bool type when attempting to parse time") return None if isinstance(input, Number): @@ -56,10 +56,10 @@ def _parse_time(input): timestamp = (parsed_time - epoch).total_seconds() return timestamp * 1000.0 except Exception as e: - log.warn("Couldn't parse timestamp:" + str(input) + " with message: " + str(e)) + log.warning("Couldn't parse timestamp:" + str(input) + " with message: " + str(e)) return None - log.warn("Got unexpected type: " + type(input) + " with value: " + str(input) + " when attempting to parse time") + log.warning("Got unexpected type: " + type(input) + " with value: " + str(input) + " when attempting to parse time") return None def _time_operator(u, c, fn): diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index 49d853c7..fcd255a3 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -7,13 +7,13 @@ import re import time -import warnings import six import urllib3 from ldclient.util import create_http_pool_manager +from ldclient.util import log from ldclient.util import throw_if_unsuccessful_response # Technically, we should support streams that mix line endings. This regex, @@ -158,7 +158,7 @@ def parse(cls, raw): m = cls.sse_line_pattern.match(line) if m is None: # Malformed line. Discard but warn. - warnings.warn('Invalid SSE line: "%s"' % line, SyntaxWarning) + log.warning('Invalid SSE line: "%s"' % line) continue name = m.groupdict()['name'] From c990266e46818e26ee026660e067a5a907eef447 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 16:19:27 -0700 Subject: [PATCH 117/356] drop events when inbox is full --- ldclient/event_processor.py | 55 ++++++++++++++++++++++----------- testing/test_event_processor.py | 30 ++++++++++++++++++ 2 files changed, 67 insertions(+), 18 deletions(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 2bd4f322..cf52a2fb 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -187,7 +187,7 @@ def __init__(self, capacity): def add_event(self, event): if len(self._events) >= self._capacity: if not self._exceeded_capacity: - log.warning("Event queue is full-- dropped an event") + log.warning("Exceeded event queue capacity. Increase capacity to avoid dropping events.") self._exceeded_capacity = True else: self._events.append(event) @@ -205,13 +205,13 @@ def clear(self): class EventDispatcher(object): - def __init__(self, queue, config, http_client): - self._queue = queue + def __init__(self, inbox, config, http_client): + self._inbox = inbox self._config = config self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl) if http_client is None else http_client self._close_http = (http_client is None) # so we know whether to close it later self._disabled = False - self._buffer = EventBuffer(config.events_max_pending) + self._outbox = EventBuffer(config.events_max_pending) self._user_keys = SimpleLRUCache(config.user_keys_capacity) self._formatter = EventOutputFormatter(config) self._last_known_past_time = 0 @@ -226,7 +226,7 @@ def _run_main_loop(self): log.info("Starting event processor") while True: try: - message = self._queue.get(block=True) + message = self._inbox.get(block=True) if message.type == 'event': self._process_event(message.param) elif message.type == 'flush': @@ -248,7 +248,7 @@ def _process_event(self, event): return # Always record the event in the summarizer. 
- self._buffer.add_to_summary(event) + self._outbox.add_to_summary(event) # Decide whether to add the event to the payload. Feature events may be added twice, once for # the event (if tracked) and once for debugging. @@ -271,13 +271,13 @@ def _process_event(self, event): if add_index_event: ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user } - self._buffer.add_event(ie) + self._outbox.add_event(ie) if add_full_event: - self._buffer.add_event(event) + self._outbox.add_event(event) if add_debug_event: debug_event = event.copy() debug_event['debug'] = True - self._buffer.add_event(debug_event) + self._outbox.add_event(debug_event) # Add to the set of users we've noticed, and return true if the user was already known to us. def notice_user(self, user): @@ -298,13 +298,13 @@ def _should_debug_event(self, event): def _trigger_flush(self): if self._disabled: return - payload = self._buffer.get_payload() + payload = self._outbox.get_payload() if len(payload.events) > 0 or len(payload.summary.counters) > 0: task = EventPayloadSendTask(self._http, self._config, self._formatter, payload, self._handle_response) if self._flush_workers.execute(task.run): # The events have been handed off to a flush worker; clear them from our buffer. - self._buffer.clear() + self._outbox.clear() else: # We're already at our limit of concurrent flushes; leave the events in the buffer. pass @@ -330,22 +330,23 @@ def _do_shutdown(self): class DefaultEventProcessor(EventProcessor): - def __init__(self, config, http=None): - self._queue = queue.Queue(config.events_max_pending) + def __init__(self, config, http=None, dispatcher_class=None): + self._inbox = queue.Queue(config.events_max_pending) + self._inbox_full = False self._flush_timer = RepeatingTimer(config.flush_interval, self.flush) self._users_flush_timer = RepeatingTimer(config.user_keys_flush_interval, self._flush_users) self._flush_timer.start() self._users_flush_timer.start() self._close_lock = Lock() self._closed = False - EventDispatcher(self._queue, config, http) + (dispatcher_class or EventDispatcher)(self._inbox, config, http) def send_event(self, event): event['creationDate'] = int(time.time() * 1000) - self._queue.put(EventProcessorMessage('event', event)) + self._post_to_inbox(EventProcessorMessage('event', event)) def flush(self): - self._queue.put(EventProcessorMessage('flush', None)) + self._post_to_inbox(EventProcessorMessage('flush', None)) def stop(self): with self._close_lock: @@ -355,10 +356,21 @@ def stop(self): self._flush_timer.stop() self._users_flush_timer.stop() self.flush() + # Note that here we are not calling _post_to_inbox, because we *do* want to wait if the inbox + # is full; an orderly shutdown can't happen unless these messages are received. 
self._post_message_and_wait('stop') + def _post_to_inbox(self, message): + try: + self._inbox.put(message, block=False) + except queue.Full: + if not self._inbox_full: + # possible race condition here, but it's of no real consequence - we'd just get an extra log line + self._inbox_full = True + log.warning("Events are being produced faster than they can be processed; some events will be dropped") + def _flush_users(self): - self._queue.put(EventProcessorMessage('flush_users', None)) + self._inbox.put(EventProcessorMessage('flush_users', None)) # Used only in tests def _wait_until_inactive(self): @@ -366,5 +378,12 @@ def _wait_until_inactive(self): def _post_message_and_wait(self, type): reply = Event() - self._queue.put(EventProcessorMessage(type, reply)) + self._inbox.put(EventProcessorMessage(type, reply)) reply.wait() + + # These magic methods allow use of the "with" block in tests + def __enter__(self): + return self + + def __exit__(self, tyep, value, traceback): + self.stop() diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index a2e110b2..8faa78d3 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -1,5 +1,6 @@ import json import pytest +from threading import Thread import time from ldclient.config import Config @@ -460,6 +461,35 @@ def test_will_still_send_after_429_error(): def test_will_still_send_after_500_error(): verify_recoverable_http_error(500) +def test_does_not_block_on_full_inbox(): + config = Config(events_max_pending=1) # this sets the size of both the inbox and the outbox to 1 + ep_inbox_holder = [ None ] + ep_inbox = None + + def dispatcher_factory(inbox, config, http): + ep_inbox_holder[0] = inbox # it's an array because otherwise it's hard for a closure to modify a variable + return None # the dispatcher object itself doesn't matter, we only manipulate the inbox + def event_consumer(): + while True: + message = ep_inbox.get(block=True) + if message.type == 'stop': + message.param.set() + return + def start_consuming_events(): + Thread(target=event_consumer).start() + + with DefaultEventProcessor(config, mock_http, dispatcher_factory) as ep: + ep_inbox = ep_inbox_holder[0] + event1 = { 'kind': 'custom', 'key': 'event1', 'user': user } + event2 = { 'kind': 'custom', 'key': 'event2', 'user': user } + ep.send_event(event1) + ep.send_event(event2) # this event should be dropped - inbox is full + message1 = ep_inbox.get(block=False) + had_no_more = ep_inbox.empty() + start_consuming_events() + assert message1.param == event1 + assert had_no_more + def verify_unrecoverable_http_error(status): setup_processor(Config(sdk_key = 'SDK_KEY')) From e436f77d4374c7ae052f7aea095db0a550a4c01c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 16:26:44 -0700 Subject: [PATCH 118/356] rm obsolete pytest.raises parameter --- testing/test_feature_store_helpers.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/test_feature_store_helpers.py b/testing/test_feature_store_helpers.py index 77ccb6f8..0e2da11b 100644 --- a/testing/test_feature_store_helpers.py +++ b/testing/test_feature_store_helpers.py @@ -137,7 +137,7 @@ def test_get_can_throw_exception(self, cached): core = MockCore() wrapper = make_wrapper(core, cached) core.error = CustomError() - with pytest.raises(CustomError, message="expected exception"): + with pytest.raises(CustomError): wrapper.get(THINGS, "key", lambda x: x) @pytest.mark.parametrize("cached", [False, True]) @@ -204,7 +204,7 @@ def 
test_get_all_can_throw_exception(self, cached): core = MockCore() wrapper = make_wrapper(core, cached) core.error = CustomError() - with pytest.raises(CustomError, message="expected exception"): + with pytest.raises(CustomError): wrapper.all(THINGS) @pytest.mark.parametrize("cached", [False, True]) @@ -255,7 +255,7 @@ def test_upsert_can_throw_exception(self, cached): core = MockCore() wrapper = make_wrapper(core, cached) core.error = CustomError() - with pytest.raises(CustomError, message="expected exception"): + with pytest.raises(CustomError): wrapper.upsert(THINGS, { "key": "x", "version": 1 }) @pytest.mark.parametrize("cached", [False, True]) @@ -281,7 +281,7 @@ def test_delete_can_throw_exception(self, cached): core = MockCore() wrapper = make_wrapper(core, cached) core.error = CustomError() - with pytest.raises(CustomError, message="expected exception"): + with pytest.raises(CustomError): wrapper.delete(THINGS, "x", 1) def test_uncached_initialized_queries_state_only_until_inited(self): From 1e068c9dd649df80fed8efbca38fd65b56803623 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 17:14:40 -0700 Subject: [PATCH 119/356] clean up test state management --- testing/test_event_processor.py | 653 +++++++++++++++----------------- 1 file changed, 314 insertions(+), 339 deletions(-) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 8faa78d3..08568b87 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -67,384 +67,361 @@ def setup_processor(config): def test_identify_event_is_queued(): - setup_processor(Config()) - - e = { 'kind': 'identify', 'user': user } - ep.send_event(e) - - output = flush_and_get_events() - assert len(output) == 1 - assert output == [{ - 'kind': 'identify', - 'creationDate': e['creationDate'], - 'key': user['key'], - 'user': user - }] + with DefaultEventProcessor(Config(), mock_http) as ep: + e = { 'kind': 'identify', 'user': user } + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + assert output == [{ + 'kind': 'identify', + 'creationDate': e['creationDate'], + 'key': user['key'], + 'user': user + }] def test_user_is_filtered_in_identify_event(): - setup_processor(Config(all_attributes_private = True)) - - e = { 'kind': 'identify', 'user': user } - ep.send_event(e) - - output = flush_and_get_events() - assert len(output) == 1 - assert output == [{ - 'kind': 'identify', - 'creationDate': e['creationDate'], - 'key': user['key'], - 'user': filtered_user - }] + with DefaultEventProcessor(Config(all_attributes_private = True), mock_http) as ep: + e = { 'kind': 'identify', 'user': user } + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + assert output == [{ + 'kind': 'identify', + 'creationDate': e['creationDate'], + 'key': user['key'], + 'user': filtered_user + }] def test_user_attrs_are_stringified_in_identify_event(): - setup_processor(Config()) - - e = { 'kind': 'identify', 'user': numeric_user } - ep.send_event(e) - - output = flush_and_get_events() - assert len(output) == 1 - assert output == [{ - 'kind': 'identify', - 'creationDate': e['creationDate'], - 'key': stringified_numeric_user['key'], - 'user': stringified_numeric_user - }] + with DefaultEventProcessor(Config(), mock_http) as ep: + e = { 'kind': 'identify', 'user': numeric_user } + ep.send_event(e) + + output = flush_and_get_events(ep) + assert len(output) == 1 + assert output == [{ + 'kind': 'identify', + 'creationDate': e['creationDate'], + 'key': 
stringified_numeric_user['key'], + 'user': stringified_numeric_user + }] def test_individual_feature_event_is_queued_with_index_event(): - setup_processor(Config()) - - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) + with DefaultEventProcessor(Config(), mock_http) as ep: + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 3 - check_index_event(output[0], e, user) - check_feature_event(output[1], e, False, None) - check_summary_event(output[2]) + output = flush_and_get_events(ep) + assert len(output) == 3 + check_index_event(output[0], e, user) + check_feature_event(output[1], e, False, None) + check_summary_event(output[2]) def test_user_is_filtered_in_index_event(): - setup_processor(Config(all_attributes_private = True)) - - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) + with DefaultEventProcessor(Config(all_attributes_private = True), mock_http) as ep: + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 3 - check_index_event(output[0], e, filtered_user) - check_feature_event(output[1], e, False, None) - check_summary_event(output[2]) + output = flush_and_get_events(ep) + assert len(output) == 3 + check_index_event(output[0], e, filtered_user) + check_feature_event(output[1], e, False, None) + check_summary_event(output[2]) def test_user_attrs_are_stringified_in_index_event(): - setup_processor(Config()) - - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) + with DefaultEventProcessor(Config(), mock_http) as ep: + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 3 - check_index_event(output[0], e, stringified_numeric_user) - check_feature_event(output[1], e, False, None) - check_summary_event(output[2]) + output = flush_and_get_events(ep) + assert len(output) == 3 + check_index_event(output[0], e, stringified_numeric_user) + check_feature_event(output[1], e, False, None) + check_summary_event(output[2]) def test_feature_event_can_contain_inline_user(): - setup_processor(Config(inline_users_in_events = True)) - - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) + with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 2 - check_feature_event(output[0], e, False, user) - check_summary_event(output[1]) + output = flush_and_get_events(ep) + assert len(output) == 2 + 
check_feature_event(output[0], e, False, user) + check_summary_event(output[1]) def test_user_is_filtered_in_feature_event(): - setup_processor(Config(inline_users_in_events = True, all_attributes_private = True)) - - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) + with DefaultEventProcessor(Config(inline_users_in_events = True, all_attributes_private = True), mock_http) as ep: + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 2 - check_feature_event(output[0], e, False, filtered_user) - check_summary_event(output[1]) + output = flush_and_get_events(ep) + assert len(output) == 2 + check_feature_event(output[0], e, False, filtered_user) + check_summary_event(output[1]) def test_user_attrs_are_stringified_in_feature_event(): - setup_processor(Config(inline_users_in_events = True)) - - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) + with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 2 - check_feature_event(output[0], e, False, stringified_numeric_user) - check_summary_event(output[1]) + output = flush_and_get_events(ep) + assert len(output) == 2 + check_feature_event(output[0], e, False, stringified_numeric_user) + check_summary_event(output[1]) def test_index_event_is_still_generated_if_inline_users_is_true_but_feature_event_is_not_tracked(): - setup_processor(Config(inline_users_in_events = True)) - - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': False - } - ep.send_event(e) + with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': False + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 2 - check_index_event(output[0], e, user) - check_summary_event(output[1]) + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e, user) + check_summary_event(output[1]) def test_two_events_for_same_user_only_produce_one_index_event(): - setup_processor(Config(user_keys_flush_interval = 300)) - - e0 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - e1 = e0.copy(); - ep.send_event(e0) - ep.send_event(e1) + with DefaultEventProcessor(Config(user_keys_flush_interval = 300), mock_http) as ep: + e0 = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + e1 = e0.copy(); + ep.send_event(e0) + ep.send_event(e1) - output = flush_and_get_events() - assert len(output) == 4 - check_index_event(output[0], e0, user) - check_feature_event(output[1], e0, False, None) 
- check_feature_event(output[2], e1, False, None) - check_summary_event(output[3]) + output = flush_and_get_events(ep) + assert len(output) == 4 + check_index_event(output[0], e0, user) + check_feature_event(output[1], e0, False, None) + check_feature_event(output[2], e1, False, None) + check_summary_event(output[3]) def test_new_index_event_is_added_if_user_cache_has_been_cleared(): - setup_processor(Config(user_keys_flush_interval = 0.1)) - - e0 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - e1 = e0.copy(); - ep.send_event(e0); - time.sleep(0.2) - ep.send_event(e1) - - output = flush_and_get_events() - assert len(output) == 5 - check_index_event(output[0], e0, user) - check_feature_event(output[1], e0, False, None) - check_index_event(output[2], e1, user) - check_feature_event(output[3], e1, False, None) - check_summary_event(output[4]) + with DefaultEventProcessor(Config(user_keys_flush_interval = 0.1), mock_http) as ep: + e0 = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + e1 = e0.copy(); + ep.send_event(e0); + time.sleep(0.2) + ep.send_event(e1) + + output = flush_and_get_events(ep) + assert len(output) == 5 + check_index_event(output[0], e0, user) + check_feature_event(output[1], e0, False, None) + check_index_event(output[2], e1, user) + check_feature_event(output[3], e1, False, None) + check_summary_event(output[4]) def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode(): - setup_processor(Config()) - - future_time = now() + 100000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': False, 'debugEventsUntilDate': future_time - } - ep.send_event(e) + with DefaultEventProcessor(Config(), mock_http) as ep: + future_time = now() + 100000 + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', + 'trackEvents': False, 'debugEventsUntilDate': future_time + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 3 - check_index_event(output[0], e, user) - check_feature_event(output[1], e, True, user) - check_summary_event(output[2]) + output = flush_and_get_events(ep) + assert len(output) == 3 + check_index_event(output[0], e, user) + check_feature_event(output[1], e, True, user) + check_summary_event(output[2]) def test_event_can_be_both_tracked_and_debugged(): - setup_processor(Config()) - - future_time = now() + 100000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': True, 'debugEventsUntilDate': future_time - } - ep.send_event(e) + with DefaultEventProcessor(Config(), mock_http) as ep: + future_time = now() + 100000 + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', + 'trackEvents': True, 'debugEventsUntilDate': future_time + } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 4 - check_index_event(output[0], e, user) - check_feature_event(output[1], e, False, None) - check_feature_event(output[2], e, True, user) - check_summary_event(output[3]) + output = flush_and_get_events(ep) + assert len(output) == 4 + check_index_event(output[0], e, user) + 
check_feature_event(output[1], e, False, None) + check_feature_event(output[2], e, True, user) + check_summary_event(output[3]) def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time(): - setup_processor(Config()) - - # Pick a server time that is somewhat behind the client time - server_time = now() - 20000 - - # Send and flush an event we don't care about, just to set the last server time - mock_http.set_server_time(server_time) - ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }}) - flush_and_get_events() - - # Now send an event with debug mode on, with a "debug until" time that is further in - # the future than the server time, but in the past compared to the client. - debug_until = server_time + 1000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': False, 'debugEventsUntilDate': debug_until - } - ep.send_event(e) + with DefaultEventProcessor(Config(), mock_http) as ep: + # Pick a server time that is somewhat behind the client time + server_time = now() - 20000 + + # Send and flush an event we don't care about, just to set the last server time + mock_http.set_server_time(server_time) + ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }}) + flush_and_get_events(ep) + + # Now send an event with debug mode on, with a "debug until" time that is further in + # the future than the server time, but in the past compared to the client. + debug_until = server_time + 1000 + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', + 'trackEvents': False, 'debugEventsUntilDate': debug_until + } + ep.send_event(e) - # Should get a summary event only, not a full feature event - output = flush_and_get_events() - assert len(output) == 2 - check_index_event(output[0], e, user) - check_summary_event(output[1]) + # Should get a summary event only, not a full feature event + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e, user) + check_summary_event(output[1]) def test_debug_mode_expires_based_on_server_time_if_server_time_is_later_than_client_time(): - setup_processor(Config()) - - # Pick a server time that is somewhat ahead of the client time - server_time = now() + 20000 - - # Send and flush an event we don't care about, just to set the last server time - mock_http.set_server_time(server_time) - ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }}) - flush_and_get_events() - - # Now send an event with debug mode on, with a "debug until" time that is further in - # the future than the client time, but in the past compared to the server. 
- debug_until = server_time - 1000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': False, 'debugEventsUntilDate': debug_until - } - ep.send_event(e) + with DefaultEventProcessor(Config(), mock_http) as ep: + # Pick a server time that is somewhat ahead of the client time + server_time = now() + 20000 + + # Send and flush an event we don't care about, just to set the last server time + mock_http.set_server_time(server_time) + ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }}) + flush_and_get_events(ep) + + # Now send an event with debug mode on, with a "debug until" time that is further in + # the future than the client time, but in the past compared to the server. + debug_until = server_time - 1000 + e = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', + 'trackEvents': False, 'debugEventsUntilDate': debug_until + } + ep.send_event(e) - # Should get a summary event only, not a full feature event - output = flush_and_get_events() - assert len(output) == 2 - check_index_event(output[0], e, user) - check_summary_event(output[1]) + # Should get a summary event only, not a full feature event + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e, user) + check_summary_event(output[1]) def test_two_feature_events_for_same_user_generate_only_one_index_event(): - setup_processor(Config()) - - e1 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value1', 'default': 'default', 'trackEvents': False - } - e2 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 2, 'value': 'value2', 'default': 'default', 'trackEvents': False - } - ep.send_event(e1) - ep.send_event(e2) + with DefaultEventProcessor(Config(), mock_http) as ep: + e1 = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value1', 'default': 'default', 'trackEvents': False + } + e2 = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 2, 'value': 'value2', 'default': 'default', 'trackEvents': False + } + ep.send_event(e1) + ep.send_event(e2) - output = flush_and_get_events() - assert len(output) == 2 - check_index_event(output[0], e1, user) - check_summary_event(output[1]) + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e1, user) + check_summary_event(output[1]) def test_nontracked_events_are_summarized(): - setup_processor(Config()) - - e1 = { - 'kind': 'feature', 'key': 'flagkey1', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value1', 'default': 'default1', 'trackEvents': False - } - e2 = { - 'kind': 'feature', 'key': 'flagkey2', 'version': 22, 'user': user, - 'variation': 2, 'value': 'value2', 'default': 'default2', 'trackEvents': False - } - ep.send_event(e1) - ep.send_event(e2) - - output = flush_and_get_events() - assert len(output) == 2 - check_index_event(output[0], e1, user) - se = output[1] - assert se['kind'] == 'summary' - assert se['startDate'] == e1['creationDate'] - assert se['endDate'] == e2['creationDate'] - assert se['features'] == { - 'flagkey1': { - 'default': 'default1', - 'counters': [ { 'version': 11, 'variation': 1, 'value': 'value1', 'count': 1 } ] - }, - 'flagkey2': { - 'default': 'default2', - 'counters': [ { 'version': 22, 'variation': 2, 'value': 'value2', 'count': 
1 } ] + with DefaultEventProcessor(Config(), mock_http) as ep: + e1 = { + 'kind': 'feature', 'key': 'flagkey1', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value1', 'default': 'default1', 'trackEvents': False + } + e2 = { + 'kind': 'feature', 'key': 'flagkey2', 'version': 22, 'user': user, + 'variation': 2, 'value': 'value2', 'default': 'default2', 'trackEvents': False + } + ep.send_event(e1) + ep.send_event(e2) + + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e1, user) + se = output[1] + assert se['kind'] == 'summary' + assert se['startDate'] == e1['creationDate'] + assert se['endDate'] == e2['creationDate'] + assert se['features'] == { + 'flagkey1': { + 'default': 'default1', + 'counters': [ { 'version': 11, 'variation': 1, 'value': 'value1', 'count': 1 } ] + }, + 'flagkey2': { + 'default': 'default2', + 'counters': [ { 'version': 22, 'variation': 2, 'value': 'value2', 'count': 1 } ] + } } - } def test_custom_event_is_queued_with_user(): - setup_processor(Config()) - - e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } - ep.send_event(e) + with DefaultEventProcessor(Config(), mock_http) as ep: + e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 2 - check_index_event(output[0], e, user) - check_custom_event(output[1], e, None) + output = flush_and_get_events(ep) + assert len(output) == 2 + check_index_event(output[0], e, user) + check_custom_event(output[1], e, None) def test_custom_event_can_contain_inline_user(): - setup_processor(Config(inline_users_in_events = True)) + with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } + ep.send_event(e) - e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } - ep.send_event(e) - - output = flush_and_get_events() - assert len(output) == 1 - check_custom_event(output[0], e, user) + output = flush_and_get_events(ep) + assert len(output) == 1 + check_custom_event(output[0], e, user) def test_user_is_filtered_in_custom_event(): - setup_processor(Config(inline_users_in_events = True, all_attributes_private = True)) - - e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } - ep.send_event(e) + with DefaultEventProcessor(Config(inline_users_in_events = True, all_attributes_private = True), mock_http) as ep: + e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } + ep.send_event(e) - output = flush_and_get_events() - assert len(output) == 1 - check_custom_event(output[0], e, filtered_user) + output = flush_and_get_events(ep) + assert len(output) == 1 + check_custom_event(output[0], e, filtered_user) def test_user_attrs_are_stringified_in_custom_event(): - setup_processor(Config(inline_users_in_events = True)) + with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + e = { 'kind': 'custom', 'key': 'eventkey', 'user': numeric_user, 'data': { 'thing': 'stuff '} } + ep.send_event(e) - e = { 'kind': 'custom', 'key': 'eventkey', 'user': numeric_user, 'data': { 'thing': 'stuff '} } - ep.send_event(e) - - output = flush_and_get_events() - assert len(output) == 1 - check_custom_event(output[0], e, stringified_numeric_user) + output = flush_and_get_events(ep) + assert len(output) == 1 + 
check_custom_event(output[0], e, stringified_numeric_user) def test_nothing_is_sent_if_there_are_no_events(): - setup_processor(Config()) - ep.flush() - ep._wait_until_inactive() - assert mock_http.request_data is None + with DefaultEventProcessor(Config(), mock_http) as ep: + ep.flush() + ep._wait_until_inactive() + assert mock_http.request_data is None def test_sdk_key_is_sent(): - setup_processor(Config(sdk_key = 'SDK_KEY')) - - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() + with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() - assert mock_http.request_headers.get('Authorization') is 'SDK_KEY' + assert mock_http.request_headers.get('Authorization') is 'SDK_KEY' def test_no_more_payloads_are_sent_after_401_error(): verify_unrecoverable_http_error(401) @@ -491,34 +468,32 @@ def start_consuming_events(): assert had_no_more def verify_unrecoverable_http_error(status): - setup_processor(Config(sdk_key = 'SDK_KEY')) - - mock_http.set_response_status(status) - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - mock_http.reset() - - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - assert mock_http.request_data is None + with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + mock_http.set_response_status(status) + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + mock_http.reset() + + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + assert mock_http.request_data is None def verify_recoverable_http_error(status): - setup_processor(Config(sdk_key = 'SDK_KEY')) - - mock_http.set_response_status(status) - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - mock_http.reset() - - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - assert mock_http.request_data is not None - -def flush_and_get_events(): + with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + mock_http.set_response_status(status) + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + mock_http.reset() + + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + assert mock_http.request_data is not None + +def flush_and_get_events(ep): ep.flush() ep._wait_until_inactive() if mock_http.request_data is None: From 4b74fcff401d8dcac94822920a18d1de1fcafc1c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 17:18:46 -0700 Subject: [PATCH 120/356] typo --- ldclient/event_processor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index cf52a2fb..74baf37d 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -385,5 +385,5 @@ def _post_message_and_wait(self, type): def __enter__(self): return self - def __exit__(self, tyep, value, traceback): + def __exit__(self, type, value, traceback): self.stop() From ee7a51c14d42006887b2809366495f42fa3f402a Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 20:30:57 -0700 Subject: [PATCH 121/356] store the package version in just one place --- .ldrelease/update-version.sh | 9 --------- scripts/release.sh | 7 +------ setup.py | 6 ++---- 3 
files changed, 3 insertions(+), 19 deletions(-) delete mode 100755 .ldrelease/update-version.sh diff --git a/.ldrelease/update-version.sh b/.ldrelease/update-version.sh deleted file mode 100755 index a8edafa1..00000000 --- a/.ldrelease/update-version.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -# Update version in ldclient/version.py -echo "VERSION = \"${LD_RELEASE_VERSION}\"" > ldclient/version.py - -# Update version in setup.py -SETUP_PY_TEMP=./setup.py.tmp -sed "s/ldclient_version=.*/ldclient_version='${LD_RELEASE_VERSION}'/g" setup.py > ${SETUP_PY_TEMP} -mv ${SETUP_PY_TEMP} setup.py diff --git a/scripts/release.sh b/scripts/release.sh index 0f1808b7..d2b24e73 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -13,14 +13,9 @@ echo "Starting python-server-sdk release." VERSION=$1 -#Update version in ldclient/version.py +# Update version in ldclient/version.py - setup.py references this constant echo "VERSION = \"${VERSION}\"" > ldclient/version.py -# Update version in setup.py -SETUP_PY_TEMP=./setup.py.tmp -sed "s/ldclient_version=.*/ldclient_version='${VERSION}'/g" setup.py > ${SETUP_PY_TEMP} -mv ${SETUP_PY_TEMP} setup.py - # Prepare distribution python setup.py sdist diff --git a/setup.py b/setup.py index 2aec3cf0..9b110b4f 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,5 @@ from setuptools import find_packages, setup, Command +import ldclient import sys import uuid @@ -9,9 +10,6 @@ def parse_requirements(filename): lineiter = (line.strip() for line in open(filename)) return [line for line in lineiter if line and not line.startswith("#")] - -ldclient_version='6.9.4' - # parse_requirements() returns generator of pip.req.InstallRequirement objects install_reqs = parse_requirements('requirements.txt') test_reqs = parse_requirements('test-requirements.txt') @@ -45,7 +43,7 @@ def run(self): setup( name='launchdarkly-server-sdk', - version=ldclient_version, + version=ldclient.VERSION, author='LaunchDarkly', author_email='team@launchdarkly.com', packages=find_packages(), From 1c10e1e293c63dadd67f1d0ca2610f8b18b7a9a7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 20:37:50 -0700 Subject: [PATCH 122/356] fix package reference --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 9b110b4f..1d305a9e 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,5 @@ from setuptools import find_packages, setup, Command -import ldclient +from ldclient.version import VERSION import sys import uuid @@ -43,7 +43,7 @@ def run(self): setup( name='launchdarkly-server-sdk', - version=ldclient.VERSION, + version=VERSION, author='LaunchDarkly', author_email='team@launchdarkly.com', packages=find_packages(), From d9c96dd03a1369b270b5cb276713a20ce3fa47eb Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 20:42:16 -0700 Subject: [PATCH 123/356] add requirements --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 46e2166e..9d26ec77 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,6 +16,7 @@ test-template: &test-template name: install requirements command: | sudo pip install --upgrade pip virtualenv; + sudo pip install -r requirements.txt; sudo pip install -r test-requirements.txt; if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then sudo pip install -r test-filesource-optional-requirements.txt; From 73d20f733cad0c6717e4e34ecfc82db38a754448 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 21:11:14 -0700 Subject: [PATCH 124/356] 
don't import ldclient.version directly --- setup.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 1d305a9e..a0f4452f 100644 --- a/setup.py +++ b/setup.py @@ -1,10 +1,16 @@ from setuptools import find_packages, setup, Command -from ldclient.version import VERSION import sys import uuid - +# Get VERSION constant from ldclient.version - we can't simply import that module because +# ldclient/__init__.py imports all kinds of stuff that requires dependencies we may not have +# loaded yet. Based on https://packaging.python.org/guides/single-sourcing-package-version/ +version_module_globals = {} +with open('./ldclient/version.py') as f: + exec(f.read(), version_module_globals) +ldclient_version = version_module_globals['VERSION'] + def parse_requirements(filename): """ load requirements from a pip requirements file """ lineiter = (line.strip() for line in open(filename)) @@ -43,7 +49,7 @@ def run(self): setup( name='launchdarkly-server-sdk', - version=VERSION, + version=ldclient_version, author='LaunchDarkly', author_email='team@launchdarkly.com', packages=find_packages(), From 0a0aa8fae746f51183350cd999abb46d9b492285 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 21:11:19 -0700 Subject: [PATCH 125/356] Revert "add requirements" This reverts commit d9c96dd03a1369b270b5cb276713a20ce3fa47eb. --- .circleci/config.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9d26ec77..46e2166e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -16,7 +16,6 @@ test-template: &test-template name: install requirements command: | sudo pip install --upgrade pip virtualenv; - sudo pip install -r requirements.txt; sudo pip install -r test-requirements.txt; if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then sudo pip install -r test-filesource-optional-requirements.txt; From 11f0da63242cf195a989b29bebb9f0f6438d2323 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 21:30:48 -0700 Subject: [PATCH 126/356] fix merge error + adjust for some event properties now being optional --- ldclient/client.py | 2 +- ldclient/event_processor.py | 2 +- ldclient/event_summarizer.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index ac087c4d..825d542c 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -397,7 +397,7 @@ def all_flags_state(self, user, **kwargs): if client_only and not flag.get('clientSide', False): continue try: - detail = evaluate(flag, user, self._store, False).detail + detail = evaluate(flag, user, self._store, self._event_factory_default).detail state.add_flag(flag, detail.value, detail.variation_index, detail.reason if with_reasons else None, details_only_if_tracked) except Exception as e: diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 5a532861..f66e0e57 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -259,7 +259,7 @@ def _process_event(self, event): add_debug_event = False add_index_event = False if event['kind'] == "feature": - add_full_event = event['trackEvents'] + add_full_event = event.get('trackEvents') add_debug_event = self._should_debug_event(event) else: add_full_event = True diff --git a/ldclient/event_summarizer.py b/ldclient/event_summarizer.py index c0aa5aeb..64956fdc 100644 --- a/ldclient/event_summarizer.py +++ b/ldclient/event_summarizer.py @@ -20,7 +20,7 @@ def __init__(self): """ def summarize_event(self, event): if 
event['kind'] == 'feature': - counter_key = (event['key'], event['variation'], event['version']) + counter_key = (event['key'], event.get('variation'), event['version']) counter_val = self.counters.get(counter_key) if counter_val is None: counter_val = { 'count': 1, 'value': event['value'], 'default': event.get('default') } From 17bfa5ab690ace0d1ca924351a657c3b8dc4c36d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 19 Aug 2019 23:05:26 -0700 Subject: [PATCH 127/356] fix summary logic again for now-optional event properties --- ldclient/event_summarizer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/event_summarizer.py b/ldclient/event_summarizer.py index 64956fdc..e046a347 100644 --- a/ldclient/event_summarizer.py +++ b/ldclient/event_summarizer.py @@ -20,7 +20,7 @@ def __init__(self): """ def summarize_event(self, event): if event['kind'] == 'feature': - counter_key = (event['key'], event.get('variation'), event['version']) + counter_key = (event['key'], event.get('variation'), event.get('version')) counter_val = self.counters.get(counter_key) if counter_val is None: counter_val = { 'count': 1, 'value': event['value'], 'default': event.get('default') } From a5da01068ecb320960f5b30f5493c1a299266082 Mon Sep 17 00:00:00 2001 From: Gabor Angeli Date: Fri, 25 Oct 2019 16:54:04 -0700 Subject: [PATCH 128/356] Allow explicitly proxying only ld requests (#130) --- ldclient/config.py | 10 +++++++++- ldclient/event_processor.py | 2 +- ldclient/feature_requester.py | 2 +- ldclient/sse_client.py | 5 +++-- ldclient/streaming.py | 3 ++- ldclient/util.py | 37 ++++++++++++++++++++++++++++------- 6 files changed, 46 insertions(+), 13 deletions(-) diff --git a/ldclient/config.py b/ldclient/config.py index b0283d95..0e4ab391 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -43,7 +43,8 @@ def __init__(self, offline=False, user_keys_capacity=1000, user_keys_flush_interval=300, - inline_users_in_events=False): + inline_users_in_events=False, + http_proxy=None): """ :param string sdk_key: The SDK key for your LaunchDarkly account. :param string base_uri: The base URL for the LaunchDarkly server. Most users should use the default @@ -95,6 +96,8 @@ def __init__(self, :type event_processor_class: (ldclient.config.Config) -> EventProcessor :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, config, and FeatureStore implementation + :param http_proxy: Use a proxy when connecting to LaunchDarkly. This is the full URI of the + proxy; for example: http://my-proxy.com:1234. 
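A minimal usage sketch for the new parameter (the SDK key and proxy address are placeholders; set_config and get are the module-level helpers this SDK already exposes):

import ldclient
from ldclient.config import Config

# Hypothetical values: any SDK key and any reachable proxy URI would do.
config = Config(sdk_key='my-sdk-key', http_proxy='http://my-proxy.com:1234')
ldclient.set_config(config)
client = ldclient.get()  # polling, streaming, and event delivery all use the proxy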
""" self.__sdk_key = sdk_key @@ -126,6 +129,7 @@ def __init__(self, self.__user_keys_capacity = user_keys_capacity self.__user_keys_flush_interval = user_keys_flush_interval self.__inline_users_in_events = inline_users_in_events + self.__http_proxy = http_proxy @classmethod def default(cls): @@ -278,6 +282,10 @@ def user_keys_flush_interval(self): def inline_users_in_events(self): return self.__inline_users_in_events + @property + def http_proxy(self): + return self.__http_proxy + def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key is '': log.warning("Missing or blank sdk_key.") diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index f66e0e57..5d0e429d 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -211,7 +211,7 @@ class EventDispatcher(object): def __init__(self, inbox, config, http_client): self._inbox = inbox self._config = config - self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl) if http_client is None else http_client + self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, proxy_url=config.http_proxy) if http_client is None else http_client self._close_http = (http_client is None) # so we know whether to close it later self._disabled = False self._outbox = EventBuffer(config.events_max_pending) diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 51aee6a0..75dc373d 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -25,7 +25,7 @@ class FeatureRequesterImpl(FeatureRequester): def __init__(self, config): self._cache = dict() - self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl) + self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, proxy_url=config.http_proxy) self._config = config def get_all_data(self): diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index fcd255a3..2603f31f 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -23,7 +23,7 @@ class SSEClient(object): def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeout=300, chunk_size=10000, - verify_ssl=False, http=None, **kwargs): + verify_ssl=False, http=None, http_proxy=None, **kwargs): self.url = url self.last_id = last_id self.retry = retry @@ -32,7 +32,8 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo self._chunk_size = chunk_size # Optional support for passing in an HTTP client - self.http = create_http_pool_manager(num_pools=1, verify_ssl=verify_ssl) + self.http = create_http_pool_manager(num_pools=1, verify_ssl=verify_ssl, + proxy_url=http_proxy) # Any extra kwargs will be fed into the request call later. 
self.requests_kwargs = kwargs diff --git a/ldclient/streaming.py b/ldclient/streaming.py index 43e815a4..b279da9a 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -89,7 +89,8 @@ def _connect(self): headers=_stream_headers(self._config.sdk_key), connect_timeout=self._config.connect_timeout, read_timeout=stream_read_timeout, - verify_ssl=self._config.verify_ssl) + verify_ssl=self._config.verify_ssl, + http_proxy=self._config.http_proxy) def stop(self): log.info("Stopping StreamingUpdateProcessor") diff --git a/ldclient/util.py b/ldclient/util.py index 229030b8..1b5de3d2 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -84,14 +84,37 @@ def status(self): return self._status -def create_http_pool_manager(num_pools=1, verify_ssl=False): +def create_http_pool_manager(num_pools=1, verify_ssl=False, proxy_url=None): + """ + Create an http pool + + :param num_pools: The number of connections in the pool. + :param verify_ssl: If true, force the connections to verify valid SSL. + :param proxy_url: If set, proxy connections through the proxy at this URL. + + :return: A connection pool that implements urllib3.PoolManager + """ if not verify_ssl: - return urllib3.PoolManager(num_pools=num_pools) - return urllib3.PoolManager( - num_pools=num_pools, - cert_reqs='CERT_REQUIRED', - ca_certs=certifi.where() - ) + # Case: create a manager that does not need to respect SSL + if proxy_url is not None: + return urllib3.ProxyManager(num_pools=num_pools, proxy_url=proxy_url) + else: + return urllib3.PoolManager(num_pools=num_pools) + else: + # Case: force the connection to respect SSL + if proxy_url is not None: + return urllib3.ProxyManager( + num_pools=num_pools, + cert_reqs='CERT_REQUIRED', + ca_certs=certifi.where(), + proxy_url=proxy_url + ) + else: + return urllib3.PoolManager( + num_pools=num_pools, + cert_reqs='CERT_REQUIRED', + ca_certs=certifi.where() + ) def throw_if_unsuccessful_response(resp): From 69f2233c0a7db19d4e6c4cb2946710a74c39c25f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 13:22:36 -0700 Subject: [PATCH 129/356] fix broken indirect/patch request, add tests for feature requestor --- ldclient/feature_requester.py | 4 +- testing/http_util.py | 94 ++++++++++++++++++++++ testing/test_feature_requester.py | 127 ++++++++++++++++++++++++++++++ 3 files changed, 223 insertions(+), 2 deletions(-) create mode 100644 testing/http_util.py create mode 100644 testing/test_feature_requester.py diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 51aee6a0..16a79981 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -36,7 +36,7 @@ def get_all_data(self): } def get_one(self, kind, key): - return self._do_request(kind.request_api_path + '/' + key, False) + return self._do_request(self._config.base_uri + kind.request_api_path + '/' + key, False) def _do_request(self, uri, allow_cache): hdrs = _headers(self._config.sdk_key) @@ -49,7 +49,7 @@ def _do_request(self, uri, allow_cache): timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), retries=1) throw_if_unsuccessful_response(r) - if r.status == 304 and cache_entry is not None: + if r.status == 304 and allow_cache and cache_entry is not None: data = cache_entry.data etag = cache_entry.etag from_cache = True diff --git a/testing/http_util.py b/testing/http_util.py new file mode 100644 index 00000000..321f71cc --- /dev/null +++ b/testing/http_util.py @@ -0,0 +1,94 @@ +from http.server import BaseHTTPRequestHandler, HTTPServer 
+import json +from queue import Queue +from six import iteritems +import socket +from threading import Thread + +def get_available_port(): + s = socket.socket(socket.AF_INET, type = socket.SOCK_STREAM) + s.bind(('localhost', 0)) + _, port = s.getsockname() + s.close() + return port + +def start_server(): + sw = MockServerWrapper(get_available_port()) + sw.start() + return sw + +class MockServerWrapper(Thread): + def __init__(self, port): + Thread.__init__(self) + self.port = port + self.uri = 'http://localhost:%d' % port + self.server = HTTPServer(('localhost', port), MockServerRequestHandler) + self.server.server_wrapper = self + self.matchers = {} + self.requests = Queue() + + def close(self): + self.server.shutdown() + self.server.server_close() + + def run(self): + self.server.serve_forever() + + def setup_response(self, uri_path, status, body = None, headers = None): + self.matchers[uri_path] = MockServerResponse(status, body, headers) + + def setup_json_response(self, uri_path, data, headers = None): + final_headers = {} if headers is None else headers.copy() + final_headers['Content-Type'] = 'application/json' + return self.setup_response(uri_path, 200, json.dumps(data), final_headers) + + def await_request(self): + return self.requests.get() + + def require_request(self): + return self.requests.get(block=False) + + # enter/exit magic methods allow server to be auto-closed by "with" statement + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + +class MockServerRequestHandler(BaseHTTPRequestHandler): + # def __init__(self, server_wrapper, request, client_address, server): + # self.server_wrapper = server_wrapper + # BaseHTTPRequestHandler.__init__(self, request, client_address, server) + + def do_GET(self): + self._do_request('GET') + + def do_POST(self): + self._do_request('POST') + + def _do_request(self, method): + server_wrapper = self.server.server_wrapper + server_wrapper.requests.put(MockServerRequest(method, self.path, self.headers)) + if self.path in server_wrapper.matchers: + resp = server_wrapper.matchers[self.path] + self.send_response(resp.status) + if resp.headers is not None: + for key, value in iteritems(resp.headers): + self.send_header(key, value) + self.end_headers() + if resp.body is not None: + self.wfile.write(resp.body) + else: + self.send_error(404) + +class MockServerRequest(object): + def __init__(self, method, path, headers): + self.method = method + self.path = path + self.headers = headers + +class MockServerResponse(object): + def __init__(self, status, body, headers): + self.status = status + self.body = body + self.headers = headers diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py new file mode 100644 index 00000000..569f1ef9 --- /dev/null +++ b/testing/test_feature_requester.py @@ -0,0 +1,127 @@ +import pytest + +from ldclient.config import Config +from ldclient.feature_requester import FeatureRequesterImpl +from ldclient.util import UnsuccessfulResponseException +from ldclient.version import VERSION +from ldclient.versioned_data_kind import FEATURES, SEGMENTS +from testing.http_util import start_server + + +def test_get_all_data_returns_data(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri) + fr = FeatureRequesterImpl(config) + + flags = { 'flag1': { 'key': 'flag1' } } + segments = { 'segment1': { 'key': 'segment1' } } + resp_data = { 'flags': flags, 'segments': segments } + expected_data = { FEATURES: flags, SEGMENTS:
segments } + server.setup_json_response('/sdk/latest-all', resp_data) + + result = fr.get_all_data() + assert result == expected_data + +def test_get_all_data_sends_headers(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri) + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + server.setup_json_response('/sdk/latest-all', resp_data) + + fr.get_all_data() + req = server.require_request() + assert req.headers['Authorization'] == 'sdk-key' + assert req.headers['User-Agent'] == 'PythonClient/' + VERSION + +def test_get_all_data_can_use_cached_data(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri) + fr = FeatureRequesterImpl(config) + + etag1 = 'my-etag-1' + etag2 = 'my-etag-2' + resp_data1 = { 'flags': {}, 'segments': {} } + resp_data2 = { 'flags': { 'flag1': { 'key': 'flag1' } }, 'segments': {} } + expected_data1 = { FEATURES: {}, SEGMENTS: {} } + expected_data2 = { FEATURES: { 'flag1': { 'key': 'flag1' } }, SEGMENTS: {} } + req_path = '/sdk/latest-all' + server.setup_json_response(req_path, resp_data1, { 'Etag': etag1 }) + + result = fr.get_all_data() + assert result == expected_data1 + req = server.require_request() + assert 'If-None-Match' not in req.headers.keys() + + server.setup_response(req_path, 304, None, { 'Etag': etag1 }) + + result = fr.get_all_data() + assert result == expected_data1 + req = server.require_request() + assert req.headers['If-None-Match'] == etag1 + + server.setup_json_response(req_path, resp_data2, { 'Etag': etag2 }) + + result = fr.get_all_data() + assert result == expected_data2 + req = server.require_request() + assert req.headers['If-None-Match'] == etag1 + + server.setup_response(req_path, 304, None, { 'Etag': etag2 }) + + result = fr.get_all_data() + assert result == expected_data2 + req = server.require_request() + assert req.headers['If-None-Match'] == etag2 + +def test_get_one_flag_returns_data(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri) + fr = FeatureRequesterImpl(config) + key = 'flag1' + flag_data = { 'key': key } + server.setup_json_response('/sdk/latest-flags/' + key, flag_data) + result = fr.get_one(FEATURES, key) + assert result == flag_data + +def test_get_one_flag_sends_headers(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri) + fr = FeatureRequesterImpl(config) + key = 'flag1' + flag_data = { 'key': key } + server.setup_json_response('/sdk/latest-flags/' + key, flag_data) + fr.get_one(FEATURES, key) + req = server.require_request() + assert req.headers['Authorization'] == 'sdk-key' + assert req.headers['User-Agent'] == 'PythonClient/' + VERSION + +def test_get_one_flag_throws_on_error(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri) + fr = FeatureRequesterImpl(config) + with pytest.raises(UnsuccessfulResponseException) as e: + fr.get_one(FEATURES, 'didnt-set-up-a-response-for-this-flag') + assert e.value.status == 404 + +def test_get_one_flag_does_not_use_etags(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri) + fr = FeatureRequesterImpl(config) + + etag = 'my-etag' + key = 'flag1' + flag_data = { 'key': key } + req_path = '/sdk/latest-flags/' + key + server.setup_json_response(req_path, flag_data, { 'Etag': etag }) + + result = fr.get_one(FEATURES, key) + assert result == flag_data + req = server.require_request() + assert 
'If-None-Match' not in req.headers.keys() + + result = fr.get_one(FEATURES, key) + assert result == flag_data + req = server.require_request() + assert 'If-None-Match' not in req.headers.keys() # did not send etag from previous request From 0fa5e05d5cafaf810a2127ef4e4e0d94ba680781 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 13:37:13 -0700 Subject: [PATCH 130/356] Python 2/3 compatibility for HTTPServer --- testing/http_util.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testing/http_util.py b/testing/http_util.py index 321f71cc..a8c1fde7 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -1,7 +1,7 @@ -from http.server import BaseHTTPRequestHandler, HTTPServer import json from queue import Queue from six import iteritems +from six.moves import BaseHTTPServer import socket from threading import Thread @@ -22,7 +22,7 @@ def __init__(self, port): Thread.__init__(self) self.port = port self.uri = 'http://localhost:%d' % port - self.server = HTTPServer(('localhost', port), MockServerRequestHandler) + self.server = BaseHTTPServer.HTTPServer(('localhost', port), MockServerRequestHandler) self.server.server_wrapper = self self.matchers = {} self.requests = Queue() @@ -55,7 +55,7 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.close() -class MockServerRequestHandler(BaseHTTPRequestHandler): +class MockServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): # def __init__(self, server_wrapper, request, client_address, server): # self.server_wrapper = server_wrapper # BaseHTTPRequestHandler.__init__(self, request, client_address, server) From e75ff0fc70bebddf40b975340935b9c39246fb5f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 13:48:39 -0700 Subject: [PATCH 131/356] Py2/3 compatibility: queue --- testing/http_util.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/testing/http_util.py b/testing/http_util.py index a8c1fde7..3a881f57 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -1,7 +1,6 @@ import json -from queue import Queue from six import iteritems -from six.moves import BaseHTTPServer +from six.moves import BaseHTTPServer, queue import socket from threading import Thread @@ -25,7 +24,7 @@ def __init__(self, port): self.server = BaseHTTPServer.HTTPServer(('localhost', port), MockServerRequestHandler) self.server.server_wrapper = self self.matchers = {} - self.requests = Queue() + self.requests = queue.Queue() def close(self): self.server.shutdown() From 68161a294d4266b4f7c74b2a9a243758c843d38b Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 13:58:55 -0700 Subject: [PATCH 132/356] more Py3 compatibility --- testing/http_util.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/testing/http_util.py b/testing/http_util.py index 3a881f57..4e54890c 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -1,3 +1,4 @@ +from builtins import bytes import json from six import iteritems from six.moves import BaseHTTPServer, queue @@ -76,7 +77,7 @@ def _do_request(self, method): self.send_header(key, value) self.end_headers() if resp.body is not None: - self.wfile.write(resp.body) + self.wfile.write(bytes(resp.body, 'utf-8')) else: self.send_error(404) From 74c9eed339ee640f86048cc9b979bc0714d1d001 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 15:12:54 -0700 Subject: [PATCH 133/356] don't need import of builtins --- testing/http_util.py | 1 - 1 file changed, 1 deletion(-) diff --git 
a/testing/http_util.py b/testing/http_util.py index 4e54890c..992c5d01 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -1,4 +1,3 @@ -from builtins import bytes import json from six import iteritems from six.moves import BaseHTTPServer, queue From 032b04ca8f512920a43d2d7176e41eaf343e4a0f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 15:44:30 -0700 Subject: [PATCH 134/356] fix string encoding --- testing/http_util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/http_util.py b/testing/http_util.py index 992c5d01..009458bd 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -76,7 +76,7 @@ def _do_request(self, method): self.send_header(key, value) self.end_headers() if resp.body is not None: - self.wfile.write(bytes(resp.body, 'utf-8')) + self.wfile.write(resp.body.encode('UTF-8')) else: self.send_error(404) From 89ce3e2652ae9a4b99a37ccf6a3d882bf068aeb6 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 17:19:10 -0700 Subject: [PATCH 135/356] implement setting proxy URL by environment variable --- ldclient/event_processor.py | 3 +- ldclient/feature_requester.py | 2 +- ldclient/sse_client.py | 2 +- ldclient/streaming.py | 10 ++++ ldclient/util.py | 39 ++++++++++++--- testing/http_util.py | 14 +++--- testing/test_event_processor.py | 38 +++++++++++++++ testing/test_feature_requester.py | 38 +++++++++++++++ testing/test_streaming.py | 81 +++++++++++++++++++++++++++++++ 9 files changed, 211 insertions(+), 16 deletions(-) create mode 100644 testing/test_streaming.py diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 5ffd8517..1f9c5649 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -211,7 +211,8 @@ class EventDispatcher(object): def __init__(self, inbox, config, http_client): self._inbox = inbox self._config = config - self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl) if http_client is None else http_client + self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, + target_base_uri=config.events_uri) if http_client is None else http_client self._close_http = (http_client is None) # so we know whether to close it later self._disabled = False self._outbox = EventBuffer(config.events_max_pending) diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 16a79981..e14ebfe5 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -25,7 +25,7 @@ class FeatureRequesterImpl(FeatureRequester): def __init__(self, config): self._cache = dict() - self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl) + self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, target_base_uri=config.base_uri) self._config = config def get_all_data(self): diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index fcd255a3..4aeee9f2 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -32,7 +32,7 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo self._chunk_size = chunk_size # Optional support for passing in an HTTP client - self.http = create_http_pool_manager(num_pools=1, verify_ssl=verify_ssl) + self.http = create_http_pool_manager(num_pools=1, verify_ssl=verify_ssl, target_base_uri=url) # Any extra kwargs will be fed into the request call later. 
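The util.py hunk below derives the proxy URL from the standard environment variables according to the target scheme. A self-contained sketch of that behavior (the helper mirrors the _get_proxy_url function as added below; the proxy addresses are invented):

from os import environ

def get_proxy_url(target_base_uri):
    # https targets consult https_proxy; anything else falls back to http_proxy.
    if target_base_uri is None:
        return None
    if target_base_uri.startswith('https:'):
        return environ.get('https_proxy')
    return environ.get('http_proxy')

environ['https_proxy'] = 'http://proxy.internal:8443'  # hypothetical proxy
environ.pop('http_proxy', None)
assert get_proxy_url('https://stream.launchdarkly.com') == 'http://proxy.internal:8443'
assert get_proxy_url('http://localhost:8030') is None  # http_proxy is unset here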
self.requests_kwargs = kwargs diff --git a/ldclient/streaming.py b/ldclient/streaming.py index 43e815a4..d5a2375b 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -56,11 +56,14 @@ def run(self): for msg in messages: if not self._running: break + print('*** msg: %s' % msg.event) message_ok = self.process_message(self._store, self._requester, msg) if message_ok is True and self._ready.is_set() is False: + print('*** inited') log.info("StreamingUpdateProcessor initialized ok.") self._ready.set() except UnsuccessfulResponseException as e: + print('*** nope: %s' % e) log.error(http_error_message(e.status, "stream connection")) if not is_http_error_recoverable(e.status): self._ready.set() # if client is initializing, make it stop waiting; has no effect if already inited @@ -154,3 +157,10 @@ def _parse_path(path): if path.startswith(kind.stream_api_path): return ParsedPath(kind = kind, key = path[len(kind.stream_api_path):]) return None + + # magic methods for "with" statement (used in testing) + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.stop() diff --git a/ldclient/util.py b/ldclient/util.py index 229030b8..391a5ed6 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -5,6 +5,7 @@ import certifi import logging +from os import environ import six import sys import urllib3 @@ -84,15 +85,41 @@ def status(self): return self._status -def create_http_pool_manager(num_pools=1, verify_ssl=False): +def create_http_pool_manager(num_pools=1, verify_ssl=False, target_base_uri=None): + proxy_url = _get_proxy_url(target_base_uri) + if not verify_ssl: - return urllib3.PoolManager(num_pools=num_pools) - return urllib3.PoolManager( - num_pools=num_pools, - cert_reqs='CERT_REQUIRED', - ca_certs=certifi.where() + if proxy_url is None: + print("no proxy for %s" % target_base_uri) + return urllib3.PoolManager(num_pools=num_pools) + else: + print("the proxy is %s for %s" % (proxy_url, target_base_uri)) + return urllib3.ProxyManager(proxy_url, num_pools=num_pools) + + if proxy_url is None: + print("no proxy for %s" % target_base_uri) + return urllib3.PoolManager( + num_pools=num_pools, + cert_reqs='CERT_REQUIRED', + ca_certs=certifi.where() + ) + else: + print("the proxy is %s for %s" % (proxy_url, target_base_uri)) + return urllib3.ProxyManager( + proxy_url, + num_pools=num_pools, + cert_reqs='CERT_REQUIRED', + ca_certs=certifi.where() ) +def _get_proxy_url(target_base_uri): + if target_base_uri is None: + return None + is_https = target_base_uri.startswith('https:') + if is_https: + return environ.get('https_proxy') + return environ.get('http_proxy') + def throw_if_unsuccessful_response(resp): if resp.status >= 400: diff --git a/testing/http_util.py b/testing/http_util.py index 009458bd..333eeac6 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -55,19 +55,19 @@ def __exit__(self, type, value, traceback): self.close() class MockServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): - # def __init__(self, server_wrapper, request, client_address, server): - # self.server_wrapper = server_wrapper - # BaseHTTPRequestHandler.__init__(self, request, client_address, server) + def do_CONNECT(self): + self._do_request() def do_GET(self): - self._do_request('GET') + self._do_request() def do_POST(self): - self._do_request('POST') + self._do_request() - def _do_request(self, method): + def _do_request(self): + print('*** %s %s' % (self.command, self.path)) server_wrapper = self.server.server_wrapper - 
server_wrapper.requests.put(MockServerRequest(method, self.path, self.headers)) + server_wrapper.requests.put(MockServerRequest(self.command, self.path, self.headers)) if self.path in server_wrapper.matchers: resp = server_wrapper.matchers[self.path] self.send_response(resp.status) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 4d24454b..44ed3609 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -6,6 +6,7 @@ from ldclient.config import Config from ldclient.event_processor import DefaultEventProcessor from ldclient.util import log +from testing.http_util import start_server from testing.stub_util import MockResponse, MockHttp @@ -467,6 +468,43 @@ def start_consuming_events(): assert message1.param == event1 assert had_no_more +def test_can_use_http_proxy_via_environment_var(monkeypatch): + fake_events_uri = 'http://not-real' + + with start_server() as server: + monkeypatch.setenv('http_proxy', server.uri) + config = Config(sdk_key = 'sdk-key', events_uri = fake_events_uri) + server.setup_response(fake_events_uri + '/bulk', 200, None) + + with DefaultEventProcessor(config) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. Note that the URI path will + # actually be an absolute URI for a proxy request. + req = server.require_request() + assert req.method == 'POST' + +def test_can_use_https_proxy_via_environment_var(monkeypatch): + fake_events_uri = 'https://not-real' + + with start_server() as server: + monkeypatch.setenv('https_proxy', server.uri) + config = Config(sdk_key = 'sdk-key', events_uri = fake_events_uri) + server.setup_response(fake_events_uri + '/bulk', 200, None) + + with DefaultEventProcessor(config) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but + # it can still record that it *got* the request, which proves that the request went to the proxy. + req = server.require_request() + assert req.method == 'CONNECT' + def verify_unrecoverable_http_error(status): with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: mock_http.set_response_status(status) diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py index 569f1ef9..45239567 100644 --- a/testing/test_feature_requester.py +++ b/testing/test_feature_requester.py @@ -125,3 +125,41 @@ def test_get_one_flag_does_not_use_etags(): assert result == flag_data req = server.require_request() assert 'If-None-Match' not in req.headers.keys() # did not send etag from previous request + +def test_can_use_http_proxy_via_environment_var(monkeypatch): + fake_base_uri = 'http://not-real' + with start_server() as server: + monkeypatch.setenv('http_proxy', server.uri) + config = Config(sdk_key = 'sdk-key', base_uri = fake_base_uri) + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + expected_data = { FEATURES: {}, SEGMENTS: {} } + server.setup_json_response(fake_base_uri + '/sdk/latest-all', resp_data) + + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. 
Note that the URI path will + # actually be an absolute URI for a proxy request. + result = fr.get_all_data() + assert result == expected_data + req = server.require_request() + assert req.method == 'GET' + +def test_can_use_https_proxy_via_environment_var(monkeypatch): + fake_base_uri = 'https://not-real' + with start_server() as server: + monkeypatch.setenv('https_proxy', server.uri) + config = Config(sdk_key = 'sdk-key', base_uri = fake_base_uri) + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + server.setup_json_response(fake_base_uri + '/sdk/latest-all', resp_data) + + # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but + # it can still record that it *got* the request, which proves that the request went to the proxy. + try: + fr.get_all_data() + except: + pass + req = server.require_request() + assert req.method == 'CONNECT' diff --git a/testing/test_streaming.py b/testing/test_streaming.py new file mode 100644 index 00000000..ba2899c0 --- /dev/null +++ b/testing/test_streaming.py @@ -0,0 +1,81 @@ +from threading import Event + +from ldclient.config import Config +from ldclient.feature_store import InMemoryFeatureStore +from ldclient.streaming import StreamingUpdateProcessor +from ldclient.version import VERSION +from testing.http_util import start_server + + +fake_event = 'event:put\ndata: {"data":{"flags":{},"segments":{}}}\n\n' + +# Note that our simple HTTP stub server implementation does not actually do streaming responses, so +# in these tests the connection will get closed after the response, causing the streaming processor +# to reconnect. For the purposes of the current tests, that's OK because we only care that the initial +# request and response were handled correctly. + +def test_uses_stream_uri(): + store = InMemoryFeatureStore() + ready = Event() + + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) + server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + req = server.await_request() + assert req.method == 'GET' + ready.wait(1) + assert sp.initialized() + +def test_sends_headers(): + store = InMemoryFeatureStore() + ready = Event() + + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) + server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + req = server.await_request() + assert req.headers['Authorization'] == 'sdk-key' + assert req.headers['User-Agent'] == 'PythonClient/' + VERSION + +def test_can_use_http_proxy_via_environment_var(monkeypatch): + store = InMemoryFeatureStore() + ready = Event() + fake_stream_uri = 'http://not-real' + + with start_server() as server: + monkeypatch.setenv('http_proxy', server.uri) + config = Config(sdk_key = 'sdk-key', stream_uri = fake_stream_uri) + server.setup_response(fake_stream_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. Note that the URI path will + # actually be an absolute URI for a proxy request. 
+ req = server.await_request() + assert req.method == 'GET' + ready.wait(1) + assert sp.initialized() + +def test_can_use_https_proxy_via_environment_var(monkeypatch): + store = InMemoryFeatureStore() + ready = Event() + fake_stream_uri = 'https://not-real' + + with start_server() as server: + monkeypatch.setenv('https_proxy', server.uri) + config = Config(sdk_key = 'sdk-key', stream_uri = fake_stream_uri) + server.setup_response(fake_stream_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but + # it can still record that it *got* the request, which proves that the request went to the proxy. + req = server.await_request() + assert req.method == 'CONNECT' From ae764b55ae2c2089289a207a2eee4b9d1fb1181f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 28 Oct 2019 17:23:23 -0700 Subject: [PATCH 136/356] rm debugging --- ldclient/streaming.py | 3 --- ldclient/util.py | 4 ---- testing/http_util.py | 1 - 3 files changed, 8 deletions(-) diff --git a/ldclient/streaming.py b/ldclient/streaming.py index d5a2375b..75a56840 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -56,14 +56,11 @@ def run(self): for msg in messages: if not self._running: break - print('*** msg: %s' % msg.event) message_ok = self.process_message(self._store, self._requester, msg) if message_ok is True and self._ready.is_set() is False: - print('*** inited') log.info("StreamingUpdateProcessor initialized ok.") self._ready.set() except UnsuccessfulResponseException as e: - print('*** nope: %s' % e) log.error(http_error_message(e.status, "stream connection")) if not is_http_error_recoverable(e.status): self._ready.set() # if client is initializing, make it stop waiting; has no effect if already inited diff --git a/ldclient/util.py b/ldclient/util.py index 391a5ed6..98ad4357 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -90,21 +90,17 @@ def create_http_pool_manager(num_pools=1, verify_ssl=False, target_base_uri=None if not verify_ssl: if proxy_url is None: - print("no proxy for %s" % target_base_uri) return urllib3.PoolManager(num_pools=num_pools) else: - print("the proxy is %s for %s" % (proxy_url, target_base_uri)) return urllib3.ProxyManager(proxy_url, num_pools=num_pools) if proxy_url is None: - print("no proxy for %s" % target_base_uri) return urllib3.PoolManager( num_pools=num_pools, cert_reqs='CERT_REQUIRED', ca_certs=certifi.where() ) else: - print("the proxy is %s for %s" % (proxy_url, target_base_uri)) return urllib3.ProxyManager( proxy_url, num_pools=num_pools, diff --git a/testing/http_util.py b/testing/http_util.py index 333eeac6..a232f9e0 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -65,7 +65,6 @@ def do_POST(self): self._do_request() def _do_request(self): - print('*** %s %s' % (self.command, self.path)) server_wrapper = self.server.server_wrapper server_wrapper.requests.put(MockServerRequest(self.command, self.path, self.headers)) if self.path in server_wrapper.matchers: From 28ee4b580436fd4ebccdeded65167809308dccbe Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 29 Oct 2019 16:45:31 -0700 Subject: [PATCH 137/356] fix autodoc options to exclude magic methods --- docs/api-main.rst | 9 +-------- docs/conf.py | 6 ++---- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/docs/api-main.rst b/docs/api-main.rst index 56417ea5..1a5af4a1 100644 --- 
a/docs/api-main.rst +++ b/docs/api-main.rst @@ -6,35 +6,28 @@ ldclient module .. automodule:: ldclient :members: get,set_config,set_sdk_key - :show-inheritance: ldclient.client module ---------------------- .. automodule:: ldclient.client :members: LDClient - :special-members: __init__ - :show-inheritance: ldclient.config module ---------------------- .. automodule:: ldclient.config :members: - :special-members: __init__ - :show-inheritance: ldclient.flag module -------------------- .. automodule:: ldclient.flag :members: EvaluationDetail - :special-members: __init__ - :show-inheritance: ldclient.flags_state module --------------------------- .. automodule:: ldclient.flags_state :members: - :show-inheritance: + :exclude-members: __init__, add_flag diff --git a/docs/conf.py b/docs/conf.py index 9e3db965..b93d3c36 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -167,8 +167,6 @@ # -- Extension configuration ------------------------------------------------- autodoc_default_options = { - 'members': None, - 'show-inheritance': None, - 'special-members': None, - 'undoc-members': None + 'special-members': '__init__', + 'undoc-members': False } From 4fc6ce797fc6c975515a85ad1733060a9698e3b7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 30 Oct 2019 17:12:06 -0700 Subject: [PATCH 138/356] comment --- ldclient/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ldclient/config.py b/ldclient/config.py index 23cc77b5..44da64f0 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -99,7 +99,8 @@ def __init__(self, :param http_proxy: Use a proxy when connecting to LaunchDarkly. This is the full URI of the proxy; for example: http://my-proxy.com:1234. Note that unlike the standard `http_proxy` environment variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly - service uses HTTPS, but a Relay Proxy instance could use HTTP). + service uses HTTPS, but a Relay Proxy instance could use HTTP). Setting this Config parameter will + override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. """ self.__sdk_key = sdk_key From 00432bede3b70d75f2205ce5a4368c390fd9cbed Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Oct 2019 11:23:47 -0700 Subject: [PATCH 139/356] add end-to-end unit tests for proxy config --- testing/test_event_processor.py | 50 ++++++++++++--------- testing/test_feature_requester.py | 72 ++++++++++++++++++------------- testing/test_streaming.py | 66 ++++++++++++++++------------ 3 files changed, 111 insertions(+), 77 deletions(-) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 44ed3609..550c0789 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -469,33 +469,43 @@ def start_consuming_events(): assert had_no_more def test_can_use_http_proxy_via_environment_var(monkeypatch): - fake_events_uri = 'http://not-real' - with start_server() as server: monkeypatch.setenv('http_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', events_uri = fake_events_uri) - server.setup_response(fake_events_uri + '/bulk', 200, None) - - with DefaultEventProcessor(config) as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - - # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the - # HTTP client, so we should be able to see the request go through. 
Note that the URI path will - # actually be an absolute URI for a proxy request. - req = server.require_request() - assert req.method == 'POST' + config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real') + _verify_http_proxy_is_used(server, config) def test_can_use_https_proxy_via_environment_var(monkeypatch): - fake_events_uri = 'https://not-real' - with start_server() as server: monkeypatch.setenv('https_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', events_uri = fake_events_uri) - server.setup_response(fake_events_uri + '/bulk', 200, None) + config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real') + _verify_https_proxy_is_used(server, config) + +def test_can_use_http_proxy_via_config(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', http_proxy=server.uri) + _verify_http_proxy_is_used(server, config) + +def test_can_use_https_proxy_via_config(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', http_proxy=server.uri) + _verify_https_proxy_is_used(server, config) + +def _verify_http_proxy_is_used(server, config): + server.setup_response(config.events_uri + '/bulk', 200, None) + with DefaultEventProcessor(config) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. Note that the URI path will + # actually be an absolute URI for a proxy request. + req = server.require_request() + assert req.method == 'POST' - with DefaultEventProcessor(config) as ep: +def _verify_https_proxy_is_used(server, config): + server.setup_response(config.events_uri + '/bulk', 200, None) + with DefaultEventProcessor(config) as ep: ep.send_event({ 'kind': 'identify', 'user': user }) ep.flush() ep._wait_until_inactive() diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py index 45239567..658c8157 100644 --- a/testing/test_feature_requester.py +++ b/testing/test_feature_requester.py @@ -127,39 +127,53 @@ def test_get_one_flag_does_not_use_etags(): assert 'If-None-Match' not in req.headers.keys() # did not send etag from previous request def test_can_use_http_proxy_via_environment_var(monkeypatch): - fake_base_uri = 'http://not-real' with start_server() as server: monkeypatch.setenv('http_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', base_uri = fake_base_uri) - fr = FeatureRequesterImpl(config) - - resp_data = { 'flags': {}, 'segments': {} } - expected_data = { FEATURES: {}, SEGMENTS: {} } - server.setup_json_response(fake_base_uri + '/sdk/latest-all', resp_data) - - # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the - # HTTP client, so we should be able to see the request go through. Note that the URI path will - # actually be an absolute URI for a proxy request. 
- result = fr.get_all_data() - assert result == expected_data - req = server.require_request() - assert req.method == 'GET' + config = Config(sdk_key = 'sdk-key', base_uri = 'http://not-real') + _verify_http_proxy_is_used(server, config) def test_can_use_https_proxy_via_environment_var(monkeypatch): - fake_base_uri = 'https://not-real' with start_server() as server: monkeypatch.setenv('https_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', base_uri = fake_base_uri) - fr = FeatureRequesterImpl(config) + config = Config(sdk_key = 'sdk-key', base_uri = 'https://not-real') + _verify_https_proxy_is_used(server, config) - resp_data = { 'flags': {}, 'segments': {} } - server.setup_json_response(fake_base_uri + '/sdk/latest-all', resp_data) - - # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but - # it can still record that it *got* the request, which proves that the request went to the proxy. - try: - fr.get_all_data() - except: - pass - req = server.require_request() - assert req.method == 'CONNECT' +def test_can_use_http_proxy_via_config(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = 'http://not-real', http_proxy = server.uri) + _verify_http_proxy_is_used(server, config) + +def test_can_use_https_proxy_via_config(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = 'https://not-real', http_proxy = server.uri) + _verify_https_proxy_is_used(server, config) + +def _verify_http_proxy_is_used(server, config): + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + expected_data = { FEATURES: {}, SEGMENTS: {} } + server.setup_json_response(config.base_uri + '/sdk/latest-all', resp_data) + + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. Note that the URI path will + # actually be an absolute URI for a proxy request. + result = fr.get_all_data() + assert result == expected_data + req = server.require_request() + assert req.method == 'GET' + +def _verify_https_proxy_is_used(server, config): + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + server.setup_json_response(config.base_uri + '/sdk/latest-all', resp_data) + + # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but + # it can still record that it *got* the request, which proves that the request went to the proxy. 
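The HTTPS counterpart, for contrast: the client first asks the proxy for a tunnel, so the only request the stub ever records is the CONNECT, and the call then fails because the stub cannot complete the tunnel. A sketch under the same assumptions as above:

from testing.http_util import start_server
import urllib3

with start_server() as server:
    proxy = urllib3.ProxyManager(server.uri)
    try:
        proxy.request('GET', 'https://not-real/some/path', retries=False)
    except Exception:
        pass  # expected: the stub answers the CONNECT with a 404, so no tunnel opens
    req = server.require_request()
    assert req.method == 'CONNECT'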
+ try: + fr.get_all_data() + except: + pass + req = server.require_request() + assert req.method == 'CONNECT' \ No newline at end of file diff --git a/testing/test_streaming.py b/testing/test_streaming.py index ba2899c0..65ba0542 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -44,38 +44,48 @@ def test_sends_headers(): assert req.headers['User-Agent'] == 'PythonClient/' + VERSION def test_can_use_http_proxy_via_environment_var(monkeypatch): - store = InMemoryFeatureStore() - ready = Event() - fake_stream_uri = 'http://not-real' - with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = 'http://not-real') monkeypatch.setenv('http_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', stream_uri = fake_stream_uri) - server.setup_response(fake_stream_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) - - with StreamingUpdateProcessor(config, None, store, ready) as sp: - sp.start() - # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the - # HTTP client, so we should be able to see the request go through. Note that the URI path will - # actually be an absolute URI for a proxy request. - req = server.await_request() - assert req.method == 'GET' - ready.wait(1) - assert sp.initialized() + _verify_http_proxy_is_used(server, config) def test_can_use_https_proxy_via_environment_var(monkeypatch): - store = InMemoryFeatureStore() - ready = Event() - fake_stream_uri = 'https://not-real' - with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = 'https://not-real') monkeypatch.setenv('https_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', stream_uri = fake_stream_uri) - server.setup_response(fake_stream_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + _verify_https_proxy_is_used(server, config) - with StreamingUpdateProcessor(config, None, store, ready) as sp: - sp.start() - # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but - # it can still record that it *got* the request, which proves that the request went to the proxy. - req = server.await_request() - assert req.method == 'CONNECT' +def test_can_use_http_proxy_via_config(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = 'http://not-real', http_proxy=server.uri) + _verify_http_proxy_is_used(server, config) + +def test_can_use_https_proxy_via_config(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = 'https://not-real', http_proxy=server.uri) + _verify_https_proxy_is_used(server, config) + +def _verify_http_proxy_is_used(server, config): + store = InMemoryFeatureStore() + ready = Event() + server.setup_response(config.stream_base_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. Note that the URI path will + # actually be an absolute URI for a proxy request. 
+ req = server.await_request() + assert req.method == 'GET' + ready.wait(1) + assert sp.initialized() + +def _verify_https_proxy_is_used(server, config): + store = InMemoryFeatureStore() + ready = Event() + server.setup_response(config.stream_base_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but + # it can still record that it *got* the request, which proves that the request went to the proxy. + req = server.await_request() + assert req.method == 'CONNECT' From 5911fd9afb63fc7774f65928cdd83524dab59a54 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 31 Oct 2019 12:05:31 -0700 Subject: [PATCH 140/356] indents --- testing/test_event_processor.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 550c0789..75093a3d 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -506,14 +506,14 @@ def _verify_http_proxy_is_used(server, config): def _verify_https_proxy_is_used(server, config): server.setup_response(config.events_uri + '/bulk', 200, None) with DefaultEventProcessor(config) as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - - # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but - # it can still record that it *got* the request, which proves that the request went to the proxy. - req = server.require_request() - assert req.method == 'CONNECT' + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but + # it can still record that it *got* the request, which proves that the request went to the proxy. 
+ req = server.require_request() + assert req.method == 'CONNECT' def verify_unrecoverable_http_error(status): with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: From 63125f56d1b21638d80dedbaf016c8579c178428 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 17:11:22 -0800 Subject: [PATCH 141/356] add 3.8 build --- .circleci/config.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 46e2166e..6cfbc616 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,6 +9,7 @@ workflows: - test-3.5 - test-3.6 - test-3.7 + - test-3.8 test-template: &test-template steps: - checkout @@ -92,3 +93,10 @@ jobs: - image: redis - image: amazon/dynamodb-local - image: consul + test-3.8: + <<: *test-template + docker: + - image: circleci/python:3.8-stretch + - image: redis + - image: amazon/dynamodb-local + - image: consul From 3c68cd20e1cffc41929657b6d7c12237f3bb68ee Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 17:13:12 -0800 Subject: [PATCH 142/356] image name --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6cfbc616..feb6d110 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -96,7 +96,7 @@ jobs: test-3.8: <<: *test-template docker: - - image: circleci/python:3.8-stretch + - image: circleci/python:3.8-buster - image: redis - image: amazon/dynamodb-local - image: consul From 9b1adf32780d5b2695278ffef975bea481ae7936 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 17:25:56 -0800 Subject: [PATCH 143/356] fail on SyntaxWarning --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index feb6d110..1523d759 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,10 +31,10 @@ test-template: &test-template command: | mkdir test-reports; if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then - pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; + python -W error:SyntaxWarning -m pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; sh -c '[ -n "${CODECLIMATE_REPO_TOKEN+1}" ] && codeclimate-test-reporter || echo "No CODECLIMATE_REPO_TOKEN value is set; not publishing coverage report"'; else - pytest -s --junitxml=test-reports/junit.xml testing; + pytest -W error:SyntaxWarning -m pytest -s --junitxml=test-reports/junit.xml testing; fi - run: name: test packaging/install From 6a954e344d3ceed2bd057175e35f314d1f283792 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 17:29:37 -0800 Subject: [PATCH 144/356] typo --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1523d759..861f05a8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,10 +31,10 @@ test-template: &test-template command: | mkdir test-reports; if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then - python -W error:SyntaxWarning -m pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; + python -W error::SyntaxWarning -m pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; sh -c '[ -n "${CODECLIMATE_REPO_TOKEN+1}" ] && codeclimate-test-reporter || echo "No CODECLIMATE_REPO_TOKEN value is set; not publishing coverage report"'; else - pytest -W error:SyntaxWarning -m pytest -s --junitxml=test-reports/junit.xml testing; + pytest -W error::SyntaxWarning -m pytest -s 
--junitxml=test-reports/junit.xml testing; fi - run: name: test packaging/install From d6bf44c6308b9cbe88ac4c8f3bdd74931a9f3f11 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 17:33:54 -0800 Subject: [PATCH 145/356] command syntax --- .circleci/config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 861f05a8..2aa451e9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -31,10 +31,10 @@ test-template: &test-template command: | mkdir test-reports; if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then - python -W error::SyntaxWarning -m pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing; + pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing -W error::SyntaxWarning; sh -c '[ -n "${CODECLIMATE_REPO_TOKEN+1}" ] && codeclimate-test-reporter || echo "No CODECLIMATE_REPO_TOKEN value is set; not publishing coverage report"'; else - pytest -W error::SyntaxWarning -m pytest -s --junitxml=test-reports/junit.xml testing; + pytest -s --junitxml=test-reports/junit.xml testing -W error::SyntaxWarning; fi - run: name: test packaging/install From 7b3177fb8961dc61ca4a0336997f7ec1e0eca538 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 18:56:37 -0800 Subject: [PATCH 146/356] pin expiringdict dependency for Python 3.3 compatibility --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 2e3cba6f..f941d6ab 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ backoff>=1.4.3 certifi>=2018.4.16 -expiringdict>=1.1.4 +expiringdict>=1.1.4,<1.2.0 six>=1.10.0 pyRFC3339>=1.0 semver>=2.7.9 From 9942d77357bf557430e00875a8c32b7b3be72a4c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:06:15 -0800 Subject: [PATCH 147/356] add Windows CircleCI job --- .circleci/config.yml | 64 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 2aa451e9..68a6122b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,6 +1,9 @@ -version: 2 +version: 2.1 + +orbs: + win: circleci/windows@1.0.0 + workflows: - version: 2 test: jobs: - test-2.7 @@ -10,6 +13,8 @@ workflows: - test-3.6 - test-3.7 - test-3.8 + - test-windows + test-template: &test-template steps: - checkout @@ -100,3 +105,58 @@ jobs: - image: redis - image: amazon/dynamodb-local - image: consul + + test-windows: + executor: + name: win/vs2019 + shell: powershell.exe + steps: + - checkout + - run: + name: set up DynamoDB + command: | + $ProgressPreference = "SilentlyContinue" # prevents console errors from CircleCI host + iwr -outf dynamo.zip https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip + mkdir dynamo + Expand-Archive -Path dynamo.zip -DestinationPath dynamo + cd dynamo + javaw -D"java.library.path=./DynamoDBLocal_lib" -jar DynamoDBLocal.jar + background: true + - run: + name: set up Consul + command: | + $ProgressPreference = "SilentlyContinue" + iwr -outf consul.zip https://releases.hashicorp.com/consul/1.4.2/consul_1.4.2_windows_amd64.zip + mkdir consul + Expand-Archive -Path consul.zip -DestinationPath consul + cd consul + sc.exe create "Consul" binPath="$(System.DefaultWorkingDirectory)/consul/consul.exe agent -dev" + sc.exe start "Consul" + - run: + name: start Redis + command: | + $ProgressPreference = "SilentlyContinue" + iwr -outf redis.zip 
https://github.com/MicrosoftArchive/redis/releases/download/win-3.0.504/Redis-x64-3.0.504.zip + mkdir redis + Expand-Archive -Path redis.zip -DestinationPath redis + cd redis + ./redis-server --service-install + ./redis-server --service-start + Start-Sleep -s 5 + ./redis-cli ping + - run: python --version + - run: + name: install requirements + command: | + pip install -r test-requirements.txt + pip install -r consul-requirements.txt + python setup.py install + - run: + name: run tests + command: | + mkdir test-reports + python -m pytest -s --junitxml=test-reports/junit.xml testing; + - store_test_results: + path: test-reports + - store_artifacts: + path: test-reports From 38f3f433036d9d43cb4fab6a28ae7735951c68eb Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:09:14 -0800 Subject: [PATCH 148/356] periods are no longer valid in CircleCI job names --- .circleci/config.yml | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 68a6122b..4a46a82a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,13 +6,13 @@ orbs: workflows: test: jobs: - - test-2.7 - - test-3.3 - - test-3.4 - - test-3.5 - - test-3.6 - - test-3.7 - - test-3.8 + - test-2-7 + - test-3-3 + - test-3-4 + - test-3-5 + - test-3-6 + - test-3-7 + - test-3-8 - test-windows test-template: &test-template @@ -56,49 +56,49 @@ test-template: &test-template path: test-reports jobs: - test-2.7: + test-2-7: <<: *test-template docker: - image: circleci/python:2.7-jessie - image: redis - image: amazon/dynamodb-local - image: consul - test-3.3: + test-3-3: <<: *test-template docker: - image: circleci/python:3.3-jessie - image: redis - image: amazon/dynamodb-local # python-consul doesn't support Python 3.3 - test-3.4: + test-3-4: <<: *test-template docker: - image: circleci/python:3.4-jessie - image: redis - image: amazon/dynamodb-local # python-consul doesn't support Python 3.4 - test-3.5: + test-3-5: <<: *test-template docker: - image: circleci/python:3.5-jessie - image: redis - image: amazon/dynamodb-local - image: consul - test-3.6: + test-3-6: <<: *test-template docker: - image: circleci/python:3.6-jessie - image: redis - image: amazon/dynamodb-local - image: consul - test-3.7: + test-3-7: <<: *test-template docker: - image: circleci/python:3.7-stretch - image: redis - image: amazon/dynamodb-local - image: consul - test-3.8: + test-3-8: <<: *test-template docker: - image: circleci/python:3.8-buster From c969db23ed1ddc8f559b2991098ba19119a3c646 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:11:13 -0800 Subject: [PATCH 149/356] syntax fix --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4a46a82a..8861dfa1 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -130,7 +130,7 @@ jobs: mkdir consul Expand-Archive -Path consul.zip -DestinationPath consul cd consul - sc.exe create "Consul" binPath="$(System.DefaultWorkingDirectory)/consul/consul.exe agent -dev" + sc.exe create "Consul" binPath="$(Get-Location)/consul.exe agent -dev" sc.exe start "Consul" - run: name: start Redis From bc31ec9b1bc517b5468e8d531c2c16ece2ea0940 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:17:46 -0800 Subject: [PATCH 150/356] install Python in Windows --- .circleci/config.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 
8861dfa1..f0f80d23 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -113,9 +113,15 @@ jobs: steps: - checkout - run: - name: set up DynamoDB + name: install Python 2.7 command: | $ProgressPreference = "SilentlyContinue" # prevents console errors from CircleCI host + iwr -outf python-2.7.16.amd64.msi https://www.python.org/ftp/python/2.7.16/python-2.7.16.amd64.msi + Start-Process msiexec.exe -Wait -ArgumentList '/I python-2.7.16.amd64.msi /quiet' + - run: + name: set up DynamoDB + command: | + $ProgressPreference = "SilentlyContinue" iwr -outf dynamo.zip https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip mkdir dynamo Expand-Archive -Path dynamo.zip -DestinationPath dynamo From 64486a3ebbcc3e919e90e0f92aa758bc3b64cc59 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:26:35 -0800 Subject: [PATCH 151/356] set path --- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index f0f80d23..fc3eae3e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -154,6 +154,7 @@ jobs: - run: name: install requirements command: | + $env:Path += ";C:\Python27\;C:\Python27\Scripts\" pip install -r test-requirements.txt pip install -r consul-requirements.txt python setup.py install @@ -161,6 +162,7 @@ jobs: name: run tests command: | mkdir test-reports + $env:Path += ";C:\Python27\;C:\Python27\Scripts\" python -m pytest -s --junitxml=test-reports/junit.xml testing; - store_test_results: path: test-reports From 37509ffb49003698eff367d6b011a6d20a881bf9 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:28:50 -0800 Subject: [PATCH 152/356] move command --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index fc3eae3e..b8f88903 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -150,11 +150,11 @@ jobs: ./redis-server --service-start Start-Sleep -s 5 ./redis-cli ping - - run: python --version - run: name: install requirements command: | $env:Path += ";C:\Python27\;C:\Python27\Scripts\" + python --version pip install -r test-requirements.txt pip install -r consul-requirements.txt python setup.py install From 3b41766dc4c4d2aa313567d08265edcf45fccea7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:32:07 -0800 Subject: [PATCH 153/356] turn off debug logging --- testing/test_init.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/test_init.py b/testing/test_init.py index 16d67b6c..2819bbcc 100644 --- a/testing/test_init.py +++ b/testing/test_init.py @@ -3,7 +3,7 @@ import ldclient from ldclient import Config -logging.basicConfig(level=logging.DEBUG) +logging.basicConfig(level=logging.WARN) mylogger = logging.getLogger() From ef680582a4033ce685c6f8cc760d88eb29c09969 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:37:11 -0800 Subject: [PATCH 154/356] Py3 in Windows --- .circleci/config.yml | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b8f88903..788aa99d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -13,7 +13,12 @@ workflows: - test-3-6 - test-3-7 - test-3-8 - - test-windows + - test-windows: + name: Windows - Py2.7 + py3: false + - test-windows: + name: Windows - Py3 + py3: true test-template: &test-template steps: @@ -112,12 +117,24 @@ jobs: shell: powershell.exe steps: - checkout + - when: + 
condition: << parameters.py3 >> + steps: + - run: + name: install Python 3 + command: choco install python + - unless: + condition: << parameters.py3 >> + steps: + - run: + name: install Python 2.7 + command: | + $ProgressPreference = "SilentlyContinue" # prevents console errors from CircleCI host + iwr -outf python-2.7.16.amd64.msi https://www.python.org/ftp/python/2.7.16/python-2.7.16.amd64.msi + Start-Process msiexec.exe -Wait -ArgumentList '/I python-2.7.16.amd64.msi /quiet' - run: - name: install Python 2.7 - command: | - $ProgressPreference = "SilentlyContinue" # prevents console errors from CircleCI host - iwr -outf python-2.7.16.amd64.msi https://www.python.org/ftp/python/2.7.16/python-2.7.16.amd64.msi - Start-Process msiexec.exe -Wait -ArgumentList '/I python-2.7.16.amd64.msi /quiet' - run: name: set up DynamoDB command: | From 0c93df7f14b9b782978facaad54d35eb1360db8b Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:37:42 -0800 Subject: [PATCH 155/356] config param --- .circleci/config.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 788aa99d..e2a87c38 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -115,6 +115,9 @@ jobs: executor: name: win/vs2019 shell: powershell.exe + parameters: + py3: + type: boolean steps: - checkout - when: From 86d27a87691a28f783be69cf99c2530e61e74d18 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:40:16 -0800 Subject: [PATCH 156/356] rm redundant step --- .circleci/config.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e2a87c38..fe3f9c01 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -135,9 +135,6 @@ jobs: $ProgressPreference = "SilentlyContinue" # prevents console errors from CircleCI host iwr -outf python-2.7.16.amd64.msi https://www.python.org/ftp/python/2.7.16/python-2.7.16.amd64.msi Start-Process msiexec.exe -Wait -ArgumentList '/I python-2.7.16.amd64.msi /quiet' - - run: - name: install Python 3 - command: choco install python - run: name: set up DynamoDB command: | From 001e1968189239577814b92ad8d18276e18dbf26 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 19:42:52 -0800 Subject: [PATCH 157/356] choco switch --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index fe3f9c01..e2c98ce5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -125,7 +125,7 @@ jobs: steps: - run: name: install Python 3 - command: choco install python + command: choco install python --no-progress - unless: condition: << parameters.py3 >> steps: From 23a42229a4f56552cca7d5a5b2dcaf2f288c4208 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 21:17:37 -0800 Subject: [PATCH 158/356] refactor Linux jobs using CircleCI
2.1 features --- .circleci/config.yml | 183 +++++++++++++++++++++---------------------- 1 file changed, 89 insertions(+), 94 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e2c98ce5..2920bc7e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,110 +6,105 @@ orbs: workflows: test: jobs: - - test-2-7 - - test-3-3 - - test-3-4 - - test-3-5 - - test-3-6 - - test-3-7 - - test-3-8 + - test-linux: + name: Python 2.7 + docker-image: circleci/python:2.7-jessie + test-with-codeclimate: true # we only need to run CodeClimate in one job + - test-linux: + name: Python 3.3 + docker-image: circleci/python:3.3-jessie + consul-supported: false # Consul isn't supported in 3.3 + filesource-supported: false # FileDataSource isn't supported in 3.3 + test-packaging: false # packaging test requires virtualenv, which isn't supported in 3.3 + - test-linux: + name: Python 3.4 + docker-image: circleci/python:3.4-jessie + consul-supported: false # Consul isn't supported in 3.4 + - test-linux: + name: Python 3.5 + docker-image: circleci/python:3.5-jessie + - test-linux: + name: Python 3.6 + docker-image: circleci/python:3.6-jessie + - test-linux: + name: Python 3.7 + docker-image: circleci/python:3.7-stretch + - test-linux: + name: Python 3.8 + docker-image: circleci/python:3.8-buster - test-windows: - name: Windows - Py2.7 + name: Windows Py2.7 py3: false - test-windows: - name: Windows - Py3 + name: Windows Py3.3 py3: true -test-template: &test-template - steps: - - checkout - - run: - name: install requirements - command: | - sudo pip install --upgrade pip virtualenv; - sudo pip install -r test-requirements.txt; - if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then - sudo pip install -r test-filesource-optional-requirements.txt; - fi; - if [[ "$CIRCLE_JOB" != "test-3.3" ]] && [[ "$CIRCLE_JOB" != "test-3.4" ]]; then - sudo pip install -r consul-requirements.txt; - fi; - sudo python setup.py install; - pip freeze - - run: - name: run tests - command: | - mkdir test-reports; - if [[ "$CIRCLE_JOB" == "test-2.7" ]]; then - pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing -W error::SyntaxWarning; - sh -c '[ -n "${CODECLIMATE_REPO_TOKEN+1}" ] && codeclimate-test-reporter || echo "No CODECLIMATE_REPO_TOKEN value is set; not publishing coverage report"'; - else - pytest -s --junitxml=test-reports/junit.xml testing -W error::SyntaxWarning; - fi - - run: - name: test packaging/install - # Note, virtualenv isn't supported on Python 3.3 and this test requires virtualenv. But we - # never build our published package on 3.3 anyway. 
- command: | - if [[ "$CIRCLE_JOB" != "test-3.3" ]]; then - sudo rm -rf dist *.egg-info; - ./test-packaging/test-packaging.sh; - fi - - store_test_results: - path: test-reports - - store_artifacts: - path: test-reports - jobs: - test-2-7: - <<: *test-template - docker: - - image: circleci/python:2.7-jessie - - image: redis - - image: amazon/dynamodb-local - - image: consul - test-3-3: - <<: *test-template - docker: - - image: circleci/python:3.3-jessie - - image: redis - - image: amazon/dynamodb-local - # python-consul doesn't support Python 3.3 - test-3-4: - <<: *test-template - docker: - - image: circleci/python:3.4-jessie - - image: redis - - image: amazon/dynamodb-local - # python-consul doesn't support Python 3.4 - test-3-5: - <<: *test-template - docker: - - image: circleci/python:3.5-jessie - - image: redis - - image: amazon/dynamodb-local - - image: consul - test-3-6: - <<: *test-template - docker: - - image: circleci/python:3.6-jessie - - image: redis - - image: amazon/dynamodb-local - - image: consul - test-3-7: - <<: *test-template - docker: - - image: circleci/python:3.7-stretch - - image: redis - - image: amazon/dynamodb-local - - image: consul - test-3-8: - <<: *test-template + test-linux: + parameters: + docker-image: + type: string + consul-supported: + type: boolean + default: true + filesource-supported: + type: boolean + default: true + test-packaging: + type: boolean + default: true + test-with-codeclimate: + type: boolean + default: false docker: - - image: circleci/python:3.8-buster + - image: << parameters.docker-image >> - image: redis - image: amazon/dynamodb-local - image: consul + steps: + - checkout + - run: + name: install requirements + command: | + sudo pip install --upgrade pip virtualenv; + sudo pip install -r test-requirements.txt; + if [[ "<< parameters.filesource-supported >>" == "true" ]]; then + sudo pip install -r test-filesource-optional-requirements.txt; + fi; + if [[ "<< parameters.consul-supported >>" == "true" ]]; then + sudo pip install -r consul-requirements.txt; + fi; + sudo python setup.py install; + pip freeze + - when: + condition: << parameters.test-with-codeclimate >> + steps: + - run: + name: run tests (with CodeClimate) + command: | + mkdir test-reports + pytest -s --cov=ldclient --junitxml=test-reports/junit.xml testing -W error::SyntaxWarning + sh -c '[ -n "${CODECLIMATE_REPO_TOKEN+1}" ] && codeclimate-test-reporter || echo "No CODECLIMATE_REPO_TOKEN value is set; not publishing coverage report"' + - unless: + condition: << parameters.test-with-codeclimate >> + steps: + - run: + name: run tests + command: | + mkdir test-reports + pytest -s --junitxml=test-reports/junit.xml testing -W error::SyntaxWarning + - when: + condition: << parameters.test-packaging >> + steps: + - run: + name: test packaging/install + command: | + sudo rm -rf dist *.egg-info + ./test-packaging/test-packaging.sh + - store_test_results: + path: test-reports + - store_artifacts: + path: test-reports test-windows: executor: From a5aaa99afb81aa4632bd5691fc23a27451d90341 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 21:21:42 -0800 Subject: [PATCH 159/356] set log level before anything else --- testing/__init__.py | 3 +++ testing/test_init.py | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/testing/__init__.py b/testing/__init__.py index d2b1b498..0602017d 100644 --- a/testing/__init__.py +++ b/testing/__init__.py @@ -1,3 +1,6 @@ +import logging import os +logging.basicConfig(level=logging.WARN) + sdk_key = os.environ.get('LD_SDK_KEY') diff --git a/testing/test_init.py b/testing/test_init.py index 2819bbcc..ca13c130 100644 --- a/testing/test_init.py +++ b/testing/test_init.py @@ -3,7 +3,6 @@ import ldclient from ldclient
import Config -logging.basicConfig(level=logging.WARN) mylogger = logging.getLogger() From 9e403215690498e4068d7bf6ae0bab49b6660cac Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 20 Nov 2019 21:23:22 -0800 Subject: [PATCH 160/356] rm Azure config --- azure-pipelines.yml | 84 --------------------------------------------- 1 file changed, 84 deletions(-) delete mode 100644 azure-pipelines.yml diff --git a/azure-pipelines.yml b/azure-pipelines.yml deleted file mode 100644 index af1f3342..00000000 --- a/azure-pipelines.yml +++ /dev/null @@ -1,84 +0,0 @@ -jobs: - - job: build - pool: - vmImage: 'vs2017-win2016' - steps: - - task: PowerShell@2 - displayName: 'Setup Dynamo' - inputs: - targetType: inline - workingDirectory: $(System.DefaultWorkingDirectory) - script: | - iwr -outf dynamo.zip https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip - mkdir dynamo - Expand-Archive -Path dynamo.zip -DestinationPath dynamo - cd dynamo - javaw -D"java.library.path=./DynamoDBLocal_lib" -jar DynamoDBLocal.jar - - task: PowerShell@2 - displayName: 'Setup Consul' - inputs: - targetType: inline - workingDirectory: $(System.DefaultWorkingDirectory) - script: | - iwr -outf consul.zip https://releases.hashicorp.com/consul/1.4.2/consul_1.4.2_windows_amd64.zip - mkdir consul - Expand-Archive -Path consul.zip -DestinationPath consul - cd consul - sc.exe create "Consul" binPath="$(System.DefaultWorkingDirectory)/consul/consul.exe agent -dev" - sc.exe start "Consul" - - task: PowerShell@2 - displayName: 'Setup Redis' - inputs: - targetType: inline - workingDirectory: $(System.DefaultWorkingDirectory) - script: | - iwr -outf redis.zip https://github.com/MicrosoftArchive/redis/releases/download/win-3.0.504/Redis-x64-3.0.504.zip - mkdir redis - Expand-Archive -Path redis.zip -DestinationPath redis - cd redis - ./redis-server --service-install - ./redis-server --service-start - - task: UsePythonVersion@0 - inputs: - versionSpec: '2.7' - addToPath: true - - task: PowerShell@2 - displayName: 'Setup SDK and Test 2.7' - inputs: - targetType: inline - workingDirectory: $(System.DefaultWorkingDirectory) - script: | - python --version - pip install -r test-requirements.txt - pip install -r consul-requirements.txt - python setup.py install - mkdir test-reports27 - python -m pytest -s --junitxml=test-reports27/junit.xml testing; - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.7' - addToPath: true - - task: PowerShell@2 - displayName: 'Setup SDK and Test 3.7' - inputs: - targetType: inline - workingDirectory: $(System.DefaultWorkingDirectory) - script: | - python --version - pip install -r test-requirements.txt - pip install -r consul-requirements.txt - python setup.py install - mkdir test-reports37 - python -m pytest -s --junitxml=test-reports37/junit.xml testing; - - task: CopyFiles@2 - inputs: - targetFolder: $(Build.ArtifactStagingDirectory)/test-reports27 - sourceFolder: $(System.DefaultWorkingDirectory)/test-reports27 - - task: CopyFiles@2 - inputs: - targetFolder: $(Build.ArtifactStagingDirectory)/test-reports37 - sourceFolder: $(System.DefaultWorkingDirectory)/test-reports37 - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: '$(Build.ArtifactStagingDirectory)' - artifactName: reports From 669e7721a78cf574db7720a8a469fb62dc0e5600 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 9 Dec 2019 10:52:48 -0800 Subject: [PATCH 161/356] use yaml.safe_load() to avoid code execution vulnerability in file data source --- .../integrations/files/file_data_source.py | 2 +- 
testing/test_file_data_source.py | 25 +++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py index 785a3851..9f9f3eaf 100644 --- a/ldclient/impl/integrations/files/file_data_source.py +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -80,7 +80,7 @@ def _load_file(self, path, all_data): def _parse_content(self, content): if have_yaml: - return yaml.load(content) # pyyaml correctly parses JSON too + return yaml.safe_load(content) # pyyaml correctly parses JSON too return json.loads(content) def _add_item(self, all_data, kind, item): diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py index 78ab5359..7b13cf9b 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -246,3 +246,28 @@ def test_evaluates_simplified_flag_with_client_as_expected(): os.remove(path) if client is not None: client.close() + +unsafe_yaml_caused_method_to_be_called = False + +def arbitrary_method_called_from_yaml(x): + global unsafe_yaml_caused_method_to_be_called + unsafe_yaml_caused_method_to_be_called = True + +def test_does_not_allow_unsafe_yaml(): + if not have_yaml: + pytest.skip("skipping file source test with YAML because pyyaml isn't available") + + # This extended syntax defined by pyyaml allows arbitrary code execution. We should be using + # yaml.safe_load() which does not support such things. + unsafe_yaml = ''' +!!python/object/apply:testing.test_file_data_source.arbitrary_method_called_from_yaml ["hi"] +''' + path = make_temp_file(unsafe_yaml) + try: + factory = Files.new_data_source(paths = path) + client = LDClient(config=Config(update_processor_class = factory, send_events = False)) + finally: + os.remove(path) + if client is not None: + client.close() + assert unsafe_yaml_caused_method_to_be_called == False From 52238d1d44ebc661bd037004398718c4a8929780 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Mon, 16 Dec 2019 23:17:44 +0000 Subject: [PATCH 162/356] Initial work on wrapper_name, wrapper_version, diagnostic config options and start of diagnostic config event creation. --- ldclient/config.py | 41 +++++++++++++++++++++-- ldclient/diagnostics.py | 29 +++++++++++++++++ ldclient/event_processor.py | 2 +- ldclient/feature_requester.py | 2 +- ldclient/streaming.py | 2 +- ldclient/util.py | 32 +++++++++++------- testing/test_event_processor.py | 24 ++++++++++++++ testing/test_feature_requester.py | 54 ++++++++++++++++++++++++++++++- testing/test_streaming.py | 29 +++++++++++++++++ 9 files changed, 197 insertions(+), 18 deletions(-) create mode 100644 ldclient/diagnostics.py diff --git a/ldclient/config.py b/ldclient/config.py index 4ea3d6bc..517b9e18 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -44,7 +44,11 @@ def __init__(self, user_keys_capacity=1000, user_keys_flush_interval=300, inline_users_in_events=False, - http_proxy=None): + http_proxy=None, + diagnostic_opt_out=False, + diagnostic_recording_interval=900, + wrapper_name=None, + wrapper_version=None): """ :param string sdk_key: The SDK key for your LaunchDarkly account. :param string base_uri: The base URL for the LaunchDarkly server. Most users should use the default @@ -101,6 +105,15 @@ def __init__(self, variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly service uses HTTPS, but a Relay Proxy instance could use HTTP). 
Setting this Config parameter will override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. + :param bool diagnostic_opt_out: TODO TODO TODO + :param int diagnostic_recording_interval: TODO TODO TODO + :param string wrapper_name: For use by wrapper libraries to set an identifying name for the wrapper + being used. This will be sent in HTTP headers during requests to the LaunchDarkly servers to allow + recording metrics on the usage of these wrapper libraries. + :param string wrapper_version: For use by wrapper libraries to report the version of the library in + use. If `wrapper_name` is not set, this field will be ignored. Otherwise the version string will + be included in the HTTP headers along with the `wrapper_name` during requests to the LaunchDarkly + servers. """ self.__sdk_key = sdk_key @@ -133,6 +146,10 @@ def __init__(self, self.__user_keys_flush_interval = user_keys_flush_interval self.__inline_users_in_events = inline_users_in_events self.__http_proxy = http_proxy + self.__diagnostic_opt_out = diagnostic_opt_out + self.__diagnostic_recording_interval = diagnostic_recording_interval + self.__wrapper_name = wrapper_name + self.__wrapper_version = wrapper_version @classmethod def default(cls): @@ -171,7 +188,11 @@ def copy_with_new_sdk_key(self, new_sdk_key): offline=self.__offline, user_keys_capacity=self.__user_keys_capacity, user_keys_flush_interval=self.__user_keys_flush_interval, - inline_users_in_events=self.__inline_users_in_events) + inline_users_in_events=self.__inline_users_in_events, + diagnostic_opt_out=self.__diagnostic_opt_out, + diagnostic_recording_interval=self.__diagnostic_recording_interval, + wrapper_name=self.__wrapper_name, + wrapper_version=self.__wrapper_version) # for internal use only - probably should be part of the client logic def get_default(self, key, default): @@ -289,6 +310,22 @@ def inline_users_in_events(self): def http_proxy(self): return self.__http_proxy + @property + def diagnostic_opt_out(self): + return self.__diagnostic_opt_out + + @property + def diagnostic_recording_interval(self): + return self.__diagnostic_recording_interval + + @property + def wrapper_name(self): + return self.__wrapper_name + + @property + def wrapper_version(self): + return self.__wrapper_version + def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key == '': log.warning("Missing or blank sdk_key.") diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py new file mode 100644 index 00000000..29c6dcb8 --- /dev/null +++ b/ldclient/diagnostics.py @@ -0,0 +1,29 @@ +DEFAULT_CONFIG = Config('sdk_key') +DEFAULT_BASE_URI = DEFAULT_CONFIG.base_uri +DEFAULT_EVENTS_URI = DEFAULT_CONFIG.events_uri +DEFAULT_STREAM_BASE_URI = DEFAULT_CONFIG.stream_base_uri + +def create_diagnostic_config_object(config): + return {'customBaseURI': False if config.base_uri == DEFAULT_BASE_URI else True, + 'customEventsURI': False if config.events_uri == DEFAULT_EVENTS_URI else True, + 'customStreamURI': False if config.stream_base_uri == DEFAULT_STREAM_BASE_URI else True, + 'eventsCapacity': config.events_max_pending, + 'connectTimeoutMillis': config.connect_timeout * 1000, + 'socketTimeoutMillis': config.read_timeout * 1000, + 'eventsFlushIntervalMillis': config.flush_interval * 1000, + 'usingProxy': False, #TODO + 'usingProxyAuthenticator': False, #TODO + 'streamingDisabled': not config.stream, + 'usingRelayDaemon': False, #TODO + 'offline': config.offline, #Check if this actually makes sense + 'allAttributesPrivate': 
config.all_attributes_private, + 'pollingIntervalMillis': config.poll_interval * 1000, + #'startWaitMillis': check, + #'samplingInterval': check, + #'reconnectTimeMillis': check, + 'userKeysCapacity': config.user_keys_capacity, + 'userKeysFlushIntervalMillis': config.user_keys_flush_interval * 1000, + 'inlineUsersInEvents': config.inline_users_in_events, + 'diagnosticRecordingIntervalMillis': config.diagnostic_recording_interval * 1000, + #'featureStoreFactory': check, + } diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 93680c13..29d25979 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -162,7 +162,7 @@ def _do_send(self, output_events): try: json_body = json.dumps(output_events) log.debug('Sending events payload: ' + json_body) - hdrs = _headers(self._config.sdk_key) + hdrs = _headers(self._config) hdrs['X-LaunchDarkly-Event-Schema'] = str(__CURRENT_EVENT_SCHEMA__) uri = self._config.events_uri r = self._http.request('POST', uri, diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 6af810a5..983798ff 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -40,7 +40,7 @@ def get_one(self, kind, key): return self._do_request(self._config.base_uri + kind.request_api_path + '/' + key, False) def _do_request(self, uri, allow_cache): - hdrs = _headers(self._config.sdk_key) + hdrs = _headers(self._config) if allow_cache: cache_entry = self._cache.get(uri) if cache_entry is not None: diff --git a/ldclient/streaming.py b/ldclient/streaming.py index b3638621..391e2f52 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -86,7 +86,7 @@ def log_backoff_message(props): def _connect(self): return SSEClient( self._uri, - headers=_stream_headers(self._config.sdk_key), + headers=_stream_headers(self._config), connect_timeout=self._config.connect_timeout, read_timeout=stream_read_timeout, verify_ssl=self._config.verify_ssl, diff --git a/ldclient/util.py b/ldclient/util.py index 1d059798..23dff4fb 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -37,18 +37,26 @@ # noinspection PyUnresolvedReferences __BASE_TYPES__ = (str, float, int, bool, unicode) - -def _headers(sdk_key): - return {'Authorization': sdk_key, 'User-Agent': 'PythonClient/' + VERSION, - 'Content-Type': "application/json"} - - -def _stream_headers(sdk_key, client="PythonClient"): - return {'Authorization': sdk_key, - 'User-Agent': '{0}/{1}'.format(client, VERSION), - 'Cache-Control': 'no-cache', - 'Accept': "text/event-stream"} - +def _base_headers(config): + headers = {'Authorization': config.sdk_key, + 'User-Agent': 'PythonClient/' + VERSION} + if isinstance(config.wrapper_name, str) and config.wrapper_name != "": + wrapper_version = "" + if isinstance(config.wrapper_version, str) and config.wrapper_version != "": + wrapper_version = "/" + config.wrapper_version + headers.update({'X-LaunchDarkly-Wrapper': config.wrapper_name + wrapper_version}) + return headers + +def _headers(config): + base_headers = _base_headers(config) + base_headers.update({'Content-Type': "application/json"}) + return base_headers + +def _stream_headers(config): + base_headers = _base_headers(config) + base_headers.update({ 'Cache-Control': "no-cache" + , 'Accept': "text/event-stream" }) + return base_headers def check_uwsgi(): if 'uwsgi' in sys.modules: diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 9ef1b4f8..61033bec 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py 
@@ -424,6 +424,30 @@ def test_sdk_key_is_sent(): assert mock_http.request_headers.get('Authorization') == 'SDK_KEY' +def test_wrapper_header_not_sent_when_not_set(): + with DefaultEventProcessor(Config(), mock_http) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') is None + +def test_wrapper_header_sent_when_set(): + with DefaultEventProcessor(Config(wrapper_name = "Flask", wrapper_version = "0.0.1"), mock_http) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask/0.0.1" + +def test_wrapper_header_sent_without_version(): + with DefaultEventProcessor(Config(wrapper_name = "Flask"), mock_http) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask" + def test_no_more_payloads_are_sent_after_401_error(): verify_unrecoverable_http_error(401) diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py index 658c8157..f4837d7e 100644 --- a/testing/test_feature_requester.py +++ b/testing/test_feature_requester.py @@ -34,6 +34,33 @@ def test_get_all_data_sends_headers(): req = server.require_request() assert req.headers['Authorization'] == 'sdk-key' assert req.headers['User-Agent'] == 'PythonClient/' + VERSION + assert req.headers['X-LaunchDarkly-Wrapper'] is None + +def test_get_all_data_sends_wrapper_header(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri, + wrapper_name = 'Flask', wrapper_version = '0.1.0') + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + server.setup_json_response('/sdk/latest-all', resp_data) + + fr.get_all_data() + req = server.require_request() + assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask/0.1.0' + +def test_get_all_data_sends_wrapper_header_without_version(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri, + wrapper_name = 'Flask') + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + server.setup_json_response('/sdk/latest-all', resp_data) + + fr.get_all_data() + req = server.require_request() + assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask' def test_get_all_data_can_use_cached_data(): with start_server() as server: @@ -96,6 +123,31 @@ def test_get_one_flag_sends_headers(): req = server.require_request() assert req.headers['Authorization'] == 'sdk-key' assert req.headers['User-Agent'] == 'PythonClient/' + VERSION + assert req.headers['X-LaunchDarkly-Wrapper'] is None + +def test_get_one_flag_sends_wrapper_header(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri, + wrapper_name = 'Flask', wrapper_version = '0.1.0') + fr = FeatureRequesterImpl(config) + key = 'flag1' + flag_data = { 'key': key } + server.setup_json_response('/sdk/latest-flags/' + key, flag_data) + fr.get_one(FEATURES, key) + req = server.require_request() + assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask/0.1.0' + +def test_get_one_flag_sends_wrapper_header_without_version(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri, + wrapper_name = 'Flask') + fr = FeatureRequesterImpl(config) + key = 'flag1' + flag_data = { 'key': key } + 
server.setup_json_response('/sdk/latest-flags/' + key, flag_data) + fr.get_one(FEATURES, key) + req = server.require_request() + assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask' def test_get_one_flag_throws_on_error(): with start_server() as server: @@ -176,4 +228,4 @@ def _verify_https_proxy_is_used(server, config): except: pass req = server.require_request() - assert req.method == 'CONNECT' \ No newline at end of file + assert req.method == 'CONNECT' diff --git a/testing/test_streaming.py b/testing/test_streaming.py index 65ba0542..e784aa6c 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -42,6 +42,35 @@ def test_sends_headers(): req = server.await_request() assert req.headers['Authorization'] == 'sdk-key' assert req.headers['User-Agent'] == 'PythonClient/' + VERSION + assert req.headers['X-LaunchDarkly-Wrapper'] is None + +def test_sends_wrapper_header(): + store = InMemoryFeatureStore() + ready = Event() + + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, + wrapper_name = 'Flask', wrapper_version = '0.1.0') + server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + req = server.await_request() + assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask/0.1.0' + +def test_sends_wrapper_header_without_version(): + store = InMemoryFeatureStore() + ready = Event() + + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, + wrapper_name = 'Flask') + server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + + with StreamingUpdateProcessor(config, None, store, ready) as sp: + sp.start() + req = server.await_request() + assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask' def test_can_use_http_proxy_via_environment_var(monkeypatch): with start_server() as server: From 38d08bdd935d2ce37a5c28952e428efacc053eff Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 17 Dec 2019 00:07:46 +0000 Subject: [PATCH 163/356] Python 2 compat changes. 
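Note on the compatibility change in the diff below: it swaps bracket indexing of the stub server's response headers for `.get()`. With a mapping, `headers['X']` raises when the header is absent, while `headers.get('X')` returns None, which is what the `is None` assertions need under both Python 2 and Python 3. A minimal standalone illustration, using a plain dict as a stand-in for the test server's header container (an assumption; the real object in these tests may behave differently):

    # Plain dict standing in for the stub server's header container (assumed).
    headers = {'Authorization': 'sdk-key'}

    try:
        headers['X-LaunchDarkly-Wrapper']  # bracket indexing raises on a missing header
    except KeyError:
        pass

    assert headers.get('X-LaunchDarkly-Wrapper') is None  # .get() returns None instead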
--- testing/test_feature_requester.py | 12 ++++++------ testing/test_streaming.py | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py index f4837d7e..da72442c 100644 --- a/testing/test_feature_requester.py +++ b/testing/test_feature_requester.py @@ -34,7 +34,7 @@ def test_get_all_data_sends_headers(): req = server.require_request() assert req.headers['Authorization'] == 'sdk-key' assert req.headers['User-Agent'] == 'PythonClient/' + VERSION - assert req.headers['X-LaunchDarkly-Wrapper'] is None + assert req.headers.get('X-LaunchDarkly-Wrapper') is None def test_get_all_data_sends_wrapper_header(): with start_server() as server: @@ -47,7 +47,7 @@ def test_get_all_data_sends_wrapper_header(): fr.get_all_data() req = server.require_request() - assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask/0.1.0' + assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' def test_get_all_data_sends_wrapper_header_without_version(): with start_server() as server: @@ -60,7 +60,7 @@ def test_get_all_data_sends_wrapper_header_without_version(): fr.get_all_data() req = server.require_request() - assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask' + assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' def test_get_all_data_can_use_cached_data(): with start_server() as server: @@ -123,7 +123,7 @@ def test_get_one_flag_sends_headers(): req = server.require_request() assert req.headers['Authorization'] == 'sdk-key' assert req.headers['User-Agent'] == 'PythonClient/' + VERSION - assert req.headers['X-LaunchDarkly-Wrapper'] is None + assert req.headers.get('X-LaunchDarkly-Wrapper') is None def test_get_one_flag_sends_wrapper_header(): with start_server() as server: @@ -135,7 +135,7 @@ def test_get_one_flag_sends_wrapper_header(): server.setup_json_response('/sdk/latest-flags/' + key, flag_data) fr.get_one(FEATURES, key) req = server.require_request() - assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask/0.1.0' + assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' def test_get_one_flag_sends_wrapper_header_without_version(): with start_server() as server: @@ -147,7 +147,7 @@ def test_get_one_flag_sends_wrapper_header_without_version(): server.setup_json_response('/sdk/latest-flags/' + key, flag_data) fr.get_one(FEATURES, key) req = server.require_request() - assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask' + assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' def test_get_one_flag_throws_on_error(): with start_server() as server: diff --git a/testing/test_streaming.py b/testing/test_streaming.py index e784aa6c..37cf0148 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -40,9 +40,9 @@ def test_sends_headers(): with StreamingUpdateProcessor(config, None, store, ready) as sp: sp.start() req = server.await_request() - assert req.headers['Authorization'] == 'sdk-key' - assert req.headers['User-Agent'] == 'PythonClient/' + VERSION - assert req.headers['X-LaunchDarkly-Wrapper'] is None + assert req.headers.get('Authorization') == 'sdk-key' + assert req.headers.get('User-Agent') == 'PythonClient/' + VERSION + assert req.headers.get('X-LaunchDarkly-Wrapper') is None def test_sends_wrapper_header(): store = InMemoryFeatureStore() @@ -56,7 +56,7 @@ def test_sends_wrapper_header(): with StreamingUpdateProcessor(config, None, store, ready) as sp: sp.start() req = server.await_request() - assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask/0.1.0' + 
assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' def test_sends_wrapper_header_without_version(): store = InMemoryFeatureStore() @@ -70,7 +70,7 @@ def test_sends_wrapper_header_without_version(): with StreamingUpdateProcessor(config, None, store, ready) as sp: sp.start() req = server.await_request() - assert req.headers['X-LaunchDarkly-Wrapper'] == 'Flask' + assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' def test_can_use_http_proxy_via_environment_var(monkeypatch): with start_server() as server: From e3eb3ee38b33b4b80edfba4c29fe6acc4f78d5f6 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 17 Dec 2019 21:29:06 +0000 Subject: [PATCH 164/356] More event generation code and starting to integrate tracking diagnostic values. --- ldclient/diagnostics.py | 32 +++++++++++++++++++++++++++++--- ldclient/event_processor.py | 31 +++++++++++++++++++++++++++---- 2 files changed, 56 insertions(+), 7 deletions(-) diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 29c6dcb8..d6109afb 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -3,10 +3,23 @@ DEFAULT_EVENTS_URI = DEFAULT_CONFIG.events_uri DEFAULT_STREAM_BASE_URI = DEFAULT_CONFIG.stream_base_uri +def diagnostic_base_fields(kind, creation_date, diagnostic_id): + return {'kind': kind, + 'creationDate': creation_date, + 'id': diagnostic_id} + +def create_diagnostic_statistics(creation_date, diagnostic_id, data_since_date, dropped_events, deduplicated_users, events_in_last_batch): + base_object = diagnostic_base_fields('diagnostic', creation_date, diagnostic_id) + base_object.update({'dataSinceDate': data_since_date, + 'droppedEvents': dropped_events, + 'deduplicatedUsers': deduplicated_users, + 'eventsInLastBatch': events_in_last_batch}) + return base_object + def create_diagnostic_config_object(config): - return {'customBaseURI': False if config.base_uri == DEFAULT_BASE_URI else True, - 'customEventsURI': False if config.events_uri == DEFAULT_EVENTS_URI else True, - 'customStreamURI': False if config.stream_base_uri == DEFAULT_STREAM_BASE_URI else True, + return {'customBaseURI': config.base_uri != DEFAULT_BASE_URI, + 'customEventsURI': config.events_uri != DEFAULT_EVENTS_URI, + 'customStreamURI': config.stream_base_uri != DEFAULT_STREAM_BASE_URI, 'eventsCapacity': config.events_max_pending, 'connectTimeoutMillis': config.connect_timeout * 1000, 'socketTimeoutMillis': config.read_timeout * 1000, @@ -27,3 +40,16 @@ def create_diagnostic_config_object(config): 'diagnosticRecordingIntervalMillis': config.diagnostic_recording_interval * 1000, #'featureStoreFactory': check, } + +def create_diagnostic_sdk_object(): + return {} + +def create_diagnostic_platform_object(): + return {} + +def create_diagnostic_init(creation_date, diagnostic_id, config): + base_object = diagnostic_base_fields('diagnostic-init', creation_date, diagnostic_id) + base_object.update({'configuration': create_diagnostic_config_object(config), + 'sdk': create_diagnostic_sdk_object(), + 'platform': create_diagnostic_platform_object()}) + return base_object diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 29d25979..c66d6aac 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -186,9 +186,11 @@ def __init__(self, capacity): self._events = [] self._summarizer = EventSummarizer() self._exceeded_capacity = False + self._dropped_events = 0 def add_event(self, event): if len(self._events) >= self._capacity: + self._dropped_events = self._dropped_events + 1 if not 
self._exceeded_capacity: log.warning("Exceeded event queue capacity. Increase capacity to avoid dropping events.") self._exceeded_capacity = True @@ -198,7 +200,12 @@ def add_event(self, event): def add_to_summary(self, event): self._summarizer.summarize_event(event) - + + def get_and_clear_dropped_count(self): + ret = self._dropped_events + self._dropped_events = 0 + return ret + def get_payload(self): return FlushPayload(self._events, self._summarizer.snapshot()) @@ -219,6 +226,7 @@ def __init__(self, inbox, config, http_client): self._user_keys = SimpleLRUCache(config.user_keys_capacity) self._formatter = EventOutputFormatter(config) self._last_known_past_time = 0 + self._deduplicated_users = 0 self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush") @@ -237,6 +245,8 @@ def _run_main_loop(self): self._trigger_flush() elif message.type == 'flush_users': self._user_keys.clear() + elif message.type == 'diagnostic': + self._send_and_reset_diagnostics() elif message.type == 'test_sync': self._flush_workers.wait() message.param.set() @@ -269,9 +279,12 @@ def _process_event(self, event): # an identify event for that user. if not (add_full_event and self._config.inline_users_in_events): user = event.get('user') - if user and not self.notice_user(user): - if event['kind'] != 'identify': - add_index_event = True + if user and 'key' in user: + is_index_event = event['kind'] == 'identify' + already_seen = self.notice_user(user) + add_index_event = not is_index_event and not already_seen + if not is_index_event and already_seen: + self._deduplicated_users = self._deduplicated_users + 1 if add_index_event: ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user } @@ -326,6 +339,10 @@ def _handle_response(self, r): self._disabled = True return + def _send_and_reset_diagnostics(self): + dropped_event_count = self._outbox.get_and_clear_dropped_count() + return + def _do_shutdown(self): self._flush_workers.stop() self._flush_workers.wait() @@ -341,6 +358,9 @@ def __init__(self, config, http=None, dispatcher_class=None): self._users_flush_timer = RepeatingTimer(config.user_keys_flush_interval, self._flush_users) self._flush_timer.start() self._users_flush_timer.start() + if not config.diagnostic_opt_out: + self._diagnostic_event_timer = RepeatingTimer(config.diagnostic_recording_interval, self._send_diagnostic) + self._diagnostic_event_timer.start() self._close_lock = Lock() self._closed = False (dispatcher_class or EventDispatcher)(self._inbox, config, http) @@ -376,6 +396,9 @@ def _post_to_inbox(self, message): def _flush_users(self): self._inbox.put(EventProcessorMessage('flush_users', None)) + def _send_diagnostic(self): + self._inbox.put(EventProcessorMessage('diagnostic', None)) + # Used only in tests def _wait_until_inactive(self): self._post_message_and_wait('test_sync') From 2d801980829d309344a8766d4932161d41bddc9e Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Fri, 20 Dec 2019 19:26:12 +0000 Subject: [PATCH 165/356] Add minimum diagnostic recording interval. Fix diagnostic.py to be importable. Add more diagnostic event fields. 
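The config change in the diff below clamps the diagnostic recording interval to a 60-second floor via `max(diagnostic_recording_interval, 60)`. A quick sketch of the resulting behavior (the helper name here is illustrative, not part of the SDK):

    def clamp_diagnostic_interval(requested_seconds):
        # Values below the 60-second minimum are raised to it; larger values pass through.
        return max(requested_seconds, 60)

    assert clamp_diagnostic_interval(900) == 900  # the 15-minute default is unchanged
    assert clamp_diagnostic_interval(10) == 60    # too-small intervals are clamped to 60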
--- ldclient/config.py | 2 +- ldclient/diagnostics.py | 31 ++++++++++++++++--------------- ldclient/event_processor.py | 3 ++- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/ldclient/config.py b/ldclient/config.py index 517b9e18..c040e9c4 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -147,7 +147,7 @@ def __init__(self, self.__inline_users_in_events = inline_users_in_events self.__http_proxy = http_proxy self.__diagnostic_opt_out = diagnostic_opt_out - self.__diagnostic_recording_interval = diagnostic_recording_interval + self.__diagnostic_recording_interval = max(diagnostic_recording_interval, 60) self.__wrapper_name = wrapper_name self.__wrapper_version = wrapper_version diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index d6109afb..0b6c5cfc 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -1,7 +1,7 @@ -DEFAULT_CONFIG = Config('sdk_key') -DEFAULT_BASE_URI = DEFAULT_CONFIG.base_uri -DEFAULT_EVENTS_URI = DEFAULT_CONFIG.events_uri -DEFAULT_STREAM_BASE_URI = DEFAULT_CONFIG.stream_base_uri +#DEFAULT_CONFIG = Config.default() +#DEFAULT_BASE_URI = DEFAULT_CONFIG.base_uri +#DEFAULT_EVENTS_URI = DEFAULT_CONFIG.events_uri +#DEFAULT_STREAM_BASE_URI = DEFAULT_CONFIG.stream_base_uri def diagnostic_base_fields(kind, creation_date, diagnostic_id): return {'kind': kind, @@ -17,22 +17,20 @@ def create_diagnostic_statistics(creation_date, diagnostic_id, data_since_date, return base_object def create_diagnostic_config_object(config): - return {'customBaseURI': config.base_uri != DEFAULT_BASE_URI, - 'customEventsURI': config.events_uri != DEFAULT_EVENTS_URI, - 'customStreamURI': config.stream_base_uri != DEFAULT_STREAM_BASE_URI, + default_config = Config.default() + return {'customBaseURI': config.base_uri != default_config.base_uri, + 'customEventsURI': config.events_uri != default_config.events_uri, + 'customStreamURI': config.stream_base_uri != default_config.stream_base_uri, 'eventsCapacity': config.events_max_pending, 'connectTimeoutMillis': config.connect_timeout * 1000, 'socketTimeoutMillis': config.read_timeout * 1000, 'eventsFlushIntervalMillis': config.flush_interval * 1000, - 'usingProxy': False, #TODO - 'usingProxyAuthenticator': False, #TODO + 'usingProxy': config.http_proxy is not None, 'streamingDisabled': not config.stream, - 'usingRelayDaemon': False, #TODO + 'usingRelayDaemon': config.use_ldd, 'offline': config.offline, #Check if this actually makes sense 'allAttributesPrivate': config.all_attributes_private, 'pollingIntervalMillis': config.poll_interval * 1000, - #'startWaitMillis': check, - #'samplingInterval': check, #'reconnectTimeMillis': check, 'userKeysCapacity': config.user_keys_capacity, 'userKeysFlushIntervalMillis': config.user_keys_flush_interval * 1000, @@ -41,11 +39,14 @@ def create_diagnostic_config_object(config): #'featureStoreFactory': check, } -def create_diagnostic_sdk_object(): - return {} +def create_diagnostic_sdk_object(config): + return {'name': 'python-server-sdk', + 'version': VERSION, + 'wrapperName': config.wrapper_name, + 'wrapperVersion': config.wrapper_version} def create_diagnostic_platform_object(): - return {} + return {'name': 'python'} def create_diagnostic_init(creation_date, diagnostic_id, config): base_object = diagnostic_base_fields('diagnostic-init', creation_date, diagnostic_id) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index c66d6aac..2b5b952e 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -30,7 +30,7 @@ from 
ldclient.util import create_http_pool_manager from ldclient.util import log from ldclient.util import http_error_message, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response - +from ldclient.diagnostics import create_diagnostic_init, create_diagnostic_statistics __MAX_FLUSH_THREADS__ = 5 __CURRENT_EVENT_SCHEMA__ = 3 @@ -341,6 +341,7 @@ def _handle_response(self, r): def _send_and_reset_diagnostics(self): dropped_event_count = self._outbox.get_and_clear_dropped_count() + stats_event = create_diagnostic_statistics(1, 0, 0, dropped_event_count, self._deduplicated_users, 0) return def _do_shutdown(self): From 7fd454fd6ee0ab32e3b1bb52ea57fc91e9514397 Mon Sep 17 00:00:00 2001 From: Ben Woskow Date: Mon, 23 Dec 2019 16:01:04 -0800 Subject: [PATCH 166/356] don't let user fall outside of last bucket in rollout --- ldclient/flag.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ldclient/flag.py b/ldclient/flag.py index 11a5be41..3ff80fab 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -209,6 +209,13 @@ def _variation_index_for_user(feature, rule, user): if bucket < sum: return wv.get('variation') + # The user's bucket value was greater than or equal to the end of the last bucket. This could happen due + # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag + # data could contain buckets that don't actually add up to 100000. Rather than returning an error in + # this case (or changing the scaling, which would potentially change the results for *all* users), we + # will simply put the user in the last bucket. + return rule['rollout'].get('variations')[len(rule['rollout'].get('variations'))].get('variation') + return None From 588f352b0ade70519ef2085f8362676d2106cf46 Mon Sep 17 00:00:00 2001 From: Ben Woskow Date: Mon, 23 Dec 2019 16:13:38 -0800 Subject: [PATCH 167/356] fixing conditional logic --- ldclient/flag.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ldclient/flag.py b/ldclient/flag.py index 3ff80fab..332d165e 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -198,13 +198,13 @@ def _variation_index_for_user(feature, rule, user): if rule.get('variation') is not None: return rule['variation'] - if rule.get('rollout') is not None: + if rule.get('rollout') is not None and rule['rollout'].get('variations') is not None and len(rule['rollout'].get('variations')) > 0: bucket_by = 'key' if rule['rollout'].get('bucketBy') is not None: bucket_by = rule['rollout']['bucketBy'] bucket = _bucket_user(user, feature['key'], feature['salt'], bucket_by) sum = 0.0 - for wv in rule['rollout'].get('variations') or []: + for wv in rule['rollout'].get('variations'): sum += wv.get('weight', 0.0) / 100000.0 if bucket < sum: return wv.get('variation') From 7b357b0c7e3f751058811cd2ee1968eb699cdd86 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Sat, 28 Dec 2019 14:57:58 +0000 Subject: [PATCH 168/356] Add docstrings for diagnostic configuration options. --- ldclient/config.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ldclient/config.py b/ldclient/config.py index c040e9c4..65a39797 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -105,8 +105,13 @@ def __init__(self, variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly service uses HTTPS, but a Relay Proxy instance could use HTTP). 
Setting this Config parameter will override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. - :param bool diagnostic_opt_out: TODO TODO TODO - :param int diagnostic_recording_interval: TODO TODO TODO + :param bool diagnostic_opt_out: Unless this field is set to True, the client will send + some diagnostics data to the LaunchDarkly servers in order to assist in the development of future SDK + improvements. These diagnostics consist of an initial payload containing some details of the SDK in use, + the SDK's configuration, and the platform the SDK is being run on, as well as payloads sent + periodically with information on irregular occurrences such as dropped events. + :param int diagnostic_recording_interval: The interval in seconds at which periodic diagnostic data is + sent. The default is 900 seconds (every 15 minutes) and the minimum value is 60 seconds. :param string wrapper_name: For use by wrapper libraries to set an identifying name for the wrapper being used. This will be sent in HTTP headers during requests to the LaunchDarkly servers to allow recording metrics on the usage of these wrapper libraries. From af5a1621cda4f45cd49a436a2f413783afbf67b4 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 30 Dec 2019 12:04:04 -0800 Subject: [PATCH 169/356] fix off-by-1 error --- ldclient/flag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/flag.py b/ldclient/flag.py index 332d165e..f5e9a237 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -214,7 +214,7 @@ def _variation_index_for_user(feature, rule, user): # data could contain buckets that don't actually add up to 100000. Rather than returning an error in # this case (or changing the scaling, which would potentially change the results for *all* users), we # will simply put the user in the last bucket. - return rule['rollout'].get('variations')[len(rule['rollout'].get('variations'))].get('variation') + return rule['rollout'].get('variations')[-1].get('variation') return None From 75a9aabc53c958cb7ce257f9bb755365365581a9 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 30 Dec 2019 12:04:37 -0800 Subject: [PATCH 170/356] avoid redundant dict lookups --- ldclient/flag.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ldclient/flag.py b/ldclient/flag.py index f5e9a237..422a56f0 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -198,13 +198,17 @@ def _variation_index_for_user(feature, rule, user): if rule.get('variation') is not None: return rule['variation'] - if rule.get('rollout') is not None and rule['rollout'].get('variations') is not None and len(rule['rollout'].get('variations')) > 0: + rollout = rule.get('rollout') + if rollout is None: + return None + variations = rollout.get('variations') + if variations is not None and len(variations) > 0: bucket_by = 'key' - if rule['rollout'].get('bucketBy') is not None: - bucket_by = rule['rollout']['bucketBy'] + if rollout.get('bucketBy') is not None: + bucket_by = rollout['bucketBy'] bucket = _bucket_user(user, feature['key'], feature['salt'], bucket_by) sum = 0.0 - for wv in rule['rollout'].get('variations'): + for wv in variations: sum += wv.get('weight', 0.0) / 100000.0 if bucket < sum: return wv.get('variation') @@ -214,7 +218,7 @@ def _variation_index_for_user(feature, rule, user): # data could contain buckets that don't actually add up to 100000. 
Rather than returning an error in # this case (or changing the scaling, which would potentially change the results for *all* users), we # will simply put the user in the last bucket. - return rule['rollout'].get('variations')[-1].get('variation') + return variations[-1].get('variation') return None From 590ca64ae68c772b71b905cff14f5a046bbc6f09 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 30 Dec 2019 12:04:56 -0800 Subject: [PATCH 171/356] add unit tests for basic bucketing logic and edge case --- testing/test_flag.py | 43 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 42 insertions(+), 1 deletion(-) diff --git a/testing/test_flag.py b/testing/test_flag.py index ced400e5..6b50b55a 100644 --- a/testing/test_flag.py +++ b/testing/test_flag.py @@ -1,6 +1,7 @@ +import math import pytest from ldclient.feature_store import InMemoryFeatureStore -from ldclient.flag import EvaluationDetail, EvalResult, _bucket_user, evaluate +from ldclient.flag import EvaluationDetail, EvalResult, _bucket_user, _variation_index_for_user, evaluate from ldclient.impl.event_factory import _EventFactory from ldclient.versioned_data_kind import FEATURES, SEGMENTS @@ -384,7 +385,47 @@ def _make_bool_flag_from_clause(clause): 'variations': [ False, True ] } +def test_variation_index_is_returned_for_bucket(): + user = { 'key': 'userkey' } + flag = { 'key': 'flagkey', 'salt': 'salt' } + + # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, + # so we can construct a rollout whose second bucket just barely contains that value + bucket_value = math.trunc(_bucket_user(user, flag['key'], flag['salt'], 'key') * 100000) + assert bucket_value > 0 and bucket_value < 100000 + + bad_variation_a = 0 + matched_variation = 1 + bad_variation_b = 2 + rule = { + 'rollout': { + 'variations': [ + { 'variation': bad_variation_a, 'weight': bucket_value }, # end of bucket range is not inclusive, so it will *not* match the target value + { 'variation': matched_variation, 'weight': 1 }, # size of this bucket is 1, so it only matches that specific value + { 'variation': bad_variation_b, 'weight': 100000 - (bucket_value + 1) } + ] + } + } + result_variation = _variation_index_for_user(flag, rule, user) + assert result_variation == matched_variation +def test_last_bucket_is_used_if_bucket_value_equals_total_weight(): + user = { 'key': 'userkey' } + flag = { 'key': 'flagkey', 'salt': 'salt' } + + # We'll construct a list of variations that stops right at the target bucket value + bucket_value = math.trunc(_bucket_user(user, flag['key'], flag['salt'], 'key') * 100000) + + rule = { + 'rollout': { + 'variations': [ + { 'variation': 0, 'weight': bucket_value } + ] + } + } + result_variation = _variation_index_for_user(flag, rule, user) + assert result_variation == 0 + def test_bucket_by_user_key(): user = { u'key': u'userKeyA' } bucket = _bucket_user(user, 'hashKey', 'saltyA', 'key') From 0f09a732077a182ddbcdc2da212cc1a2ac348d59 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Mon, 30 Dec 2019 21:24:28 +0000 Subject: [PATCH 172/356] Stream init tracking. Feeding of accumulator object through SDK. Various fixes. 
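The accumulator introduced here is shared between threads: the event processor creates it, the client fetches it via retrieve_diagnostic_accumulator(), and the streaming processor records connection attempts into it. A minimal sketch of how the new pieces interact, using the names from this patch (the recorded values are illustrative, not part of the diff):

    import time
    from ldclient.config import Config
    from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator

    accum = _DiagnosticAccumulator(create_diagnostic_id(Config(sdk_key='SDK_KEY')))

    # The streaming processor records the outcome and duration of each connection attempt.
    started = int(time.time() * 1000)
    accum.record_stream_init(started, int(time.time() * 1000) - started, False)

    # The event dispatcher records how many events the last flush carried.
    accum.record_events_in_batch(42)

    # The periodic diagnostic timer drains everything into a single event payload.
    event = accum.create_event_and_reset(dropped_events=0, deduplicated_users=0)
    assert event['streamInits'][0]['failed'] is False
    assert event['eventsInLastBatch'] == 42
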
--- ldclient/client.py | 11 +++-- ldclient/config.py | 5 +++ ldclient/diagnostics.py | 77 +++++++++++++++++++++++---------- ldclient/event_processor.py | 64 ++++++++++++++++++++++++--- ldclient/streaming.py | 12 ++++- testing/test_event_processor.py | 69 ++++++++++++++--------------- testing/test_streaming.py | 12 ++--- 7 files changed, 178 insertions(+), 72 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 825d542c..c51b2b53 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -105,8 +105,13 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): self._event_processor = self._make_event_processor(self._config) + if callable(getattr(self._event_processor, 'retrieve_diagnostic_accumulator', None)): + diagnostic_accumulator = self._event_processor.retrieve_diagnostic_accumulator() + else: + diagnostic_accumulator = None + update_processor_ready = threading.Event() - self._update_processor = self._make_update_processor(self._config, self._store, update_processor_ready) + self._update_processor = self._make_update_processor(self._config, self._store, update_processor_ready, diagnostic_accumulator) self._update_processor.start() if start_wait > 0 and not self._config.offline and not self._config.use_ldd: @@ -124,7 +129,7 @@ def _make_event_processor(self, config): return NullEventProcessor() return config.event_processor_class(config) - def _make_update_processor(self, config, store, ready): + def _make_update_processor(self, config, store, ready, diagnostic_accumulator): if config.update_processor_class: log.info("Using user-specified update processor: " + str(config.update_processor_class)) return config.update_processor_class(config, store, ready) @@ -139,7 +144,7 @@ def _make_update_processor(self, config, store, ready): """ :type: FeatureRequester """ if config.stream: - return StreamingUpdateProcessor(config, feature_requester, store, ready) + return StreamingUpdateProcessor(config, feature_requester, store, ready, diagnostic_accumulator) log.info("Disabling streaming API") log.warning("You should only disable the streaming API if instructed to do so by LaunchDarkly support") diff --git a/ldclient/config.py b/ldclient/config.py index 65a39797..8b1ee411 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -216,6 +216,11 @@ def base_uri(self): def get_latest_flags_uri(self): return self.__base_uri + GET_LATEST_FEATURES_PATH + # for internal use only + @property + def events_base_uri(self): + return self.__events_uri + # for internal use only - should construct the URL path in the events code, not here @property def events_uri(self): diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 0b6c5cfc..3acb96fd 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -2,22 +2,63 @@ #DEFAULT_BASE_URI = DEFAULT_CONFIG.base_uri #DEFAULT_EVENTS_URI = DEFAULT_CONFIG.events_uri #DEFAULT_STREAM_BASE_URI = DEFAULT_CONFIG.stream_base_uri +import threading +import time +import uuid -def diagnostic_base_fields(kind, creation_date, diagnostic_id): +class _DiagnosticAccumulator(object): + def __init__(self, diagnostic_id): + self.diagnostic_id = diagnostic_id + self.data_since_date = int(time.time() * 1000) + self._state_lock = threading.Lock() + self._events_in_last_batch = 0 + self._stream_inits = [] + + def record_stream_init(self, timestamp, duration, failed): + with self._state_lock: + self._stream_inits.append({'timestamp': timestamp, + 'durationMillis': duration, + 'failed': failed}) + + def record_events_in_batch(self, 
events_in_batch): + with self._state_lock: + self._events_in_last_batch = events_in_batch + + def create_event_and_reset(self, dropped_events, deduplicated_users): + with self._state_lock: + events_in_batch = self._events_in_last_batch + stream_inits = self._stream_inits + self._events_in_last_batch = 0 + self._stream_inits = [] + + current_time = int(time.time() * 1000) + periodic_event = _diagnostic_base_fields('diagnostic', current_time, self.diagnostic_id) + periodic_event.update({'dataSincedate': self.data_since_date, + 'droppedEvents': dropped_events, + 'deduplicatedUsers': deduplicated_users, + 'eventsInLastBatch': events_in_batch, + 'streamInits': stream_inits}) + self.data_since_date = current_time + return periodic_event + +def create_diagnostic_id(config): + return {'diagnosticId': str(uuid.uuid4()), + 'sdkKeySuffix': '' if not config.sdk_key else config.sdk_key[-6:]} + +def create_diagnostic_init(creation_date, diagnostic_id, config): + base_object = _diagnostic_base_fields('diagnostic-init', creation_date, diagnostic_id) + base_object.update({'configuration': _create_diagnostic_config_object(config), + 'sdk': _create_diagnostic_sdk_object(config), + 'platform': _create_diagnostic_platform_object()}) + return base_object + +def _diagnostic_base_fields(kind, creation_date, diagnostic_id): return {'kind': kind, 'creationDate': creation_date, 'id': diagnostic_id} -def create_diagnostic_statistics(creation_date, diagnostic_id, data_since_date, dropped_events, deduplicated_users, events_in_last_batch): - base_object = diagnostic_base_fields('diagnostic', creation_date, diagnostic_id) - base_object.update({'dataSinceDate': data_since_date, - 'droppedEvents': dropped_events, - 'deduplicatedUsers': deduplicated_users, - 'eventsInLastBatch': events_in_last_batch}) - return base_object - -def create_diagnostic_config_object(config): - default_config = Config.default() +def _create_diagnostic_config_object(config): + default_config = config.default() return {'customBaseURI': config.base_uri != default_config.base_uri, 'customEventsURI': config.events_uri != default_config.events_uri, 'customStreamURI': config.stream_base_uri != default_config.stream_base_uri, @@ -28,7 +69,6 @@ def create_diagnostic_config_object(config): 'usingProxy': config.http_proxy is not None, 'streamingDisabled': not config.stream, 'usingRelayDaemon': config.use_ldd, - 'offline': config.offline, #Check if this actually makes sense 'allAttributesPrivate': config.all_attributes_private, 'pollingIntervalMillis': config.poll_interval * 1000, #'reconnectTimeMillis': check, @@ -39,18 +79,11 @@ def create_diagnostic_config_object(config): #'featureStoreFactory': check, } -def create_diagnostic_sdk_object(config): +def _create_diagnostic_sdk_object(config): return {'name': 'python-server-sdk', - 'version': VERSION, + 'version': 6, #VERSION, 'wrapperName': config.wrapper_name, 'wrapperVersion': config.wrapper_version} -def create_diagnostic_platform_object(): +def _create_diagnostic_platform_object(): return {'name': 'python'} - -def create_diagnostic_init(creation_date, diagnostic_id, config): - base_object = diagnostic_base_fields('diagnostic-init', creation_date, diagnostic_id) - base_object.update({'configuration': create_diagnostic_config_object(config), - 'sdk': create_diagnostic_sdk_object(), - 'platform': create_diagnostic_platform_object()}) - return base_object diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 2b5b952e..c7ce1b27 100644 --- a/ldclient/event_processor.py +++ 
b/ldclient/event_processor.py @@ -30,7 +30,7 @@ from ldclient.util import create_http_pool_manager from ldclient.util import log from ldclient.util import http_error_message, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response -from ldclient.diagnostics import create_diagnostic_init, create_diagnostic_statistics +from ldclient.diagnostics import create_diagnostic_init, create_diagnostic_id, _DiagnosticAccumulator __MAX_FLUSH_THREADS__ = 5 __CURRENT_EVENT_SCHEMA__ = 3 @@ -177,6 +177,40 @@ def _do_send(self, output_events): 'Unhandled exception in event processor. Analytics events were not processed. [%s]', e) +class DiagnosticEventSendTask(object): + def __init__(self, http, config, event_body, response_fn): + self._http = http + self._config = config + self._event_body = event_body + self._response_fn = response_fn + + def run_thread(self): + try: + Thread(target = self._do_send()).start() + except Exception: + log.warning( + 'Unhandled exception in event processor. Analytics events were not processed.', + exc_info=True) + + def _do_send(self): + # noinspection PyBroadException + try: + json_body = json.dumps(self._event_body) + log.debug('Sending diagnostic event: ' + json_body) + hdrs = _headers(self._config) + uri = self._config.events_base_uri + '/diagnostic' + r = self._http.request('POST', uri, + headers=hdrs, + timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), + body=json_body, + retries=1) + if (self._response_fn): + self._response_fn(r) + except Exception as e: + log.warning( + 'Unhandled exception in event processor. Diagnostic event was not sent. [%s]', e) + + FlushPayload = namedtuple('FlushPayload', ['events', 'summary']) @@ -215,7 +249,7 @@ def clear(self): class EventDispatcher(object): - def __init__(self, inbox, config, http_client): + def __init__(self, inbox, config, http_client, diagnostic_accumulator=None): self._inbox = inbox self._config = config self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, @@ -227,6 +261,7 @@ def __init__(self, inbox, config, http_client): self._formatter = EventOutputFormatter(config) self._last_known_past_time = 0 self._deduplicated_users = 0 + self._diagnostic_accumulator = diagnostic_accumulator self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush") @@ -340,9 +375,11 @@ def _handle_response(self, r): return def _send_and_reset_diagnostics(self): - dropped_event_count = self._outbox.get_and_clear_dropped_count() - stats_event = create_diagnostic_statistics(1, 0, 0, dropped_event_count, self._deduplicated_users, 0) - return + if (self._diagnostic_accumulator): + dropped_event_count = self._outbox.get_and_clear_dropped_count() + stats_event = self._diagnostic_accumulator.create_event_and_reset(dropped_event_count, self._deduplicated_users) + self._deduplicated_users = 0 + DiagnosticEventSendTask(self._http, self._config, stats_event, None).run_thread() def _do_shutdown(self): self._flush_workers.stop() @@ -359,12 +396,24 @@ def __init__(self, config, http=None, dispatcher_class=None): self._users_flush_timer = RepeatingTimer(config.user_keys_flush_interval, self._flush_users) self._flush_timer.start() self._users_flush_timer.start() + self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, + target_base_uri=config.events_uri, + force_proxy=config.http_proxy) if http is None else http if not config.diagnostic_opt_out: + diagnostic_id = create_diagnostic_id(config) + self._diagnostic_accumulator = 
_DiagnosticAccumulator(diagnostic_id) + init_event = create_diagnostic_init(self._diagnostic_accumulator.data_since_date, diagnostic_id, config) + DiagnosticEventSendTask(self._http, config, init_event, None).run_thread() + self._diagnostic_event_timer = RepeatingTimer(config.diagnostic_recording_interval, self._send_diagnostic) self._diagnostic_event_timer.start() + else: + self._diagnostic_accumulator = None + self._close_lock = Lock() self._closed = False - (dispatcher_class or EventDispatcher)(self._inbox, config, http) + + (dispatcher_class or EventDispatcher)(self._inbox, config, self._http, self._diagnostic_accumulator) def send_event(self, event): event['creationDate'] = int(time.time() * 1000) @@ -385,6 +434,9 @@ def stop(self): # is full; an orderly shutdown can't happen unless these messages are received. self._post_message_and_wait('stop') + def retrieve_diagnostic_accumulator(self): + return self._diagnostic_accumulator + def _post_to_inbox(self, message): try: self._inbox.put(message, block=False) diff --git a/ldclient/streaming.py b/ldclient/streaming.py index 391e2f52..2016e2d3 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -27,7 +27,7 @@ class StreamingUpdateProcessor(Thread, UpdateProcessor): - def __init__(self, config, requester, store, ready): + def __init__(self, config, requester, store, ready, diagnostic_accumulator): Thread.__init__(self) self.daemon = True self._uri = config.stream_base_uri + STREAM_ALL_PATH @@ -36,6 +36,8 @@ def __init__(self, config, requester, store, ready): self._store = store self._running = False self._ready = ready + self._diagnostic_accumulator = diagnostic_accumulator + self._es_started = None # We need to suppress the default logging behavior of the backoff package, because # it logs messages at ERROR level with variable content (the delay time) which will @@ -52,11 +54,14 @@ def run(self): self._running = True while self._running: try: + self._es_started = int(time.time() * 1000) messages = self._connect() for msg in messages: if not self._running: break message_ok = self.process_message(self._store, self._requester, msg) + self._record_stream_init(False) + self._es_started = None if message_ok is True and self._ready.is_set() is False: log.info("StreamingUpdateProcessor initialized ok.") self._ready.set() @@ -71,6 +76,11 @@ def run(self): # no stacktrace here because, for a typical connection error, it'll just be a lengthy tour of urllib3 internals time.sleep(1) + def _record_stream_init(self, failed): + if self._diagnostic_accumulator and self._es_started: + current_time = int(time.time() * 1000) + self._diagnostic_accumulator.record_stream_init(current_time, current_time - self._es_started, failed) + def _backoff_expo(): return backoff.expo(max_value=30) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 61033bec..d6641471 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -62,13 +62,14 @@ def teardown_function(): if ep is not None: ep.stop() -def setup_processor(config): - global ep - ep = DefaultEventProcessor(config, mock_http) - +class DefaultTestProcessor(DefaultEventProcessor): + def __init__(self, **kwargs): + if not 'diagnostic_opt_out' in kwargs: + kwargs['diagnostic_opt_out'] = True + DefaultEventProcessor.__init__(self, Config(**kwargs), mock_http) def test_identify_event_is_queued(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: e = { 'kind': 'identify', 'user': user } ep.send_event(e) @@ 
-82,7 +83,7 @@ def test_identify_event_is_queued(): }] def test_user_is_filtered_in_identify_event(): - with DefaultEventProcessor(Config(all_attributes_private = True), mock_http) as ep: + with DefaultTestProcessor(all_attributes_private = True) as ep: e = { 'kind': 'identify', 'user': user } ep.send_event(e) @@ -96,7 +97,7 @@ def test_user_is_filtered_in_identify_event(): }] def test_user_attrs_are_stringified_in_identify_event(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: e = { 'kind': 'identify', 'user': numeric_user } ep.send_event(e) @@ -110,7 +111,7 @@ def test_user_attrs_are_stringified_in_identify_event(): }] def test_individual_feature_event_is_queued_with_index_event(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -124,7 +125,7 @@ def test_individual_feature_event_is_queued_with_index_event(): check_summary_event(output[2]) def test_user_is_filtered_in_index_event(): - with DefaultEventProcessor(Config(all_attributes_private = True), mock_http) as ep: + with DefaultTestProcessor(all_attributes_private = True) as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -138,7 +139,7 @@ def test_user_is_filtered_in_index_event(): check_summary_event(output[2]) def test_user_attrs_are_stringified_in_index_event(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -152,7 +153,7 @@ def test_user_attrs_are_stringified_in_index_event(): check_summary_event(output[2]) def test_feature_event_can_contain_inline_user(): - with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + with DefaultTestProcessor(inline_users_in_events = True) as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -165,7 +166,7 @@ def test_feature_event_can_contain_inline_user(): check_summary_event(output[1]) def test_user_is_filtered_in_feature_event(): - with DefaultEventProcessor(Config(inline_users_in_events = True, all_attributes_private = True), mock_http) as ep: + with DefaultTestProcessor(inline_users_in_events = True, all_attributes_private = True) as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -178,7 +179,7 @@ def test_user_is_filtered_in_feature_event(): check_summary_event(output[1]) def test_user_attrs_are_stringified_in_feature_event(): - with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + with DefaultTestProcessor(inline_users_in_events = True) as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -191,7 +192,7 @@ def test_user_attrs_are_stringified_in_feature_event(): check_summary_event(output[1]) def test_index_event_is_still_generated_if_inline_users_is_true_but_feature_event_is_not_tracked(): - with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as 
ep: + with DefaultTestProcessor(inline_users_in_events = True) as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': False @@ -204,7 +205,7 @@ def test_index_event_is_still_generated_if_inline_users_is_true_but_feature_even check_summary_event(output[1]) def test_two_events_for_same_user_only_produce_one_index_event(): - with DefaultEventProcessor(Config(user_keys_flush_interval = 300), mock_http) as ep: + with DefaultTestProcessor(user_keys_flush_interval = 300) as ep: e0 = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -221,7 +222,7 @@ def test_two_events_for_same_user_only_produce_one_index_event(): check_summary_event(output[3]) def test_new_index_event_is_added_if_user_cache_has_been_cleared(): - with DefaultEventProcessor(Config(user_keys_flush_interval = 0.1), mock_http) as ep: + with DefaultTestProcessor(user_keys_flush_interval = 0.1) as ep: e0 = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True @@ -240,7 +241,7 @@ def test_new_index_event_is_added_if_user_cache_has_been_cleared(): check_summary_event(output[4]) def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: future_time = now() + 100000 e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, @@ -256,7 +257,7 @@ def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode(): check_summary_event(output[2]) def test_event_can_be_both_tracked_and_debugged(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: future_time = now() + 100000 e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, @@ -273,7 +274,7 @@ def test_event_can_be_both_tracked_and_debugged(): check_summary_event(output[3]) def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: # Pick a server time that is somewhat behind the client time server_time = now() - 20000 @@ -299,7 +300,7 @@ def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_se check_summary_event(output[1]) def test_debug_mode_expires_based_on_server_time_if_server_time_is_later_than_client_time(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: # Pick a server time that is somewhat ahead of the client time server_time = now() + 20000 @@ -325,7 +326,7 @@ def test_debug_mode_expires_based_on_server_time_if_server_time_is_later_than_cl check_summary_event(output[1]) def test_two_feature_events_for_same_user_generate_only_one_index_event(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: e1 = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value1', 'default': 'default', 'trackEvents': False @@ -343,7 +344,7 @@ def test_two_feature_events_for_same_user_generate_only_one_index_event(): check_summary_event(output[1]) def test_nontracked_events_are_summarized(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: e1 = { 'kind': 'feature', 'key': 'flagkey1', 'version': 11, 'user': user, 'variation': 1, 
'value': 'value1', 'default': 'default1', 'trackEvents': False @@ -374,7 +375,7 @@ def test_nontracked_events_are_summarized(): } def test_custom_event_is_queued_with_user(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '}, 'metricValue': 1.5 } ep.send_event(e) @@ -384,7 +385,7 @@ def test_custom_event_is_queued_with_user(): check_custom_event(output[1], e, None) def test_custom_event_can_contain_inline_user(): - with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + with DefaultTestProcessor(inline_users_in_events = True) as ep: e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } ep.send_event(e) @@ -393,7 +394,7 @@ def test_custom_event_can_contain_inline_user(): check_custom_event(output[0], e, user) def test_user_is_filtered_in_custom_event(): - with DefaultEventProcessor(Config(inline_users_in_events = True, all_attributes_private = True), mock_http) as ep: + with DefaultTestProcessor(inline_users_in_events = True, all_attributes_private = True) as ep: e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } ep.send_event(e) @@ -402,7 +403,7 @@ def test_user_is_filtered_in_custom_event(): check_custom_event(output[0], e, filtered_user) def test_user_attrs_are_stringified_in_custom_event(): - with DefaultEventProcessor(Config(inline_users_in_events = True), mock_http) as ep: + with DefaultTestProcessor(inline_users_in_events = True) as ep: e = { 'kind': 'custom', 'key': 'eventkey', 'user': numeric_user, 'data': { 'thing': 'stuff '} } ep.send_event(e) @@ -411,13 +412,13 @@ def test_user_attrs_are_stringified_in_custom_event(): check_custom_event(output[0], e, stringified_numeric_user) def test_nothing_is_sent_if_there_are_no_events(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: ep.flush() ep._wait_until_inactive() assert mock_http.request_data is None def test_sdk_key_is_sent(): - with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: ep.send_event({ 'kind': 'identify', 'user': user }) ep.flush() ep._wait_until_inactive() @@ -425,7 +426,7 @@ def test_sdk_key_is_sent(): assert mock_http.request_headers.get('Authorization') == 'SDK_KEY' def test_wrapper_header_not_sent_when_not_set(): - with DefaultEventProcessor(Config(), mock_http) as ep: + with DefaultTestProcessor() as ep: ep.send_event({ 'kind': 'identify', 'user': user }) ep.flush() ep._wait_until_inactive() @@ -433,7 +434,7 @@ def test_wrapper_header_not_sent_when_not_set(): assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') is None def test_wrapper_header_sent_when_set(): - with DefaultEventProcessor(Config(wrapper_name = "Flask", wrapper_version = "0.0.1"), mock_http) as ep: + with DefaultTestProcessor(wrapper_name = "Flask", wrapper_version = "0.0.1") as ep: ep.send_event({ 'kind': 'identify', 'user': user }) ep.flush() ep._wait_until_inactive() @@ -441,7 +442,7 @@ def test_wrapper_header_sent_when_set(): assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask/0.0.1" def test_wrapper_header_sent_without_version(): - with DefaultEventProcessor(Config(wrapper_name = "Flask"), mock_http) as ep: + with DefaultTestProcessor(wrapper_name = "Flask") as ep: ep.send_event({ 'kind': 'identify', 'user': user }) ep.flush() ep._wait_until_inactive() @@ -468,7 
+469,7 @@ def test_does_not_block_on_full_inbox(): ep_inbox_holder = [ None ] ep_inbox = None - def dispatcher_factory(inbox, config, http): + def dispatcher_factory(inbox, config, http, diag): ep_inbox_holder[0] = inbox # it's an array because otherwise it's hard for a closure to modify a variable return None # the dispatcher object itself doesn't matter, we only manipulate the inbox def event_consumer(): @@ -540,7 +541,7 @@ def _verify_https_proxy_is_used(server, config): assert req.method == 'CONNECT' def verify_unrecoverable_http_error(status): - with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: mock_http.set_response_status(status) ep.send_event({ 'kind': 'identify', 'user': user }) ep.flush() @@ -553,7 +554,7 @@ def verify_unrecoverable_http_error(status): assert mock_http.request_data is None def verify_recoverable_http_error(status): - with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: mock_http.set_response_status(status) ep.send_event({ 'kind': 'identify', 'user': user }) ep.flush() diff --git a/testing/test_streaming.py b/testing/test_streaming.py index 37cf0148..0adf6738 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -22,7 +22,7 @@ def test_uses_stream_uri(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) - with StreamingUpdateProcessor(config, None, store, ready) as sp: + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() req = server.await_request() assert req.method == 'GET' @@ -37,7 +37,7 @@ def test_sends_headers(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) - with StreamingUpdateProcessor(config, None, store, ready) as sp: + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() req = server.await_request() assert req.headers.get('Authorization') == 'sdk-key' @@ -53,7 +53,7 @@ def test_sends_wrapper_header(): wrapper_name = 'Flask', wrapper_version = '0.1.0') server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) - with StreamingUpdateProcessor(config, None, store, ready) as sp: + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() req = server.await_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' @@ -67,7 +67,7 @@ def test_sends_wrapper_header_without_version(): wrapper_name = 'Flask') server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) - with StreamingUpdateProcessor(config, None, store, ready) as sp: + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() req = server.await_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' @@ -98,7 +98,7 @@ def _verify_http_proxy_is_used(server, config): store = InMemoryFeatureStore() ready = Event() server.setup_response(config.stream_base_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) - with StreamingUpdateProcessor(config, None, store, ready) as sp: + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the # HTTP client, so we should be able to see the request go 
through. Note that the URI path will @@ -112,7 +112,7 @@ def _verify_https_proxy_is_used(server, config): store = InMemoryFeatureStore() ready = Event() server.setup_response(config.stream_base_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) - with StreamingUpdateProcessor(config, None, store, ready) as sp: + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but # it can still record that it *got* the request, which proves that the request went to the proxy. From e50ad29e2cb97307683c4ebc6676da19e8a69311 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Mon, 30 Dec 2019 21:57:21 +0000 Subject: [PATCH 173/356] Track events in last batch. --- ldclient/event_processor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index c7ce1b27..7d88b64c 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -351,6 +351,8 @@ def _trigger_flush(self): if self._disabled: return payload = self._outbox.get_payload() + if self._diagnostic_accumulator: + self._diagnostic_accumulator.record_events_in_batch(len(payload.events)) if len(payload.events) > 0 or len(payload.summary.counters) > 0: task = EventPayloadSendTask(self._http, self._config, self._formatter, payload, self._handle_response) @@ -375,7 +377,7 @@ def _handle_response(self, r): return def _send_and_reset_diagnostics(self): - if (self._diagnostic_accumulator): + if self._diagnostic_accumulator: dropped_event_count = self._outbox.get_and_clear_dropped_count() stats_event = self._diagnostic_accumulator.create_event_and_reset(dropped_event_count, self._deduplicated_users) self._deduplicated_users = 0 From f6ad20136112fa18d220da04e9afc7a82d8ad075 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Mon, 30 Dec 2019 22:07:33 +0000 Subject: [PATCH 174/356] Fix sdk version field, some stylistic improvements. --- ldclient/diagnostics.py | 4 +++- ldclient/event_processor.py | 8 ++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 3acb96fd..62913e45 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -6,6 +6,8 @@ import time import uuid +from ldclient.version import VERSION + class _DiagnosticAccumulator(object): def __init__(self, diagnostic_id): self.diagnostic_id = diagnostic_id @@ -81,7 +83,7 @@ def _create_diagnostic_config_object(config): def _create_diagnostic_sdk_object(config): return {'name': 'python-server-sdk', - 'version': 6, #VERSION, + 'version': VERSION, 'wrapperName': config.wrapper_name, 'wrapperVersion': config.wrapper_version} diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 7d88b64c..2045d5de 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -224,7 +224,7 @@ def __init__(self, capacity): def add_event(self, event): if len(self._events) >= self._capacity: - self._dropped_events = self._dropped_events + 1 + self._dropped_events += 1 if not self._exceeded_capacity: log.warning("Exceeded event queue capacity. 
Increase capacity to avoid dropping events.") self._exceeded_capacity = True @@ -236,9 +236,9 @@ def add_to_summary(self, event): self._summarizer.summarize_event(event) def get_and_clear_dropped_count(self): - ret = self._dropped_events + dropped_count = self._dropped_events self._dropped_events = 0 - return ret + return dropped_count def get_payload(self): return FlushPayload(self._events, self._summarizer.snapshot()) @@ -319,7 +319,7 @@ def _process_event(self, event): already_seen = self.notice_user(user) add_index_event = not is_index_event and not already_seen if not is_index_event and already_seen: - self._deduplicated_users = self._deduplicated_users + 1 + self._deduplicated_users += 1 if add_index_event: ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user } From 0375f70f81e7e6c44975946f5a63fb252cc1de06 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 31 Dec 2019 17:37:30 +0000 Subject: [PATCH 175/356] Last of diagnostic configuration object fields. --- ldclient/diagnostics.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 62913e45..b8524e81 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -73,13 +73,11 @@ def _create_diagnostic_config_object(config): 'usingRelayDaemon': config.use_ldd, 'allAttributesPrivate': config.all_attributes_private, 'pollingIntervalMillis': config.poll_interval * 1000, - #'reconnectTimeMillis': check, 'userKeysCapacity': config.user_keys_capacity, 'userKeysFlushIntervalMillis': config.user_keys_flush_interval * 1000, 'inlineUsersInEvents': config.inline_users_in_events, 'diagnosticRecordingIntervalMillis': config.diagnostic_recording_interval * 1000, - #'featureStoreFactory': check, - } + 'featureStoreFactory': config.feature_store.__class__.__name__} def _create_diagnostic_sdk_object(config): return {'name': 'python-server-sdk', From 5f2ca11d95405f117e9b08b7ed59b6d076b14b4c Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 31 Dec 2019 17:55:19 +0000 Subject: [PATCH 176/356] Fill out rest of platform fields. --- ldclient/diagnostics.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index b8524e81..2c3a0437 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -5,6 +5,7 @@ import threading import time import uuid +import platform from ldclient.version import VERSION @@ -86,4 +87,9 @@ def _create_diagnostic_sdk_object(config): 'wrapperVersion': config.wrapper_version} def _create_diagnostic_platform_object(): - return {'name': 'python'} + return {'name': 'python', + 'osArch': platform.machine(), + 'osName': platform.system(), + 'osVersion': platform.release(), + 'pythonVersion': platform.python_version(), + 'pythonImplementation': platform.python_implementation()} From 49a4ea9f0a4cf265d9cf426d7a10aaf1edf437b0 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 31 Dec 2019 18:15:45 +0000 Subject: [PATCH 177/356] Cleanup and failed stream initialization tracking. 
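Earlier in this branch, run() recorded a successful init after every message, whether or not it parsed, and never recorded failed attempts. The shape of the loop after this patch, reduced to a sketch (backoff, ready-event, and shutdown handling elided):

    # Simplified from StreamingUpdateProcessor.run(); _record_stream_init is a
    # no-op once _es_started has been cleared, so each attempt is counted at most once.
    def run(self):
        while self._running:
            self._es_started = int(time.time() * 1000)  # stamp the attempt
            try:
                for msg in self._connect():
                    if self.process_message(self._store, self._requester, msg):
                        self._record_stream_init(False)  # first parsed message: success
                        self._es_started = None
            except Exception:
                self._record_stream_init(True)           # any earlier error: failure
                self._es_started = None
                time.sleep(1)
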
--- ldclient/diagnostics.py | 4 ---- ldclient/event_processor.py | 7 +------ ldclient/streaming.py | 9 +++++++-- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 2c3a0437..751356dd 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -1,7 +1,3 @@ -#DEFAULT_CONFIG = Config.default() -#DEFAULT_BASE_URI = DEFAULT_CONFIG.base_uri -#DEFAULT_EVENTS_URI = DEFAULT_CONFIG.events_uri -#DEFAULT_STREAM_BASE_URI = DEFAULT_CONFIG.stream_base_uri import threading import time import uuid diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 2045d5de..a89be59b 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -185,12 +185,7 @@ def __init__(self, http, config, event_body, response_fn): self._response_fn = response_fn def run_thread(self): - try: - Thread(target = self._do_send()).start() - except Exception: - log.warning( - 'Unhandled exception in event processor. Analytics events were not processed.', - exc_info=True) + Thread(target = self._do_send).start() def _do_send(self): # noinspection PyBroadException diff --git a/ldclient/streaming.py b/ldclient/streaming.py index 2016e2d3..c159571a 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -60,19 +60,24 @@ def run(self): if not self._running: break message_ok = self.process_message(self._store, self._requester, msg) - self._record_stream_init(False) - self._es_started = None + if message_ok: + self._record_stream_init(False) + self._es_started = None if message_ok is True and self._ready.is_set() is False: log.info("StreamingUpdateProcessor initialized ok.") self._ready.set() except UnsuccessfulResponseException as e: log.error(http_error_message(e.status, "stream connection")) + self._record_stream_init(True) + self._es_started = None if not is_http_error_recoverable(e.status): self._ready.set() # if client is initializing, make it stop waiting; has no effect if already inited self.stop() break except Exception as e: log.warning("Caught exception. Restarting stream connection after one second. %s" % e) + self._record_stream_init(True) + self._es_started = None # no stacktrace here because, for a typical connection error, it'll just be a lengthy tour of urllib3 internals time.sleep(1) From 6f9ca76855e162e7fc96d98ef09614738f800c08 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 31 Dec 2019 18:44:04 +0000 Subject: [PATCH 178/356] Add diagnostic config option test. 
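The new tests mirror the existing poll_interval ones: out-of-range values are clamped in Config.__init__ rather than rejected. The contract being pinned down, in short (behavior from the max() clamp added at the start of this branch):

    from ldclient.config import Config

    # At or above the 60-second floor, the value is kept as given...
    assert Config(sdk_key='SDK_KEY', diagnostic_recording_interval=61).diagnostic_recording_interval == 61
    # ...below it, Config silently raises the value to the minimum.
    assert Config(sdk_key='SDK_KEY', diagnostic_recording_interval=59).diagnostic_recording_interval == 60
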
--- testing/test_config.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/testing/test_config.py b/testing/test_config.py index a5f5e065..88add31c 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -15,9 +15,17 @@ def test_copy_config(): assert new_config.stream is False def test_can_set_valid_poll_interval(): - config = Config(sdk_key = "SDK_KEY", poll_interval = 31) - assert config.poll_interval == 31 + config = Config(sdk_key = "SDK_KEY", poll_interval = 31) + assert config.poll_interval == 31 def test_minimum_poll_interval_is_enforced(): - config = Config(sdk_key = "SDK_KEY", poll_interval = 29) - assert config.poll_interval == 30 + config = Config(sdk_key = "SDK_KEY", poll_interval = 29) + assert config.poll_interval == 30 + +def test_can_set_valid_diagnostic_interval(): + config = Config(sdk_key = "SDK_KEY", diagnostic_recording_interval=61) + assert config.diagnostic_recording_interval == 61 + +def test_minimum_diagnostic_interval_is_enforced(): + config = Config(sdk_key = "SDK_KEY", diagnostic_recording_interval=59) + assert config.diagnostic_recording_interval == 60 From aa703fb2ca73c21538e22db569b4f3f9fbd7d54c Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 31 Dec 2019 19:08:42 +0000 Subject: [PATCH 179/356] Add tests for diagnostics.py --- ldclient/diagnostics.py | 7 +++- testing/test_diagnostics.py | 76 +++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 testing/test_diagnostics.py diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 751356dd..4ce593b2 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -1,3 +1,8 @@ +""" +Implementation details of the diagnostic event generation. +""" +# currently excluded from documentation - see docs/README.md + import threading import time import uuid @@ -32,7 +37,7 @@ def create_event_and_reset(self, dropped_events, deduplicated_users): current_time = int(time.time() * 1000) periodic_event = _diagnostic_base_fields('diagnostic', current_time, self.diagnostic_id) - periodic_event.update({'dataSincedate': self.data_since_date, + periodic_event.update({'dataSinceDate': self.data_since_date, 'droppedEvents': dropped_events, 'deduplicatedUsers': deduplicated_users, 'eventsInLastBatch': events_in_batch, diff --git a/testing/test_diagnostics.py b/testing/test_diagnostics.py new file mode 100644 index 00000000..77c49a2e --- /dev/null +++ b/testing/test_diagnostics.py @@ -0,0 +1,76 @@ +import json +import uuid + +from ldclient.config import Config +from ldclient.diagnostics import create_diagnostic_id, create_diagnostic_init, _DiagnosticAccumulator + +def test_create_diagnostic_id(): + test_config = Config(sdk_key = "SDK_KEY") + diag_id = create_diagnostic_id(test_config); + assert len(diag_id) == 2 + uid = diag_id['diagnosticId'] + # Will throw if invalid UUID4 + uuid.UUID('urn:uuid:' + uid) + assert diag_id['sdkKeySuffix'] == 'DK_KEY' + +def test_create_diagnostic_init(): + test_config = Config(sdk_key = "SDK_KEY", wrapper_name='django', wrapper_version = '5.1.1') + diag_id = create_diagnostic_id(test_config); + diag_init = create_diagnostic_init(100, diag_id, test_config) + assert len(diag_init) == 6 + assert diag_init['kind'] == 'diagnostic-init' + assert diag_init['id'] == diag_id + assert diag_init['creationDate'] == 100 + assert diag_init['sdk'] + assert diag_init['platform'] + assert diag_init['configuration'] + + # Verify converts to json without failure + json.dumps(diag_init) + +def 
test_diagnostic_accumulator(): + test_config = Config(sdk_key = "SDK_KEY") + diag_id = create_diagnostic_id(test_config); + diag_accum = _DiagnosticAccumulator(diag_id) + + # Test default periodic event + def_diag_event = diag_accum.create_event_and_reset(0, 0) + assert len(def_diag_event) == 8 + assert def_diag_event['kind'] == 'diagnostic' + assert def_diag_event['id'] == diag_id + assert def_diag_event['creationDate'] == diag_accum.data_since_date + assert def_diag_event['dataSinceDate'] + assert def_diag_event['droppedEvents'] == 0 + assert def_diag_event['deduplicatedUsers'] == 0 + assert def_diag_event['eventsInLastBatch'] == 0 + assert def_diag_event['streamInits'] == [] + + # Verify converts to json without failure + json.dumps(def_diag_event) + + # Test periodic event after recording values + diag_accum.record_stream_init(100, 100, False) + diag_accum.record_stream_init(300, 200, True) + diag_accum.record_events_in_batch(10) + diag_accum.record_events_in_batch(50) + diag_event = diag_accum.create_event_and_reset(10, 15) + assert len(diag_event) == 8 + assert diag_event['kind'] == 'diagnostic' + assert diag_event['id'] == diag_id + assert diag_event['creationDate'] == diag_accum.data_since_date + assert diag_event['dataSinceDate'] == def_diag_event['creationDate'] + assert diag_event['droppedEvents'] == 10 + assert diag_event['deduplicatedUsers'] == 15 + assert diag_event['eventsInLastBatch'] == 50 + assert diag_event['streamInits'] == [{'timestamp': 100, 'durationMillis': 100, 'failed': False}, + {'timestamp': 300, 'durationMillis': 200, 'failed': True}] + json.dumps(diag_event) + + reset_diag_event = diag_accum.create_event_and_reset(0, 0) + assert reset_diag_event['creationDate'] == diag_accum.data_since_date + assert reset_diag_event['dataSinceDate'] == diag_event['creationDate'] + del reset_diag_event['creationDate'] + del def_diag_event['creationDate'] + del reset_diag_event['dataSinceDate'] + del def_diag_event['dataSinceDate'] + assert reset_diag_event == def_diag_event From 18d73407ecb9cce40e3263d0728099fdc9a4cef0 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 31 Dec 2019 19:34:57 +0000 Subject: [PATCH 180/356] Testing rest of diagnostic fields. 
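These tests also pin down the payload's reporting conventions, which are easy to miss when reading the diff: Config accepts seconds while the diagnostic payload reports milliseconds, URIs are reported only as differs-from-default booleans, and the feature store is reported by class name. For example (a sketch against the helpers in this patch, not an excerpt from the tests):

    from ldclient.config import Config
    from ldclient.diagnostics import _create_diagnostic_config_object

    diag = _create_diagnostic_config_object(Config(sdk_key='SDK_KEY', flush_interval=1, poll_interval=60))
    assert diag['eventsFlushIntervalMillis'] == 1000   # seconds in, milliseconds out
    assert diag['pollingIntervalMillis'] == 60000
    assert diag['customBaseURI'] is False              # no raw URI is ever sent
    assert diag['featureStoreFactory'] == 'InMemoryFeatureStore'
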
--- testing/test_diagnostics.py | 67 ++++++++++++++++++++++++++++++++++--- 1 file changed, 63 insertions(+), 4 deletions(-) diff --git a/testing/test_diagnostics.py b/testing/test_diagnostics.py index 77c49a2e..0a218094 100644 --- a/testing/test_diagnostics.py +++ b/testing/test_diagnostics.py @@ -2,7 +2,7 @@ import uuid from ldclient.config import Config -from ldclient.diagnostics import create_diagnostic_id, create_diagnostic_init, _DiagnosticAccumulator +from ldclient.diagnostics import create_diagnostic_id, create_diagnostic_init, _DiagnosticAccumulator, _create_diagnostic_config_object def test_create_diagnostic_id(): test_config = Config(sdk_key = "SDK_KEY") @@ -21,13 +21,72 @@ def test_create_diagnostic_init(): assert diag_init['kind'] == 'diagnostic-init' assert diag_init['id'] == diag_id assert diag_init['creationDate'] == 100 - assert diag_init['sdk'] - assert diag_init['platform'] - assert diag_init['configuration'] + + assert diag_init['sdk']['name'] == 'python-server-sdk' + assert diag_init['sdk']['version'] + assert diag_init['sdk']['wrapperName'] == 'django' + assert diag_init['sdk']['wrapperVersion'] == '5.1.1' + + assert len(diag_init['platform']) == 6 + assert diag_init['platform']['name'] == 'python' + assert all(x in diag_init['platform'].keys() for x in ['osArch', 'osName', 'osVersion', 'pythonVersion', 'pythonImplementation']) + + assert diag_init['configuration'] == _create_diagnostic_config_object(test_config) # Verify converts to json without failure json.dumps(diag_init) +def test_create_diagnostic_config_defaults(): + test_config = Config() + diag_config = _create_diagnostic_config_object(test_config) + + assert len(diag_config) == 17 + assert diag_config['customBaseURI'] is False + assert diag_config['customEventsURI'] is False + assert diag_config['customStreamURI'] is False + assert diag_config['eventsCapacity'] == 10000 + assert diag_config['connectTimeoutMillis'] == 10000 + assert diag_config['socketTimeoutMillis'] == 15000 + assert diag_config['eventsFlushIntervalMillis'] == 5000 + assert diag_config['usingProxy'] is False + assert diag_config['streamingDisabled'] is False + assert diag_config['usingRelayDaemon'] is False + assert diag_config['allAttributesPrivate'] is False + assert diag_config['pollingIntervalMillis'] == 30000 + assert diag_config['userKeysCapacity'] == 1000 + assert diag_config['userKeysFlushIntervalMillis'] == 300000 + assert diag_config['inlineUsersInEvents'] is False + assert diag_config['diagnosticRecordingIntervalMillis'] == 900000 + assert diag_config['featureStoreFactory'] == 'InMemoryFeatureStore' + +def test_create_diagnostic_config_custom(): + test_config = Config(base_uri='https://test.com', events_uri='https://test.com', + connect_timeout=1, read_timeout=1, events_max_pending=10, + flush_interval=1, stream_uri='https://test.com', + stream=False, poll_interval=60, use_ldd=True, feature_store = 5, + all_attributes_private=True, user_keys_capacity=10, user_keys_flush_interval=60, + inline_users_in_events=True, http_proxy='', diagnostic_recording_interval=60) + diag_config = _create_diagnostic_config_object(test_config) + + assert len(diag_config) == 17 + assert diag_config['customBaseURI'] is True + assert diag_config['customEventsURI'] is True + assert diag_config['customStreamURI'] is True + assert diag_config['eventsCapacity'] == 10 + assert diag_config['connectTimeoutMillis'] == 1000 + assert diag_config['socketTimeoutMillis'] == 1000 + assert diag_config['eventsFlushIntervalMillis'] == 1000 + assert 
diag_config['usingProxy'] is True + assert diag_config['streamingDisabled'] is True + assert diag_config['usingRelayDaemon'] is True + assert diag_config['allAttributesPrivate'] is True + assert diag_config['pollingIntervalMillis'] == 60000 + assert diag_config['userKeysCapacity'] == 10 + assert diag_config['userKeysFlushIntervalMillis'] == 60000 + assert diag_config['inlineUsersInEvents'] is True + assert diag_config['diagnosticRecordingIntervalMillis'] == 60000 + assert diag_config['featureStoreFactory'] == 'int' + def test_diagnostic_accumulator(): test_config = Config(sdk_key = "SDK_KEY") diag_id = create_diagnostic_id(test_config); From c6904c763991d90bb5ce99c84d856fedb62fb5bd Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Tue, 31 Dec 2019 21:35:29 +0000 Subject: [PATCH 181/356] Test that streaming update processor records successful and unsuccessful connection attempts in the diagnostic accumulator when available. --- testing/test_streaming.py | 48 ++++++++++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/testing/test_streaming.py b/testing/test_streaming.py index 0adf6738..229248a7 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -1,6 +1,7 @@ from threading import Event from ldclient.config import Config +from ldclient.diagnostics import _DiagnosticAccumulator from ldclient.feature_store import InMemoryFeatureStore from ldclient.streaming import StreamingUpdateProcessor from ldclient.version import VERSION @@ -8,6 +9,7 @@ fake_event = 'event:put\ndata: {"data":{"flags":{},"segments":{}}}\n\n' +response_headers = { 'Content-Type': 'text/event-stream' } # Note that our simple HTTP stub server implementation does not actually do streaming responses, so # in these tests the connection will get closed after the response, causing the streaming processor @@ -20,7 +22,7 @@ def test_uses_stream_uri(): with start_server() as server: config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) - server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + server.setup_response('/all', 200, fake_event, response_headers) with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() @@ -35,7 +37,7 @@ def test_sends_headers(): with start_server() as server: config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) - server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + server.setup_response('/all', 200, fake_event, response_headers) with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() @@ -51,7 +53,7 @@ def test_sends_wrapper_header(): with start_server() as server: config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, wrapper_name = 'Flask', wrapper_version = '0.1.0') - server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + server.setup_response('/all', 200, fake_event, response_headers) with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() @@ -65,7 +67,7 @@ def test_sends_wrapper_header_without_version(): with start_server() as server: config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, wrapper_name = 'Flask') - server.setup_response('/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + server.setup_response('/all', 200, fake_event, response_headers) with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() @@ -97,7 +99,7 @@ def test_can_use_https_proxy_via_config(): def 
_verify_http_proxy_is_used(server, config): store = InMemoryFeatureStore() ready = Event() - server.setup_response(config.stream_base_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + server.setup_response(config.stream_base_uri + '/all', 200, fake_event, response_headers) with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the @@ -111,10 +113,44 @@ def _verify_http_proxy_is_used(server, config): def _verify_https_proxy_is_used(server, config): store = InMemoryFeatureStore() ready = Event() - server.setup_response(config.stream_base_uri + '/all', 200, fake_event, { 'Content-Type': 'text/event-stream' }) + server.setup_response(config.stream_base_uri + '/all', 200, fake_event, response_headers) with StreamingUpdateProcessor(config, None, store, ready, None) as sp: sp.start() # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but # it can still record that it *got* the request, which proves that the request went to the proxy. req = server.await_request() assert req.method == 'CONNECT' + +def test_records_diagnostic_on_stream_init_success(): + store = InMemoryFeatureStore() + ready = Event() + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) + server.setup_response('/all', 200, fake_event, response_headers) + diag_accum = _DiagnosticAccumulator(1) + + with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: + sp.start() + server.await_request() + server.await_request() + recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] + + assert len(recorded_inits) == 1 + assert recorded_inits[0]['failed'] is False + +def test_records_diagnostic_on_stream_init_failure(): + store = InMemoryFeatureStore() + ready = Event() + with start_server() as server: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) + server.setup_response('/all', 200, 'event:put\ndata: {\n\n', response_headers) + diag_accum = _DiagnosticAccumulator(1) + + with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: + sp.start() + server.await_request() + server.await_request() + recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] + + assert len(recorded_inits) == 1 + assert recorded_inits[0]['failed'] is True From 0f9f65c7f1e90dcad1e2e6f2110cbe95cb0ca503 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Thu, 2 Jan 2020 18:02:52 +0000 Subject: [PATCH 182/356] Improvements to testability of event processor. 
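Running diagnostic sends on a dedicated worker pool inside the dispatcher (instead of ad-hoc threads in the processor) lets the existing 'test_sync' handshake wait for diagnostic payloads as well, so tests can observe them deterministically. Roughly the pattern the new tests rely on (DefaultTestProcessor and flush_and_get_events are the helpers from testing/test_event_processor.py):

    with DefaultTestProcessor(diagnostic_opt_out=False) as ep:
        flush_and_get_events(ep)               # drain the diagnostic-init payload
        ep._send_diagnostic()                  # force a periodic diagnostic event
        diag_event = flush_and_get_events(ep)
        assert diag_event['kind'] == 'diagnostic'
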
--- ldclient/event_processor.py | 33 ++++++++++++++++++--------------- testing/test_event_processor.py | 6 ++++++ 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index a89be59b..69e1807e 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -178,16 +178,12 @@ def _do_send(self, output_events): class DiagnosticEventSendTask(object): - def __init__(self, http, config, event_body, response_fn): + def __init__(self, http, config, event_body): self._http = http self._config = config self._event_body = event_body - self._response_fn = response_fn - - def run_thread(self): - Thread(target = self._do_send).start() - def _do_send(self): + def run(self): # noinspection PyBroadException try: json_body = json.dumps(self._event_body) @@ -259,6 +255,13 @@ def __init__(self, inbox, config, http_client, diagnostic_accumulator=None): self._diagnostic_accumulator = diagnostic_accumulator self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush") + self._diagnostic_flush_workers = FixedThreadPool(1, "ldclient.diag_flush") if not config.diagnostic_opt_out else None + if not config.diagnostic_opt_out: + init_event = create_diagnostic_init(diagnostic_accumulator.data_since_date, + diagnostic_accumulator.diagnostic_id, + config) + task = DiagnosticEventSendTask(self._http, self._config, init_event) + self._diagnostic_flush_workers.execute(task.run) self._main_thread = Thread(target=self._run_main_loop) self._main_thread.daemon = True @@ -279,6 +282,8 @@ def _run_main_loop(self): self._send_and_reset_diagnostics() elif message.type == 'test_sync': self._flush_workers.wait() + if not self._config.diagnostic_opt_out: + self._diagnostic_flush_workers.wait() message.param.set() elif message.type == 'stop': self._do_shutdown() @@ -372,11 +377,12 @@ def _handle_response(self, r): return def _send_and_reset_diagnostics(self): - if self._diagnostic_accumulator: + if not self._config.diagnostic_opt_out: dropped_event_count = self._outbox.get_and_clear_dropped_count() stats_event = self._diagnostic_accumulator.create_event_and_reset(dropped_event_count, self._deduplicated_users) self._deduplicated_users = 0 - DiagnosticEventSendTask(self._http, self._config, stats_event, None).run_thread() + task = DiagnosticEventSendTask(self._http, self._config, stats_event) + self._diagnostic_flush_workers.execute(task.run) def _do_shutdown(self): self._flush_workers.stop() @@ -393,24 +399,19 @@ def __init__(self, config, http=None, dispatcher_class=None): self._users_flush_timer = RepeatingTimer(config.user_keys_flush_interval, self._flush_users) self._flush_timer.start() self._users_flush_timer.start() - self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, - target_base_uri=config.events_uri, - force_proxy=config.http_proxy) if http is None else http if not config.diagnostic_opt_out: diagnostic_id = create_diagnostic_id(config) self._diagnostic_accumulator = _DiagnosticAccumulator(diagnostic_id) - init_event = create_diagnostic_init(self._diagnostic_accumulator.data_since_date, diagnostic_id, config) - DiagnosticEventSendTask(self._http, config, init_event, None).run_thread() - self._diagnostic_event_timer = RepeatingTimer(config.diagnostic_recording_interval, self._send_diagnostic) self._diagnostic_event_timer.start() else: + self._diagnostic_event_timer = None self._diagnostic_accumulator = None self._close_lock = Lock() self._closed = False - (dispatcher_class or 
EventDispatcher)(self._inbox, config, self._http, self._diagnostic_accumulator) + (dispatcher_class or EventDispatcher)(self._inbox, config, http, self._diagnostic_accumulator) def send_event(self, event): event['creationDate'] = int(time.time() * 1000) @@ -426,6 +427,8 @@ def stop(self): self._closed = True self._flush_timer.stop() self._users_flush_timer.stop() + if self._diagnostic_event_timer: + self._diagnostic_event_timer.stop() self.flush() # Note that here we are not calling _post_to_inbox, because we *do* want to wait if the inbox # is full; an orderly shutdown can't happen unless these messages are received. diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index d6641471..b6fe5024 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -449,6 +449,12 @@ def test_wrapper_header_sent_without_version(): assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask" +def test_sdk_key_is_sent_on_diagnostic_request(): + with DefaultTestProcessor(sdk_key = 'SDK_KEY', diagnostic_opt_out=False) as ep: + ep._wait_until_inactive() + + assert mock_http.request_headers.get('Authorization') == 'SDK_KEY' + def test_no_more_payloads_are_sent_after_401_error(): verify_unrecoverable_http_error(401) From 689b231752f8a62ba13432735fa8a2df7f6af477 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Thu, 2 Jan 2020 21:14:28 +0000 Subject: [PATCH 183/356] Rest of event processor tests. --- testing/test_event_processor.py | 57 ++++++++++++++++++++++++++++++++- testing/test_streaming.py | 1 - 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index b6fe5024..b015433b 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -449,12 +449,67 @@ def test_wrapper_header_sent_without_version(): assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask" +def test_event_schema_set_on_event_send(): + with DefaultTestProcessor() as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + assert mock_http.request_headers.get('X-LaunchDarkly-Event-Schema') == "3" + def test_sdk_key_is_sent_on_diagnostic_request(): with DefaultTestProcessor(sdk_key = 'SDK_KEY', diagnostic_opt_out=False) as ep: ep._wait_until_inactive() - assert mock_http.request_headers.get('Authorization') == 'SDK_KEY' +def test_event_schema_not_set_on_diagnostic_send(): + with DefaultTestProcessor(diagnostic_opt_out=False) as ep: + ep._wait_until_inactive() + assert mock_http.request_headers.get('X-LaunchDarkly-Event-Schema') is None + +def test_init_diagnostic_event_sent(): + with DefaultTestProcessor(diagnostic_opt_out=False) as ep: + diag_init = flush_and_get_events(ep) + # Fields are tested in test_diagnostics.py + assert len(diag_init) == 6 + assert diag_init['kind'] == 'diagnostic-init' + +def test_periodic_diagnostic_includes_events_in_batch(): + with DefaultTestProcessor(diagnostic_opt_out=False) as ep: + # Ignore init event + flush_and_get_events(ep) + # Send a payload with a single event + ep.send_event({ 'kind': 'identify', 'user': user }) + flush_and_get_events(ep) + + ep._send_diagnostic() + diag_event = flush_and_get_events(ep) + assert len(diag_event) == 8 + assert diag_event['kind'] == 'diagnostic' + assert diag_event['eventsInLastBatch'] == 1 + assert diag_event['deduplicatedUsers'] == 0 + +def test_periodic_diagnostic_includes_deduplicated_users(): + with 
DefaultTestProcessor(diagnostic_opt_out=False) as ep: + # Ignore init event + flush_and_get_events(ep) + # Send two eval events with the same user to cause a user deduplication + e0 = { + 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + } + e1 = e0.copy(); + ep.send_event(e0) + ep.send_event(e1) + flush_and_get_events(ep) + + ep._send_diagnostic() + diag_event = flush_and_get_events(ep) + assert len(diag_event) == 8 + assert diag_event['kind'] == 'diagnostic' + assert diag_event['eventsInLastBatch'] == 3 + assert diag_event['deduplicatedUsers'] == 1 + def test_no_more_payloads_are_sent_after_401_error(): verify_unrecoverable_http_error(401) diff --git a/testing/test_streaming.py b/testing/test_streaming.py index 229248a7..3f6c166d 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -152,5 +152,4 @@ def test_records_diagnostic_on_stream_init_failure(): server.await_request() recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] - assert len(recorded_inits) == 1 assert recorded_inits[0]['failed'] is True From 08740f15a8bd797084ca96d4da7b70b6d8985fe9 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Mon, 6 Jan 2020 23:03:20 +0000 Subject: [PATCH 184/356] Remove janky reflection. --- ldclient/client.py | 22 +++++++++++++--------- ldclient/config.py | 3 +-- ldclient/event_processor.py | 12 +++--------- testing/test_event_processor.py | 13 ++++++++----- 4 files changed, 25 insertions(+), 25 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index c51b2b53..b235aa3a 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -8,6 +8,8 @@ import traceback from ldclient.config import Config as Config +from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator +from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_requester import FeatureRequesterImpl from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.flag import EvaluationDetail, evaluate, error_reason @@ -103,12 +105,7 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): if self._config.use_ldd: log.info("Started LaunchDarkly Client in LDD mode") - self._event_processor = self._make_event_processor(self._config) - - if callable(getattr(self._event_processor, 'retrieve_diagnostic_accumulator', None)): - diagnostic_accumulator = self._event_processor.retrieve_diagnostic_accumulator() - else: - diagnostic_accumulator = None + diagnostic_accumulator = self._set_event_processor(self._config) update_processor_ready = threading.Event() self._update_processor = self._make_update_processor(self._config, self._store, update_processor_ready, diagnostic_accumulator) @@ -124,10 +121,17 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): log.warning("Initialization timeout exceeded for LaunchDarkly Client or an error occurred. 
" "Feature Flags may not yet be available.") - def _make_event_processor(self, config): + def _set_event_processor(self, config): if config.offline or not config.send_events: - return NullEventProcessor() - return config.event_processor_class(config) + self._event_processor = NullEventProcessor() + return None + if not config.event_processor_class: + diagnostic_id = create_diagnostic_id(config) + diagnostic_accumulator = _DiagnosticAccumulator(diagnostic_id) + self._event_processor = DefaultEventProcessor(config, diagnostic_accumulator = diagnostic_accumulator) + return diagnostic_accumulator + self._event_processor = config.event_processor_class(config) + return None def _make_update_processor(self, config, store, ready, diagnostic_accumulator): if config.update_processor_class: diff --git a/ldclient/config.py b/ldclient/config.py index 8b1ee411..6fec9865 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -4,7 +4,6 @@ Note that the same class can also be imported from the ``ldclient.client`` submodule. """ -from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_store import InMemoryFeatureStore from ldclient.util import log @@ -133,7 +132,7 @@ def __init__(self, self.__poll_interval = max(poll_interval, 30) self.__use_ldd = use_ldd self.__feature_store = InMemoryFeatureStore() if not feature_store else feature_store - self.__event_processor_class = DefaultEventProcessor if not event_processor_class else event_processor_class + self.__event_processor_class = event_processor_class self.__feature_requester_class = feature_requester_class self.__connect_timeout = connect_timeout self.__read_timeout = read_timeout diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 69e1807e..259224dd 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -30,7 +30,7 @@ from ldclient.util import create_http_pool_manager from ldclient.util import log from ldclient.util import http_error_message, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response -from ldclient.diagnostics import create_diagnostic_init, create_diagnostic_id, _DiagnosticAccumulator +from ldclient.diagnostics import create_diagnostic_init __MAX_FLUSH_THREADS__ = 5 __CURRENT_EVENT_SCHEMA__ = 3 @@ -392,7 +392,7 @@ def _do_shutdown(self): class DefaultEventProcessor(EventProcessor): - def __init__(self, config, http=None, dispatcher_class=None): + def __init__(self, config, http=None, dispatcher_class=None, diagnostic_accumulator=None): self._inbox = queue.Queue(config.events_max_pending) self._inbox_full = False self._flush_timer = RepeatingTimer(config.flush_interval, self.flush) @@ -400,18 +400,15 @@ def __init__(self, config, http=None, dispatcher_class=None): self._flush_timer.start() self._users_flush_timer.start() if not config.diagnostic_opt_out: - diagnostic_id = create_diagnostic_id(config) - self._diagnostic_accumulator = _DiagnosticAccumulator(diagnostic_id) self._diagnostic_event_timer = RepeatingTimer(config.diagnostic_recording_interval, self._send_diagnostic) self._diagnostic_event_timer.start() else: self._diagnostic_event_timer = None - self._diagnostic_accumulator = None self._close_lock = Lock() self._closed = False - (dispatcher_class or EventDispatcher)(self._inbox, config, http, self._diagnostic_accumulator) + (dispatcher_class or EventDispatcher)(self._inbox, config, http, diagnostic_accumulator) def send_event(self, event): event['creationDate'] = int(time.time() * 1000) @@ -434,9 +431,6 @@ def stop(self): # is full; an 
orderly shutdown can't happen unless these messages are received. self._post_message_and_wait('stop') - def retrieve_diagnostic_accumulator(self): - return self._diagnostic_accumulator - def _post_to_inbox(self, message): try: self._inbox.put(message, block=False) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index b015433b..ef47ceaf 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -4,6 +4,7 @@ import time from ldclient.config import Config +from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.event_processor import DefaultEventProcessor from ldclient.util import log from testing.http_util import start_server @@ -66,7 +67,9 @@ class DefaultTestProcessor(DefaultEventProcessor): def __init__(self, **kwargs): if not 'diagnostic_opt_out' in kwargs: kwargs['diagnostic_opt_out'] = True - DefaultEventProcessor.__init__(self, Config(**kwargs), mock_http) + config = Config(**kwargs) + diagnostic_accumulator = _DiagnosticAccumulator(create_diagnostic_id(config)) + DefaultEventProcessor.__init__(self, config, mock_http, diagnostic_accumulator = diagnostic_accumulator) def test_identify_event_is_queued(): with DefaultTestProcessor() as ep: @@ -557,23 +560,23 @@ def start_consuming_events(): def test_can_use_http_proxy_via_environment_var(monkeypatch): with start_server() as server: monkeypatch.setenv('http_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real') + config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', diagnostic_opt_out = True) _verify_http_proxy_is_used(server, config) def test_can_use_https_proxy_via_environment_var(monkeypatch): with start_server() as server: monkeypatch.setenv('https_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real') + config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', diagnostic_opt_out = True) _verify_https_proxy_is_used(server, config) def test_can_use_http_proxy_via_config(): with start_server() as server: - config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', http_proxy=server.uri) + config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', http_proxy=server.uri, diagnostic_opt_out = True) _verify_http_proxy_is_used(server, config) def test_can_use_https_proxy_via_config(): with start_server() as server: - config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', http_proxy=server.uri) + config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', http_proxy=server.uri, diagnostic_opt_out = True) _verify_https_proxy_is_used(server, config) def _verify_http_proxy_is_used(server, config): From a26d4588236e905ec98d9ae09deb1d935460805f Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Mon, 6 Jan 2020 23:13:22 +0000 Subject: [PATCH 185/356] Test change to filesource optional test requirements. --- test-filesource-optional-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test-filesource-optional-requirements.txt b/test-filesource-optional-requirements.txt index e0a0e284..40e04279 100644 --- a/test-filesource-optional-requirements.txt +++ b/test-filesource-optional-requirements.txt @@ -1,2 +1,2 @@ -pyyaml>=3.0 +pyyaml>=3.0,<5.2 watchdog>=0.9 From ef256a58e5171fe6210cec994798d28d0356cdd5 Mon Sep 17 00:00:00 2001 From: Gavin Whelan Date: Fri, 17 Jan 2020 11:56:41 +0000 Subject: [PATCH 186/356] [ch61092] Add event payload ID on event requests. 
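Each analytics POST now carries a unique X-LaunchDarkly-Payload-ID header,
which lets the event service deduplicate a payload that arrives twice; that in
turn makes it safe to retry a failed POST once. In outline (a sketch of the
send path as changed in the diff below; urllib3 reuses the same headers on the
retry, so the payload ID is identical on both attempts):

    hdrs['X-LaunchDarkly-Payload-ID'] = str(uuid.uuid4())
    r = self._http.request('POST', uri, headers=hdrs, body=json_body,
                           retries=_EventRetry())  # retried once, with a flat
                                                   # 1-second backoff, on
                                                   # 400/408/429 or network errors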
--- ldclient/event_processor.py | 18 ++++++++++++++-- ldclient/util.py | 3 ++- test-filesource-optional-requirements.txt | 2 +- testing/stub_util.py | 25 ++++++++++++++++------ testing/test_event_processor.py | 26 +++++++++++++++++++++++ 5 files changed, 63 insertions(+), 11 deletions(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 93680c13..6e3baab2 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -11,6 +11,7 @@ import six import time import urllib3 +import uuid # noinspection PyBroadException try: @@ -26,7 +27,7 @@ from ldclient.interfaces import EventProcessor from ldclient.repeating_timer import RepeatingTimer from ldclient.util import UnsuccessfulResponseException -from ldclient.util import _headers +from ldclient.util import _headers, _retryable_statuses from ldclient.util import create_http_pool_manager from ldclient.util import log from ldclient.util import http_error_message, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response @@ -140,6 +141,18 @@ def _get_userkey(self, event): return str(event['user'].get('key')) +class _EventRetry(urllib3.Retry): + def __init__(self): + urllib3.Retry.__init__(self, total=1, + method_whitelist=False, # Enable retry on POST + status_forcelist=_retryable_statuses, + raise_on_status=False) + + # Override backoff time to be flat 1 second + def get_backoff_time(self): + return 1 + + class EventPayloadSendTask(object): def __init__(self, http, config, formatter, payload, response_fn): self._http = http @@ -164,12 +177,13 @@ def _do_send(self, output_events): log.debug('Sending events payload: ' + json_body) hdrs = _headers(self._config.sdk_key) hdrs['X-LaunchDarkly-Event-Schema'] = str(__CURRENT_EVENT_SCHEMA__) + hdrs['X-LaunchDarkly-Payload-ID'] = str(uuid.uuid4()) uri = self._config.events_uri r = self._http.request('POST', uri, headers=hdrs, timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), body=json_body, - retries=1) + retries=_EventRetry()) self._response_fn(r) return r except Exception as e: diff --git a/ldclient/util.py b/ldclient/util.py index 1d059798..c19190f2 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -37,6 +37,7 @@ # noinspection PyUnresolvedReferences __BASE_TYPES__ = (str, float, int, bool, unicode) +_retryable_statuses = [400, 408, 429] def _headers(sdk_key): return {'Authorization': sdk_key, 'User-Agent': 'PythonClient/' + VERSION, @@ -124,7 +125,7 @@ def throw_if_unsuccessful_response(resp): def is_http_error_recoverable(status): if status >= 400 and status < 500: - return (status == 400) or (status == 408) or (status == 429) # all other 4xx besides these are unrecoverable + return status in _retryable_statuses # all other 4xx besides these are unrecoverable return True # all other errors are recoverable diff --git a/test-filesource-optional-requirements.txt b/test-filesource-optional-requirements.txt index e0a0e284..40e04279 100644 --- a/test-filesource-optional-requirements.txt +++ b/test-filesource-optional-requirements.txt @@ -1,2 +1,2 @@ -pyyaml>=3.0 +pyyaml>=3.0,<5.2 watchdog>=0.9 diff --git a/testing/stub_util.py b/testing/stub_util.py index 80e53af6..41970edf 100644 --- a/testing/stub_util.py +++ b/testing/stub_util.py @@ -53,17 +53,20 @@ def getheader(self, name): class MockHttp(object): def __init__(self): + self._recorded_requests = [] self._request_data = None self._request_headers = None + self._response_func = None self._response_status = 200 self._server_time = None def 
request(self, method, uri, headers, timeout, body, retries): - self._request_headers = headers - self._request_data = body + self._recorded_requests.append((headers, body)) resp_hdr = dict() if self._server_time is not None: resp_hdr['date'] = formatdate(self._server_time / 1000, localtime=False, usegmt=True) + if self._response_func is not None: + return self._response_func() return MockResponse(self._response_status, resp_hdr) def clear(self): @@ -71,21 +74,29 @@ def clear(self): @property def request_data(self): - return self._request_data + if len(self._recorded_requests) != 0: + return self._recorded_requests[-1][1] @property def request_headers(self): - return self._request_headers + if len(self._recorded_requests) != 0: + return self._recorded_requests[-1][0] + + @property + def recorded_requests(self): + return self._recorded_requests def set_response_status(self, status): self._response_status = status - + + def set_response_func(self, response_func): + self._response_func = response_func + def set_server_time(self, timestamp): self._server_time = timestamp def reset(self): - self._request_headers = None - self._request_data = None + self._recorded_requests = [] class MockUpdateProcessor(UpdateProcessor): def __init__(self, config, store, ready): diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 9ef1b4f8..598038b2 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -2,6 +2,7 @@ import pytest from threading import Thread import time +import uuid from ldclient.config import Config from ldclient.event_processor import DefaultEventProcessor @@ -541,6 +542,31 @@ def verify_recoverable_http_error(status): ep._wait_until_inactive() assert mock_http.request_data is not None +def test_event_payload_id_is_sent(): + with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + headerVal = mock_http.request_headers.get('X-LaunchDarkly-Payload-ID') + assert headerVal is not None + # Throws on invalid UUID + uuid.UUID(headerVal) + +def test_event_payload_id_changes_between_requests(): + with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + firstPayloadId = mock_http.recorded_requests[0][0].get('X-LaunchDarkly-Payload-ID') + secondPayloadId = mock_http.recorded_requests[1][0].get('X-LaunchDarkly-Payload-ID') + assert firstPayloadId != secondPayloadId + def flush_and_get_events(ep): ep.flush() ep._wait_until_inactive() From 3a525e32945f4dee699d5670578997f0bcc42b1e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 10 Feb 2020 18:10:14 -0800 Subject: [PATCH 187/356] normalize data store type and OS name in diagnostic events --- ldclient/diagnostics.py | 17 +++++++++++++++-- ldclient/feature_store.py | 7 +++++-- ldclient/feature_store_helpers.py | 9 +++++++-- .../integrations/consul/consul_feature_store.py | 7 +++++-- .../dynamodb/dynamodb_feature_store.py | 5 ++++- .../integrations/redis/redis_feature_store.py | 7 +++++-- ldclient/interfaces.py | 16 ++++++++++++++++ testing/test_diagnostics.py | 13 ++++++++++--- 8 files changed, 67 insertions(+), 14 deletions(-) diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 4ce593b2..2890ca3a 100644 --- a/ldclient/diagnostics.py +++ 
b/ldclient/diagnostics.py @@ -79,7 +79,7 @@ def _create_diagnostic_config_object(config): 'userKeysFlushIntervalMillis': config.user_keys_flush_interval * 1000, 'inlineUsersInEvents': config.inline_users_in_events, 'diagnosticRecordingIntervalMillis': config.diagnostic_recording_interval * 1000, - 'featureStoreFactory': config.feature_store.__class__.__name__} + 'dataStoreType': _get_component_type_name(config.feature_store, config, 'memory')} def _create_diagnostic_sdk_object(config): return {'name': 'python-server-sdk', @@ -90,7 +90,20 @@ def _create_diagnostic_sdk_object(config): def _create_diagnostic_platform_object(): return {'name': 'python', 'osArch': platform.machine(), - 'osName': platform.system(), + 'osName': _normalize_os_name(platform.system()), 'osVersion': platform.release(), 'pythonVersion': platform.python_version(), 'pythonImplementation': platform.python_implementation()} + +def _get_component_type_name(component, config, default_name): + if component is not None: + if callable(getattr(component, 'describe_configuration', None)): + return component.describe_configuration(config) + return "custom" + return default_name + +def _normalize_os_name(name): + if name == 'Darwin': + return 'MacOS' + # Python already returns 'Linux' or 'Windows' for Linux or Windows, which is what we want + return name diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index efabe82e..501d8667 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -8,7 +8,7 @@ from collections import OrderedDict, defaultdict from ldclient.util import log -from ldclient.interfaces import FeatureStore +from ldclient.interfaces import DiagnosticDescription, FeatureStore from ldclient.rwlock import ReadWriteLock from six import iteritems @@ -75,7 +75,7 @@ def capacity(self): return self._capacity -class InMemoryFeatureStore(FeatureStore): +class InMemoryFeatureStore(FeatureStore, DiagnosticDescription): """The default feature store implementation, which holds all data in a thread-safe data structure in memory. """ @@ -163,6 +163,9 @@ def initialized(self): return self._initialized finally: self._lock.runlock() + + def describe_configuration(self, config): + return 'memory' class _FeatureStoreDataSetSorter: diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index 58f9a848..0f371f7b 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -4,10 +4,10 @@ from expiringdict import ExpiringDict -from ldclient.interfaces import FeatureStore +from ldclient.interfaces import DiagnosticDescription, FeatureStore -class CachingStoreWrapper(FeatureStore): +class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """A partial implementation of :class:`ldclient.interfaces.FeatureStore`. 
This class delegates the basic functionality to an implementation of @@ -100,6 +100,11 @@ def initialized(self): self._inited = True return result + def describe_configuration(self, config): + if callable(getattr(self._core, 'describe_configuration', None)): + return self._core.describe_configuration(config) + return "custom" + @staticmethod def _item_cache_key(kind, key): return "{0}:{1}".format(kind.namespace, key) diff --git a/ldclient/impl/integrations/consul/consul_feature_store.py b/ldclient/impl/integrations/consul/consul_feature_store.py index 6fc8652e..497828a3 100644 --- a/ldclient/impl/integrations/consul/consul_feature_store.py +++ b/ldclient/impl/integrations/consul/consul_feature_store.py @@ -10,7 +10,7 @@ from ldclient import log from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper -from ldclient.interfaces import FeatureStore, FeatureStoreCore +from ldclient.interfaces import DiagnosticDescription, FeatureStore, FeatureStoreCore # # Internal implementation of the Consul feature store. @@ -33,7 +33,7 @@ # process that did the Init will also receive the new data shortly and do its own Upsert. # -class _ConsulFeatureStoreCore(FeatureStoreCore): +class _ConsulFeatureStoreCore(DiagnosticDescription, FeatureStoreCore): def __init__(self, host, port, prefix, consul_opts): if not have_consul: raise NotImplementedError("Cannot use Consul feature store because the python-consul package is not installed") @@ -115,6 +115,9 @@ def initialized_internal(self): index, resp = self._client.kv.get(self._inited_key()) return (resp is not None) + def describe_configuration(self, config): + return 'Consul' + def _kind_key(self, kind): return self._prefix + kind.namespace diff --git a/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py index 23ca3fce..79842ef6 100644 --- a/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py +++ b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py @@ -10,7 +10,7 @@ from ldclient import log from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper -from ldclient.interfaces import FeatureStore, FeatureStoreCore +from ldclient.interfaces import DiagnosticDescription, FeatureStore, FeatureStoreCore # # Internal implementation of the DynamoDB feature store. 
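# The same pattern applies to any custom store core; a hypothetical
# third-party integration (MyStoreCore is illustrative only, not part of this
# patch, and its other FeatureStoreCore methods are omitted) would opt in by
# implementing DiagnosticDescription. Components without the method are
# reported as "custom" by _get_component_type_name.
#
#     class MyStoreCore(DiagnosticDescription, FeatureStoreCore):
#         def describe_configuration(self, config):
#             return 'MyStore'   # surfaces as dataStoreType in diagnostic events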
@@ -120,6 +120,9 @@ def initialized_internal(self): resp = self._get_item_by_keys(self._inited_key(), self._inited_key()) return resp.get('Item') is not None and len(resp['Item']) > 0 + def describe_configuration(self, config): + return 'DynamoDB' + def _prefixed_namespace(self, base): return base if self._prefix is None else (self._prefix + ':' + base) diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py index a23c2d66..eebe205d 100644 --- a/ldclient/impl/integrations/redis/redis_feature_store.py +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -8,11 +8,11 @@ pass from ldclient import log -from ldclient.interfaces import FeatureStoreCore +from ldclient.interfaces import DiagnosticDescription, FeatureStoreCore from ldclient.versioned_data_kind import FEATURES -class _RedisFeatureStoreCore(FeatureStoreCore): +class _RedisFeatureStoreCore(DiagnosticDescription, FeatureStoreCore): def __init__(self, url, prefix, max_connections): if not have_redis: raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") @@ -96,6 +96,9 @@ def initialized_internal(self): r = redis.Redis(connection_pool=self._pool) return r.exists(self._items_key(FEATURES)) + def describe_configuration(self, config): + return 'Redis' + def _before_update_transaction(self, base_key, key): # exposed for testing pass diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 48c517b8..1a319494 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -269,3 +269,19 @@ def get_one(self, kind, key): :return: """ pass + + +class DiagnosticDescription(object): + """ + Optional interface for components to describe their own configuration. + """ + + @abstractmethod + def describe_configuration(self, config): + """ + Used internally by the SDK to inspect the configuration. 
+ :param ldclient.config.Config config: the full configuration, in case this component depends on properties outside itself + :return: a string describing the type of the component, or None + :rtype: string + """ + pass diff --git a/testing/test_diagnostics.py b/testing/test_diagnostics.py index 0a218094..8bff0055 100644 --- a/testing/test_diagnostics.py +++ b/testing/test_diagnostics.py @@ -3,6 +3,8 @@ from ldclient.config import Config from ldclient.diagnostics import create_diagnostic_id, create_diagnostic_init, _DiagnosticAccumulator, _create_diagnostic_config_object +from ldclient.feature_store import CacheConfig +from ldclient.feature_store_helpers import CachingStoreWrapper def test_create_diagnostic_id(): test_config = Config(sdk_key = "SDK_KEY") @@ -57,13 +59,14 @@ def test_create_diagnostic_config_defaults(): assert diag_config['userKeysFlushIntervalMillis'] == 300000 assert diag_config['inlineUsersInEvents'] is False assert diag_config['diagnosticRecordingIntervalMillis'] == 900000 - assert diag_config['featureStoreFactory'] == 'InMemoryFeatureStore' + assert diag_config['dataStoreType'] == 'memory' def test_create_diagnostic_config_custom(): + test_store = CachingStoreWrapper(_TestStoreForDiagnostics(), CacheConfig.default()) test_config = Config(base_uri='https://test.com', events_uri='https://test.com', connect_timeout=1, read_timeout=1, events_max_pending=10, flush_interval=1, stream_uri='https://test.com', - stream=False, poll_interval=60, use_ldd=True, feature_store = 5, + stream=False, poll_interval=60, use_ldd=True, feature_store=test_store, all_attributes_private=True, user_keys_capacity=10, user_keys_flush_interval=60, inline_users_in_events=True, http_proxy='', diagnostic_recording_interval=60) diag_config = _create_diagnostic_config_object(test_config) @@ -85,7 +88,11 @@ def test_create_diagnostic_config_custom(): assert diag_config['userKeysFlushIntervalMillis'] == 60000 assert diag_config['inlineUsersInEvents'] is True assert diag_config['diagnosticRecordingIntervalMillis'] == 60000 - assert diag_config['featureStoreFactory'] == 'int' + assert diag_config['dataStoreType'] == 'MyFavoriteStore' + +class _TestStoreForDiagnostics(object): + def describe_configuration(self, config): + return 'MyFavoriteStore' def test_diagnostic_accumulator(): test_config = Config(sdk_key = "SDK_KEY") From 425dceb377699d2db3d4234ef1174c19c9c65bae Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 10 Feb 2020 18:15:29 -0800 Subject: [PATCH 188/356] gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index c949312e..f0def2a6 100644 --- a/.gitignore +++ b/.gitignore @@ -67,3 +67,5 @@ p2venv *.iml .vagrant test-packaging-venv + +.vscode/ From 27fb9a7509f27b3174485345946d85da288527d8 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 11 Feb 2020 12:36:01 -0800 Subject: [PATCH 189/356] copyedit to diagnostic event config property comment --- ldclient/config.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ldclient/config.py b/ldclient/config.py index 6fec9865..ae2e522c 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -104,11 +104,11 @@ def __init__(self, variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly service uses HTTPS, but a Relay Proxy instance could use HTTP). Setting this Config parameter will override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. 
- :param bool diagnostic_opt_out: Unless the diagnosticOptOut field is set to True, the client will send + :param bool diagnostic_opt_out: Unless this field is set to True, the client will send some diagnostics data to the LaunchDarkly servers in order to assist in the development of future SDK improvements. These diagnostics consist of an initial payload containing some details of SDK in use, - the SDK's configuration, and the platform the SDK is being run on; as well as payloads sent - periodically with information on irregular occurrences such as dropped events. + the SDK's configuration, and the platform the SDK is being run on, as well as periodic information + on irregular occurrences such as dropped events. :param int diagnostic_recording_interval: The interval in seconds at which periodic diagnostic data is sent. The default is 900 seconds (every 15 minutes) and the minimum value is 60 seconds. :param string wrapper_name: For use by wrapper libraries to set an identifying name for the wrapper From 13ddc54956046889f43927f896351d77a0c4c258 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 12 Feb 2020 12:45:00 -0800 Subject: [PATCH 190/356] fix spurious error after sending diagnostic event --- ldclient/event_processor.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 6bc19f7d..5c3457c1 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -209,8 +209,6 @@ def run(self): timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), body=json_body, retries=1) - if (self._response_fn): - self._response_fn(r) except Exception as e: log.warning( 'Unhandled exception in event processor. Diagnostic event was not sent. [%s]', e) From ad248d653f57d6fe102bf73c2953030389be5bdd Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 19 Mar 2020 16:34:32 -0700 Subject: [PATCH 191/356] make verify_ssl=False turn off certificate verification too (#129) --- ldclient/util.py | 17 +++++++---------- testing/http_util.py | 19 ++++++++++++++++--- testing/selfsigned.key | 5 +++++ testing/selfsigned.pem | 10 ++++++++++ testing/test_ldclient_tls.py | 35 +++++++++++++++++++++++++++++++++++ 5 files changed, 73 insertions(+), 13 deletions(-) create mode 100644 testing/selfsigned.key create mode 100644 testing/selfsigned.pem create mode 100644 testing/test_ldclient_tls.py diff --git a/ldclient/util.py b/ldclient/util.py index 27863f11..c6ad2ba7 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -98,24 +98,21 @@ def status(self): def create_http_pool_manager(num_pools=1, verify_ssl=False, target_base_uri=None, force_proxy=None): proxy_url = force_proxy or _get_proxy_url(target_base_uri) - if not verify_ssl: - if proxy_url is None: - return urllib3.PoolManager(num_pools=num_pools) - else: - return urllib3.ProxyManager(proxy_url, num_pools=num_pools) - + cert_reqs = 'CERT_REQUIRED' if verify_ssl else 'CERT_NONE' + ca_certs = certifi.where() if verify_ssl else None + if proxy_url is None: return urllib3.PoolManager( num_pools=num_pools, - cert_reqs='CERT_REQUIRED', - ca_certs=certifi.where() + cert_reqs=cert_reqs, + ca_certs=ca_certs ) else: return urllib3.ProxyManager( proxy_url, num_pools=num_pools, - cert_reqs='CERT_REQUIRED', - ca_certs=certifi.where() + cert_reqs=cert_reqs, + ca_certs = ca_certs ) def _get_proxy_url(target_base_uri): diff --git a/testing/http_util.py b/testing/http_util.py index a232f9e0..24ae91c8 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -2,6 
+2,7 @@ from six import iteritems from six.moves import BaseHTTPServer, queue import socket +import ssl from threading import Thread def get_available_port(): @@ -12,16 +13,28 @@ def get_available_port(): return port def start_server(): - sw = MockServerWrapper(get_available_port()) + sw = MockServerWrapper(get_available_port(), False) + sw.start() + return sw + +def start_secure_server(): + sw = MockServerWrapper(get_available_port(), True) sw.start() return sw class MockServerWrapper(Thread): - def __init__(self, port): + def __init__(self, port, secure): Thread.__init__(self) self.port = port - self.uri = 'http://localhost:%d' % port + self.uri = '%s://localhost:%d' % ('https' if secure else 'http', port) self.server = BaseHTTPServer.HTTPServer(('localhost', port), MockServerRequestHandler) + if secure: + self.server.socket = ssl.wrap_socket( + self.server.socket, + certfile='./testing/selfsigned.pem', # this is a pre-generated self-signed cert that is valid for 100 years + keyfile='./testing/selfsigned.key', + server_side=True + ) self.server.server_wrapper = self self.matchers = {} self.requests = queue.Queue() diff --git a/testing/selfsigned.key b/testing/selfsigned.key new file mode 100644 index 00000000..499c501a --- /dev/null +++ b/testing/selfsigned.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIIWkym77UXCR7NludcOuJyUc+KwjcWhNstarQewjH/4ZoAoGCCqGSM49 +AwEHoUQDQgAELb4Nb3GZRIOgsiFCRPxEFXYYb9JIR/ViYM76/EKNII7nl5cLQaNG +5BGo7ZVF47nePRerqzluEXHRTMt3oul2yw== +-----END EC PRIVATE KEY----- diff --git a/testing/selfsigned.pem b/testing/selfsigned.pem new file mode 100644 index 00000000..148948b7 --- /dev/null +++ b/testing/selfsigned.pem @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBZzCCAQ6gAwIBAgIRAJL5RmnJTnoxpf27KVMMnecwCgYIKoZIzj0EAwIwDzEN +MAsGA1UEChMEVGVzdDAgFw0yMDAzMTgyMTEyNDVaGA8yMTIwMDIyMzIxMTI0NVow +DzENMAsGA1UEChMEVGVzdDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABC2+DW9x +mUSDoLIhQkT8RBV2GG/SSEf1YmDO+vxCjSCO55eXC0GjRuQRqO2VReO53j0Xq6s5 +bhFx0UzLd6LpdsujSTBHMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEF +BQcDATAPBgNVHRMBAf8EBTADAQH/MA8GA1UdEQQIMAaHBH8AAAEwCgYIKoZIzj0E +AwIDRwAwRAIgXUpCMZGxpjXrWS9Z6K0fHzOAnMmjp78n8ZPMdRKb2eYCIBEmP6MK +O3TJdhTVnB5O3CnC9X/lCGViUR+njcH+sU3z +-----END CERTIFICATE----- diff --git a/testing/test_ldclient_tls.py b/testing/test_ldclient_tls.py new file mode 100644 index 00000000..1a5b7b5f --- /dev/null +++ b/testing/test_ldclient_tls.py @@ -0,0 +1,35 @@ +from ldclient.client import LDClient, Config +from testing.http_util import start_secure_server +import pytest +import sys + +# These tests are skipped in Python 3.3 because the embedded HTTPS server does not work correctly, causing a +# TLS handshake failure on the client side. It's unclear whether this is a problem with the self-signed +# certificate we are using or with some other server settings, but it does not appear to be a client-side +# problem. 
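+# Aside, assuming stock urllib3 behavior: with certificate verification
+# disabled, urllib3 emits an InsecureRequestWarning on every request, which is
+# noisy in test output and can be silenced like this if desired:
+#
+#     import urllib3
+#     urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)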
+ +@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3") +def test_cannot_connect_with_selfsigned_cert_if_ssl_verify_is_true(): + with start_secure_server() as server: + server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} }) + config = Config( + sdk_key = 'sdk_key', + base_uri = server.uri, + stream = False + ) + with LDClient(config = config, start_wait = 1.5) as client: + assert not client.is_initialized() + +@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3") +def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false(): + with start_secure_server() as server: + server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} }) + config = Config( + sdk_key = 'sdk_key', + base_uri = server.uri, + stream = False, + send_events = False, + verify_ssl = False + ) + with LDClient(config = config) as client: + assert client.is_initialized() From b7d081ba184fe20e70f1afaa9feb9c870b33521e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 24 Mar 2020 17:06:10 -0700 Subject: [PATCH 192/356] add more TLS config options and collect HTTP/HTTPS config options in a class (#130) --- ldclient/config.py | 102 +++++++++++++++++++++++++++++++--- ldclient/event_processor.py | 5 +- ldclient/feature_requester.py | 5 +- ldclient/impl/http.py | 63 +++++++++++++++++++++ ldclient/sse_client.py | 33 +++++++++-- ldclient/streaming.py | 10 ++-- ldclient/util.py | 48 +--------------- testing/selfsigned.key | 6 +- testing/selfsigned.pem | 16 +++--- testing/test_ldclient_tls.py | 33 ++++++++++- 10 files changed, 235 insertions(+), 86 deletions(-) create mode 100644 ldclient/impl/http.py diff --git a/ldclient/config.py b/ldclient/config.py index ae2e522c..62b84429 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -11,6 +11,71 @@ STREAM_FLAGS_PATH = '/flags' +class HTTPConfig(object): + """Advanced HTTP configuration options for the SDK client. + + This class groups together HTTP/HTTPS-related configuration properties that rarely need to be changed. + If you need to set these, construct an `HTTPConfig` instance and pass it as the `http` parameter when + you construct the main :class:`Config` for the SDK client. + + For some of these properties, :class:`Config` also has properties with the same names; the latter are + deprecated and will be removed in the future, and if you specify an `HTTPConfig` instance then the + corresponding `Config` properties will be ignored. + """ + def __init__(self, + connect_timeout=10, + read_timeout=15, + http_proxy=None, + ca_certs=None, + cert_file=None, + disable_ssl_verification=False): + """ + :param float connect_timeout: The connect timeout for network connections in seconds. + :param float read_timeout: The read timeout for network connections in seconds. + :param http_proxy: Use a proxy when connecting to LaunchDarkly. This is the full URI of the + proxy; for example: http://my-proxy.com:1234. Note that unlike the standard `http_proxy` environment + variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly + service uses HTTPS, but a Relay Proxy instance could use HTTP). Setting this Config parameter will + override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. + :param string ca_certs: If using a custom certificate authority, set this to the file path of the + certificate bundle. 
+ :param string cert_file: If using a custom client certificate, set this to the file path of the + certificate. + :param bool disable_ssl_verification: If true, completely disables SSL verification and certificate + verification for secure requests. This is unsafe and should not be used in a production environment; + instead, use a self-signed certificate and set `ca_certs`. + """ + self.__connect_timeout = connect_timeout + self.__read_timeout = read_timeout + self.__http_proxy = http_proxy + self.__ca_certs = ca_certs + self.__cert_file = cert_file + self.__disable_ssl_verification = disable_ssl_verification + + @property + def connect_timeout(self): + return self.__connect_timeout + + @property + def read_timeout(self): + return self.__read_timeout + + @property + def http_proxy(self): + return self.__http_proxy + + @property + def ca_certs(self): + return self.__ca_certs + + @property + def cert_file(self): + return self.__cert_file + + @property + def disable_ssl_verification(self): + return self.__disable_ssl_verification + class Config(object): """Advanced configuration options for the SDK client. @@ -47,15 +112,18 @@ def __init__(self, diagnostic_opt_out=False, diagnostic_recording_interval=900, wrapper_name=None, - wrapper_version=None): + wrapper_version=None, + http=None): """ :param string sdk_key: The SDK key for your LaunchDarkly account. :param string base_uri: The base URL for the LaunchDarkly server. Most users should use the default value. :param string events_uri: The URL for the LaunchDarkly events server. Most users should use the default value. - :param float connect_timeout: The connect timeout for network connections in seconds. - :param float read_timeout: The read timeout for network connections in seconds. + :param float connect_timeout: Deprecated; use `http` instead and specify the `connect_timeout` as + part of :class:`HTTPConfig`. + :param float read_timeout: Deprecated; use `http` instead and specify the `read_timeout` as + part of :class:`HTTPConfig`. :param int events_upload_max_batch_size: The maximum number of analytics events that the client will send at once. :param int events_max_pending: The capacity of the events buffer. The client buffers up to this many @@ -67,6 +135,8 @@ def __init__(self, use the default value. :param bool stream: Whether or not the streaming API should be used to receive flag updates. By default, it is enabled. Streaming should only be disabled on the advice of LaunchDarkly support. + :param bool verify_ssl: Deprecated; use `http` instead and specify `disable_ssl_verification` as + part of :class:`HTTPConfig` if you want to turn off SSL verification (not recommended). :param bool send_events: Whether or not to send events back to LaunchDarkly. This differs from `offline` in that it affects only the sending of client-side events, not streaming or polling for events from the server. By default, events will be sent. @@ -99,11 +169,8 @@ def __init__(self, :type event_processor_class: (ldclient.config.Config) -> EventProcessor :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, config, and FeatureStore implementation - :param http_proxy: Use a proxy when connecting to LaunchDarkly. This is the full URI of the - proxy; for example: http://my-proxy.com:1234. Note that unlike the standard `http_proxy` environment - variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly - service uses HTTPS, but a Relay Proxy instance could use HTTP). 
Setting this Config parameter will - override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. + :param http_proxy: Deprecated; use `http` instead and specify the `http_proxy` as part of + :class:`HTTPConfig`. :param bool diagnostic_opt_out: Unless this field is set to True, the client will send some diagnostics data to the LaunchDarkly servers in order to assist in the development of future SDK improvements. These diagnostics consist of an initial payload containing some details of SDK in use, @@ -118,6 +185,8 @@ def __init__(self, use. If `wrapper_name` is not set, this field will be ignored. Otherwise the version string will be included in the HTTP headers along with the `wrapper_name` during requests to the LaunchDarkly servers. + :param HTTPConfig http: Optional properties for customizing the client's HTTP/HTTPS behavior. See + :class:`HTTPConfig`. """ self.__sdk_key = sdk_key @@ -154,6 +223,7 @@ def __init__(self, self.__diagnostic_recording_interval = max(diagnostic_recording_interval, 60) self.__wrapper_name = wrapper_name self.__wrapper_version = wrapper_version + self.__http = http @classmethod def default(cls): @@ -196,7 +266,8 @@ def copy_with_new_sdk_key(self, new_sdk_key): diagnostic_opt_out=self.__diagnostic_opt_out, diagnostic_recording_interval=self.__diagnostic_recording_interval, wrapper_name=self.__wrapper_name, - wrapper_version=self.__wrapper_version) + wrapper_version=self.__wrapper_version, + http=self.__http) # for internal use only - probably should be part of the client logic def get_default(self, key, default): @@ -335,6 +406,19 @@ def wrapper_name(self): def wrapper_version(self): return self.__wrapper_version + @property + def http(self): + if self.__http is None: + return HTTPConfig( + connect_timeout=self.__connect_timeout, + read_timeout=self.__read_timeout, + http_proxy=self.__http_proxy, + ca_certs=None, + cert_file=None, + disable_ssl_verification=not self.__verify_ssl + ) + return self.__http + def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key == '': log.warning("Missing or blank sdk_key.") diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 5c3457c1..bbc18076 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -22,13 +22,13 @@ from ldclient.event_summarizer import EventSummarizer from ldclient.fixed_thread_pool import FixedThreadPool +from ldclient.impl.http import _http_factory from ldclient.lru_cache import SimpleLRUCache from ldclient.user_filter import UserFilter from ldclient.interfaces import EventProcessor from ldclient.repeating_timer import RepeatingTimer from ldclient.util import UnsuccessfulResponseException from ldclient.util import _headers, _retryable_statuses -from ldclient.util import create_http_pool_manager from ldclient.util import log from ldclient.util import http_error_message, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response from ldclient.diagnostics import create_diagnostic_init @@ -255,8 +255,7 @@ class EventDispatcher(object): def __init__(self, inbox, config, http_client, diagnostic_accumulator=None): self._inbox = inbox self._config = config - self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl, - target_base_uri=config.events_uri, force_proxy=config.http_proxy) if http_client is None else http_client + self._http = _http_factory(config).create_pool_manager(1, config.events_uri) if http_client is None else http_client self._close_http = 
(http_client is None)  # so we know whether to close it later
         self._disabled = False
         self._outbox = EventBuffer(config.events_max_pending)
diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py
index 983798ff..3ab812fe 100644
--- a/ldclient/feature_requester.py
+++ b/ldclient/feature_requester.py
@@ -7,10 +7,10 @@
 import json
 import urllib3
 
+from ldclient.impl.http import _http_factory
 from ldclient.interfaces import FeatureRequester
 from ldclient.util import UnsuccessfulResponseException
 from ldclient.util import _headers
-from ldclient.util import create_http_pool_manager
 from ldclient.util import log
 from ldclient.util import throw_if_unsuccessful_response
 from ldclient.versioned_data_kind import FEATURES, SEGMENTS
@@ -25,8 +25,7 @@ class FeatureRequesterImpl(FeatureRequester):
 
     def __init__(self, config):
         self._cache = dict()
-        self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl,
-                                              target_base_uri=config.base_uri, force_proxy=config.http_proxy)
+        self._http = _http_factory(config).create_pool_manager(1, config.base_uri)
         self._config = config
 
     def get_all_data(self):
diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py
new file mode 100644
index 00000000..7e0130de
--- /dev/null
+++ b/ldclient/impl/http.py
@@ -0,0 +1,63 @@
+from ldclient.version import VERSION
+import certifi
+from os import environ
+import urllib3
+
+def _base_headers(config):
+    headers = {'Authorization': config.sdk_key,
+               'User-Agent': 'PythonClient/' + VERSION}
+    if isinstance(config.wrapper_name, str) and config.wrapper_name != "":
+        wrapper_version = ""
+        if isinstance(config.wrapper_version, str) and config.wrapper_version != "":
+            wrapper_version = "/" + config.wrapper_version
+        headers.update({'X-LaunchDarkly-Wrapper': config.wrapper_name + wrapper_version})
+    return headers
+
+def _http_factory(config):
+    return HTTPFactory(_base_headers(config), config.http)
+
+class HTTPFactory(object):
+    def __init__(self, base_headers, http_config):
+        self.__base_headers = base_headers
+        self.__http_config = http_config
+        self.__timeout = urllib3.Timeout(connect=http_config.connect_timeout, read=http_config.read_timeout)
+
+    @property
+    def base_headers(self):
+        return self.__base_headers
+
+    @property
+    def timeout(self):
+        return self.__timeout
+
+    def create_pool_manager(self, num_pools, target_base_uri):
+        proxy_url = self.__http_config.http_proxy or _get_proxy_url(target_base_uri)
+
+        if self.__http_config.disable_ssl_verification:
+            cert_reqs = 'CERT_NONE'
+            ca_certs = None
+        else:
+            cert_reqs = 'CERT_REQUIRED'
+            ca_certs = self.__http_config.ca_certs or certifi.where()
+
+        if proxy_url is None:
+            return urllib3.PoolManager(
+                num_pools=num_pools,
+                cert_reqs=cert_reqs,
+                ca_certs=ca_certs
+            )
+        else:
+            return urllib3.ProxyManager(
+                proxy_url,
+                num_pools=num_pools,
+                cert_reqs=cert_reqs,
+                ca_certs = ca_certs
+            )
+
+def _get_proxy_url(target_base_uri):
+    if target_base_uri is None:
+        return None
+    is_https = target_base_uri.startswith('https:')
+    if is_https:
+        return environ.get('https_proxy')
+    return environ.get('http_proxy')
diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py
index 23a0209e..1e4bb22f 100644
--- a/ldclient/sse_client.py
+++ b/ldclient/sse_client.py
@@ -12,7 +12,8 @@
 
 import urllib3
 
-from ldclient.util import create_http_pool_manager
+from ldclient.config import HTTPConfig
+from ldclient.impl.http import HTTPFactory
 from ldclient.util import log
 from ldclient.util import throw_if_unsuccessful_response
 
@@ -23,7 +24,7 @@ class SSEClient(object):
     def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeout=300, chunk_size=10000,
-                 verify_ssl=False, http=None, http_proxy=None, **kwargs):
+                 verify_ssl=False, http=None, http_proxy=None, http_factory=None, **kwargs):
         self.url = url
         self.last_id = last_id
         self.retry = retry
@@ -31,9 +32,28 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo
         self._read_timeout = read_timeout
         self._chunk_size = chunk_size
 
+        if http_factory:
+            self._timeout = http_factory.timeout
+            base_headers = http_factory.base_headers
+        else:
+            # for backward compatibility in case anyone else is using this class
+            self._timeout = urllib3.Timeout(connect=self._connect_timeout, read=self._read_timeout)
+            base_headers = {}
+
         # Optional support for passing in an HTTP client
-        self.http = create_http_pool_manager(num_pools=1, verify_ssl=verify_ssl, target_base_uri=url,
-                                             force_proxy=http_proxy)
+        if http:
+            self.http = http
+        else:
+            hf = http_factory
+            if hf is None:  # build from individual parameters which we're only retaining for backward compatibility
+                hc = HTTPConfig(
+                    connect_timeout=connect_timeout,
+                    read_timeout=read_timeout,
+                    disable_ssl_verification=not verify_ssl,
+                    http_proxy=http_proxy
+                )
+                hf = HTTPFactory({}, hc)
+            self.http = hf.create_pool_manager(1, url)
 
         # Any extra kwargs will be fed into the request call later.
         self.requests_kwargs = kwargs
@@ -41,6 +61,9 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo
         # The SSE spec requires making requests with Cache-Control: nocache
         if 'headers' not in self.requests_kwargs:
             self.requests_kwargs['headers'] = {}
+
+        self.requests_kwargs['headers'].update(base_headers)
+
         self.requests_kwargs['headers']['Cache-Control'] = 'no-cache'
 
         # The 'Accept' header is not required, but explicit > implicit
@@ -59,7 +82,7 @@ def _connect(self):
         self.resp = self.http.request(
             'GET',
             self.url,
-            timeout=urllib3.Timeout(connect=self._connect_timeout, read=self._read_timeout),
+            timeout=self._timeout,
             preload_content=False,
             retries=0,  # caller is responsible for implementing appropriate retry semantics, e.g. backoff
             **self.requests_kwargs)
diff --git a/ldclient/streaming.py b/ldclient/streaming.py
index c159571a..0d70c7e1 100644
--- a/ldclient/streaming.py
+++ b/ldclient/streaming.py
@@ -12,9 +12,10 @@
 import logging
 import time
 
+from ldclient.impl.http import _http_factory
 from ldclient.interfaces import UpdateProcessor
 from ldclient.sse_client import SSEClient
-from ldclient.util import _stream_headers, log, UnsuccessfulResponseException, http_error_message, is_http_error_recoverable
+from ldclient.util import log, UnsuccessfulResponseException, http_error_message, is_http_error_recoverable
 from ldclient.versioned_data_kind import FEATURES, SEGMENTS
 
 # allows for up to 5 minutes to elapse without any data sent across the stream. The heartbeats sent as comments on the
@@ -101,11 +102,8 @@ def log_backoff_message(props):
     def _connect(self):
         return SSEClient(
             self._uri,
-            headers=_stream_headers(self._config),
-            connect_timeout=self._config.connect_timeout,
-            read_timeout=stream_read_timeout,
-            verify_ssl=self._config.verify_ssl,
-            http_proxy=self._config.http_proxy)
+            http_factory = _http_factory(self._config)
+        )
 
     def stop(self):
         log.info("Stopping StreamingUpdateProcessor")
diff --git a/ldclient/util.py b/ldclient/util.py
index c6ad2ba7..189247db 100644
--- a/ldclient/util.py
+++ b/ldclient/util.py
@@ -3,14 +3,13 @@
 """
 # currently excluded from documentation - see docs/README.md
 
-import certifi
 import logging
 from os import environ
 import six
 import sys
 import urllib3
 
-from ldclient.version import VERSION
+from ldclient.impl.http import HTTPFactory, _base_headers
 
 log = logging.getLogger(sys.modules[__name__].__name__)
@@ -39,27 +38,11 @@
 
 _retryable_statuses = [400, 408, 429]
 
-def _base_headers(config):
-    headers = {'Authorization': config.sdk_key,
-               'User-Agent': 'PythonClient/' + VERSION}
-    if isinstance(config.wrapper_name, str) and config.wrapper_name != "":
-        wrapper_version = ""
-        if isinstance(config.wrapper_version, str) and config.wrapper_version != "":
-            wrapper_version = "/" + config.wrapper_version
-        headers.update({'X-LaunchDarkly-Wrapper': config.wrapper_name + wrapper_version})
-    return headers
-
 def _headers(config):
     base_headers = _base_headers(config)
     base_headers.update({'Content-Type': "application/json"})
     return base_headers
 
-def _stream_headers(config):
-    base_headers = _base_headers(config)
-    base_headers.update({ 'Cache-Control': "no-cache"
-        , 'Accept': "text/event-stream" })
-    return base_headers
-
 def check_uwsgi():
     if 'uwsgi' in sys.modules:
         # noinspection PyPackageRequirements,PyUnresolvedReferences
@@ -95,35 +78,6 @@ def status(self):
         return self._status
 
-def create_http_pool_manager(num_pools=1, verify_ssl=False, target_base_uri=None, force_proxy=None):
-    proxy_url = force_proxy or _get_proxy_url(target_base_uri)
-
-    cert_reqs = 'CERT_REQUIRED' if verify_ssl else 'CERT_NONE'
-    ca_certs = certifi.where() if verify_ssl else None
-
-    if proxy_url is None:
-        return urllib3.PoolManager(
-            num_pools=num_pools,
-            cert_reqs=cert_reqs,
-            ca_certs=ca_certs
-        )
-    else:
-        return urllib3.ProxyManager(
-            proxy_url,
-            num_pools=num_pools,
-            cert_reqs=cert_reqs,
-            ca_certs = ca_certs
-        )
-
-def _get_proxy_url(target_base_uri):
-    if target_base_uri is None:
-        return None
-    is_https = target_base_uri.startswith('https:')
-    if is_https:
-        return environ.get('https_proxy')
-    return environ.get('http_proxy')
-
-
 def throw_if_unsuccessful_response(resp):
     if resp.status >= 400:
         raise UnsuccessfulResponseException(resp.status)
diff --git a/testing/selfsigned.key b/testing/selfsigned.key
index 499c501a..7a413c71 100644
--- a/testing/selfsigned.key
+++ b/testing/selfsigned.key
@@ -1,5 +1,5 @@
 -----BEGIN EC PRIVATE KEY-----
-MHcCAQEEIIWkym77UXCR7NludcOuJyUc+KwjcWhNstarQewjH/4ZoAoGCCqGSM49
-AwEHoUQDQgAELb4Nb3GZRIOgsiFCRPxEFXYYb9JIR/ViYM76/EKNII7nl5cLQaNG
-5BGo7ZVF47nePRerqzluEXHRTMt3oul2yw==
+MHcCAQEEIBWQ/QZ+fQB46qfV0LV8e+IaRaLd+Ps9E3fDApDbeg3ioAoGCCqGSM49
+AwEHoUQDQgAEEThBJBr7/2yPpTYwZ0ZzbX0RTC3E6hr1p7oa+DlOxXwAvh2KQ6Pc
+HrlZjdRzBd+Dj8xbbMqXBSkl3Ym1M2o1Vg==
 -----END EC PRIVATE KEY-----
diff --git a/testing/selfsigned.pem b/testing/selfsigned.pem
index 148948b7..2702b4e6 100644
--- a/testing/selfsigned.pem
+++ b/testing/selfsigned.pem
@@ -1,10 +1,10 @@
 -----BEGIN CERTIFICATE-----
-MIIBZzCCAQ6gAwIBAgIRAJL5RmnJTnoxpf27KVMMnecwCgYIKoZIzj0EAwIwDzEN
-MAsGA1UEChMEVGVzdDAgFw0yMDAzMTgyMTEyNDVaGA8yMTIwMDIyMzIxMTI0NVow
-DzENMAsGA1UEChMEVGVzdDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABC2+DW9x
-mUSDoLIhQkT8RBV2GG/SSEf1YmDO+vxCjSCO55eXC0GjRuQRqO2VReO53j0Xq6s5
-bhFx0UzLd6LpdsujSTBHMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEF
-BQcDATAPBgNVHRMBAf8EBTADAQH/MA8GA1UdEQQIMAaHBH8AAAEwCgYIKoZIzj0E
-AwIDRwAwRAIgXUpCMZGxpjXrWS9Z6K0fHzOAnMmjp78n8ZPMdRKb2eYCIBEmP6MK
-O3TJdhTVnB5O3CnC9X/lCGViUR+njcH+sU3z
+MIIBbTCCAROgAwIBAgIRALzc2l6J69lYuBHaNafSUGwwCgYIKoZIzj0EAwIwDzEN
+MAsGA1UEChMEVGVzdDAgFw0yMDAzMTkwMTQ0NTlaGA8yMTIwMDIyNDAxNDQ1OVow
+DzENMAsGA1UEChMEVGVzdDBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABBE4QSQa
++/9sj6U2MGdGc219EUwtxOoa9ae6Gvg5TsV8AL4dikOj3B65WY3UcwXfg4/MW2zK
+lwUpJd2JtTNqNVajTjBMMA4GA1UdDwEB/wQEAwICpDATBgNVHSUEDDAKBggrBgEF
+BQcDATAPBgNVHRMBAf8EBTADAQH/MBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq
+hkjOPQQDAgNIADBFAiBteZciDQOc25Coh6GRtIrOUWVsqpLbJRN6FkZGV1Cs4AIh
+ALoYL/JBA4LwanNK7rmevoAR7qqkKs+Y2trfuxjKYecO
 -----END CERTIFICATE-----
diff --git a/testing/test_ldclient_tls.py b/testing/test_ldclient_tls.py
index 1a5b7b5f..d6accceb 100644
--- a/testing/test_ldclient_tls.py
+++ b/testing/test_ldclient_tls.py
@@ -1,4 +1,5 @@
-from ldclient.client import LDClient, Config
+from ldclient.client import LDClient
+from ldclient.config import Config, HTTPConfig
 from testing.http_util import start_secure_server
 import pytest
 import sys
@@ -9,7 +10,7 @@
 # problem.
 
 @pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
-def test_cannot_connect_with_selfsigned_cert_if_ssl_verify_is_true():
+def test_cannot_connect_with_selfsigned_cert_by_default():
     with start_secure_server() as server:
         server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} })
         config = Config(
@@ -33,3 +34,31 @@ def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false():
         )
         with LDClient(config = config) as client:
             assert client.is_initialized()
+
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true():
+    with start_secure_server() as server:
+        server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} })
+        config = Config(
+            sdk_key = 'sdk_key',
+            base_uri = server.uri,
+            stream = False,
+            send_events = False,
+            http = HTTPConfig(disable_ssl_verification = True)
+        )
+        with LDClient(config = config) as client:
+            assert client.is_initialized()
+
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+def test_can_connect_with_selfsigned_cert_by_setting_ca_certs():
+    with start_secure_server() as server:
+        server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} })
+        config = Config(
+            sdk_key = 'sdk_key',
+            base_uri = server.uri,
+            stream = False,
+            send_events = False,
+            http = HTTPConfig(ca_certs = './testing/selfsigned.pem')
+        )
+        with LDClient(config = config) as client:
+            assert client.is_initialized()

From 770fd71e1d1fe7fd4282a56c492f450acb01a65a Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Thu, 26 Mar 2020 17:57:26 -0700
Subject: [PATCH 193/356] make stream retry/backoff/jitter behavior consistent with other SDKs + improve testing (#131)

---
 .circleci/config.yml | 2 +-
 ldclient/__init__.py | 14 ++
 ldclient/config.py | 10 +
 ldclient/impl/http.py | 2 +-
 ldclient/impl/retry_delay.py | 93 ++++++++
 ldclient/sse_client.py | 21 +-
 ldclient/streaming.py | 35 +--
 requirements.txt | 1 -
 testing/__init__.py | 3 -
 testing/http_util.py | 142 +++++++++---
 testing/stub_util.py | 33 ++-
 testing/test_event_processor.py | 6 +-
 testing/test_feature_requester.py | 32 +--
 testing/test_integration_init.py | 52 -----
 testing/test_integration_ldclient.py | 51 -----
 testing/test_ldclient_end_to_end.py | 163 ++++++++++++++
 testing/test_ldclient_singleton.py | 70 ++++++
 testing/test_ldclient_tls.py | 64 ------
 testing/test_retry_delay.py | 81 +++++++
 testing/test_streaming.py | 313 ++++++++++++++++++++------
 20 files changed, 869 insertions(+), 319 deletions(-)
 create mode 100644 ldclient/impl/retry_delay.py
 delete mode 100644 testing/test_integration_init.py
 delete mode 100644 testing/test_integration_ldclient.py
 create mode 100644 testing/test_ldclient_end_to_end.py
 create mode 100644 testing/test_ldclient_singleton.py
 delete mode 100644 testing/test_ldclient_tls.py
 create mode 100644 testing/test_retry_delay.py

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2920bc7e..6b0e096a 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -171,7 +171,7 @@ jobs:
             pip install -r consul-requirements.txt
             python setup.py install
       - run:
-          name: run tests (2.7)
+          name: run tests
          command: |
            mkdir test-reports
            $env:Path += ";C:\Python27\;C:\Python27\Scripts\"  # has no effect if 2.7 isn't installed
diff --git a/ldclient/__init__.py b/ldclient/__init__.py
index b5a5281a..e05dfd97 100644
--- a/ldclient/__init__.py
+++ b/ldclient/__init__.py
@@ -122,6 +122,20 @@ def get():
         __lock.unlock()
 
 
+# for testing only
+def _reset_client():
+    global __client
+    global __lock
+    try:
+        __lock.lock()
+        c = __client
+        __client = None
+    finally:
+        __lock.unlock()
+    if c:
+        c.close()
+
+
 # currently hidden from documentation - see docs/README.md
 class NullHandler(logging.Handler):
     """A :class:`logging.Handler` implementation that does nothing.
diff --git a/ldclient/config.py b/ldclient/config.py
index 62b84429..675d5f28 100644
--- a/ldclient/config.py
+++ b/ldclient/config.py
@@ -92,6 +92,7 @@ def __init__(self,
                  flush_interval=5,
                  stream_uri='https://stream.launchdarkly.com',
                  stream=True,
+                 initial_reconnect_delay=1,
                  verify_ssl=True,
                  defaults=None,
                  send_events=None,
@@ -135,6 +136,10 @@ def __init__(self,
          use the default value.
        :param bool stream: Whether or not the streaming API should be used to receive flag updates. By
          default, it is enabled. Streaming should only be disabled on the advice of LaunchDarkly support.
+        :param float initial_reconnect_delay: The initial reconnect delay (in seconds) for the streaming
+          connection. The streaming service uses a backoff algorithm (with jitter) every time the connection needs
+          to be reestablished. The delay for the first reconnection will start near this value, and then
+          increase exponentially for any subsequent connection failures.
        :param bool verify_ssl: Deprecated; use `http` instead and specify `disable_ssl_verification` as
          part of :class:`HTTPConfig` if you want to turn off SSL verification (not recommended).
        :param bool send_events: Whether or not to send events back to LaunchDarkly. This differs from
@@ -198,6 +203,7 @@ def __init__(self,
         self.__stream_uri = stream_uri.rstrip('\\')
         self.__update_processor_class = update_processor_class
         self.__stream = stream
+        self.__initial_reconnect_delay = initial_reconnect_delay
         self.__poll_interval = max(poll_interval, 30)
         self.__use_ldd = use_ldd
         self.__feature_store = InMemoryFeatureStore() if not feature_store else feature_store
@@ -248,6 +254,7 @@ def copy_with_new_sdk_key(self, new_sdk_key):
                       flush_interval=self.__flush_interval,
                       stream_uri=self.__stream_uri,
                       stream=self.__stream,
+                      initial_reconnect_delay=self.__initial_reconnect_delay,
                       verify_ssl=self.__verify_ssl,
                       defaults=self.__defaults,
                       send_events=self.__send_events,
@@ -315,6 +322,9 @@ def stream(self):
         return self.__stream
 
     @property
+    def initial_reconnect_delay(self):
+        return self.__initial_reconnect_delay
+
+    @property
     def poll_interval(self):
         return self.__poll_interval
diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py
index 7e0130de..eaa82077 100644
--- a/ldclient/impl/http.py
+++ b/ldclient/impl/http.py
@@ -4,7 +4,7 @@
 import urllib3
 
 def _base_headers(config):
-    headers = {'Authorization': config.sdk_key,
+    headers = {'Authorization': config.sdk_key or '',
                'User-Agent': 'PythonClient/' + VERSION}
     if isinstance(config.wrapper_name, str) and config.wrapper_name != "":
         wrapper_version = ""
diff --git a/ldclient/impl/retry_delay.py b/ldclient/impl/retry_delay.py
new file mode 100644
index 00000000..6ede21ab
--- /dev/null
+++ b/ldclient/impl/retry_delay.py
@@ -0,0 +1,93 @@
+from random import Random
+
+# This implementation is based on the equivalent code in the Go eventsource library.
+
+class RetryDelayStrategy(object):
+    """Encapsulation of configurable backoff/jitter behavior, used for stream connections.
+
+    - The system can either be in a "good" state or a "bad" state. The initial state is "bad"; the
+    caller is responsible for indicating when it transitions to "good". When we ask for a new retry
+    delay, that implies the state is now transitioning to "bad".
+
+    - There is a configurable base delay, which can be changed at any time (if the SSE server sends
+    us a "retry:" directive).
+
+    - There are optional strategies for applying backoff and jitter to the delay.
+
+    This object is meant to be used from a single thread once it's been created; its methods are
+    not safe for concurrent use.
+    """
+    def __init__(self, base_delay, reset_interval, backoff_strategy, jitter_strategy):
+        self.__base_delay = base_delay
+        self.__reset_interval = reset_interval
+        self.__backoff = backoff_strategy
+        self.__jitter = jitter_strategy
+        self.__retry_count = 0
+        self.__good_since = None
+
+    def next_retry_delay(self, current_time):
+        """Computes the next retry interval. This also sets the current state to "bad".
+
+        Note that current_time is passed as a parameter instead of computed by this function to
+        guarantee predictable behavior in tests.
+
+        :param float current_time: the current time, in seconds
+        """
+        if self.__good_since and self.__reset_interval and (current_time - self.__good_since >= self.__reset_interval):
+            self.__retry_count = 0
+        self.__good_since = None
+        delay = self.__base_delay
+        if self.__backoff:
+            delay = self.__backoff.apply_backoff(delay, self.__retry_count)
+        self.__retry_count += 1
+        if self.__jitter:
+            delay = self.__jitter.apply_jitter(delay)
+        return delay
+
+    def set_good_since(self, good_since):
+        """Marks the current state as "good" and records the time.
+
+        :param float good_since: the time that the state became "good", in seconds
+        """
+        self.__good_since = good_since
+
+    def set_base_delay(self, base_delay):
+        """Changes the initial retry delay and resets the backoff (if any) so the next retry will use
+        that value.
+
+        This is used to implement the optional SSE behavior where the server sends a "retry:" command to
+        set the base retry to a specific value. Note that we will still apply a jitter, if jitter is enabled,
+        and subsequent retries will still increase exponentially.
+        """
+        self.__base_delay = base_delay
+        self.__retry_count = 0
+
+class DefaultBackoffStrategy(object):
+    """The default implementation of exponential backoff, which doubles the delay each time up to
+    the specified maximum.
+
+    If a reset_interval was specified for the RetryDelayStrategy, and the system has been in a "good"
+    state for at least that long, the delay is reset back to the base. This avoids perpetually increasing
+    delays in a situation where failures are rare.
+    """
+    def __init__(self, max_delay):
+        self.__max_delay = max_delay
+
+    def apply_backoff(self, delay, retry_count):
+        d = delay * (2 ** retry_count)
+        return d if d <= self.__max_delay else self.__max_delay
+
+class DefaultJitterStrategy(object):
+    """The default implementation of jitter, which subtracts a pseudo-random amount from each delay.
+    """
+    def __init__(self, ratio, rand_seed = None):
+        """Creates an instance.
+
+        :param float ratio: a number in the range [0.0, 1.0] representing 0%-100% jitter
+        :param int rand_seed: if not None, will use this random seed (for test determinacy)
+        """
+        self.__ratio = ratio
+        self.__random = Random(rand_seed)
+
+    def apply_jitter(self, delay):
+        return delay - (self.__random.random() * self.__ratio * delay)
diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py
index 1e4bb22f..b257a443 100644
--- a/ldclient/sse_client.py
+++ b/ldclient/sse_client.py
@@ -111,14 +111,19 @@ def __next__(self):
                         raise EOFError()
                     self.buf += nextline.decode("utf-8")
             except (StopIteration, EOFError) as e:
-                time.sleep(self.retry / 1000.0)
-                self._connect()
-
-                # The SSE spec only supports resuming from a whole message, so
-                # if we have half a message we should throw it out.
-                head, sep, tail = self.buf.rpartition('\n')
-                self.buf = head + sep
-                continue
+                if self.retry:
+                    # This retry logic is not what we want in the SDK. It's retained here for backward compatibility in case
+                    # anyone else is using SSEClient.
+                    time.sleep(self.retry / 1000.0)
+                    self._connect()
+
+                    # The SSE spec only supports resuming from a whole message, so
+                    # if we have half a message we should throw it out.
+                    head, sep, tail = self.buf.rpartition('\n')
+                    self.buf = head + sep
+                    continue
+                else:
+                    raise
 
         split = re.split(end_of_field, self.buf)
         head = split[0]
diff --git a/ldclient/streaming.py b/ldclient/streaming.py
index 0d70c7e1..7e0fd52b 100644
--- a/ldclient/streaming.py
+++ b/ldclient/streaming.py
@@ -8,11 +8,12 @@
 import json
 from threading import Thread
 
-import backoff
 import logging
+import math
 import time
 
 from ldclient.impl.http import _http_factory
+from ldclient.impl.retry_delay import RetryDelayStrategy, DefaultBackoffStrategy, DefaultJitterStrategy
 from ldclient.interfaces import UpdateProcessor
 from ldclient.sse_client import SSEClient
 from ldclient.util import log, UnsuccessfulResponseException, http_error_message, is_http_error_recoverable
@@ -22,6 +23,10 @@
 # stream will keep this from triggering
 stream_read_timeout = 5 * 60
 
+MAX_RETRY_DELAY = 30
+BACKOFF_RESET_INTERVAL = 60
+JITTER_RATIO = 0.5
+
 STREAM_ALL_PATH = '/all'
 
 ParsedPath = namedtuple('ParsedPath', ['kind', 'key'])
@@ -39,6 +44,11 @@ def __init__(self, config, requester, store, ready, diagnostic_accumulator):
         self._ready = ready
         self._diagnostic_accumulator = diagnostic_accumulator
         self._es_started = None
+        self._retry_delay = RetryDelayStrategy(
+            config.initial_reconnect_delay,
+            BACKOFF_RESET_INTERVAL,
+            DefaultBackoffStrategy(MAX_RETRY_DELAY),
+            DefaultJitterStrategy(JITTER_RATIO))
 
         # We need to suppress the default logging behavior of the backoff package, because
         # it logs messages at ERROR level with variable content (the delay time) which will
@@ -53,12 +63,18 @@ def __init__(self, config, requester, store, ready, diagnostic_accumulator):
     def run(self):
         log.info("Starting StreamingUpdateProcessor connecting to uri: " + self._uri)
         self._running = True
+        attempts = 0
         while self._running:
+            if attempts > 0:
+                delay = self._retry_delay.next_retry_delay(time.time())
+                log.info("Will reconnect after delay of %fs" % delay)
+                time.sleep(delay)
+            attempts += 1
             try:
                 self._es_started = int(time.time() * 1000)
                 messages = self._connect()
                 for msg in messages:
                     if not self._running:
                         break
                     message_ok = self.process_message(self._store, self._requester, msg)
                     if message_ok:
@@ -76,32 +92,20 @@ def run(self):
                 self.stop()
                 break
             except Exception as e:
-                log.warning("Caught exception. Restarting stream connection after one second. %s" % e)
+                log.warning("Unexpected error on stream connection: %s, will retry" % e)
                 self._record_stream_init(True)
                 self._es_started = None
                 # no stacktrace here because, for a typical connection error, it'll just be a lengthy tour of urllib3 internals
-                time.sleep(1)
 
     def _record_stream_init(self, failed):
         if self._diagnostic_accumulator and self._es_started:
             current_time = int(time.time() * 1000)
             self._diagnostic_accumulator.record_stream_init(current_time, current_time - self._es_started, failed)
 
-    def _backoff_expo():
-        return backoff.expo(max_value=30)
-
-    def should_not_retry(e):
-        return isinstance(e, UnsuccessfulResponseException) and (not is_http_error_recoverable(e.status))
-
-    def log_backoff_message(props):
-        log.error("Streaming connection failed, will attempt to restart")
-        log.info("Will reconnect after delay of %fs", props['wait'])
-
-    @backoff.on_exception(_backoff_expo, BaseException, max_tries=None, jitter=backoff.full_jitter,
-                          on_backoff=log_backoff_message, giveup=should_not_retry)
     def _connect(self):
         return SSEClient(
             self._uri,
+            retry = None,  # we're implementing our own retry
             http_factory = _http_factory(self._config)
         )
diff --git a/requirements.txt b/requirements.txt
index f941d6ab..76cd9de6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,3 @@
-backoff>=1.4.3
 certifi>=2018.4.16
 expiringdict>=1.1.4,<1.2.0
 six>=1.10.0
diff --git a/testing/__init__.py b/testing/__init__.py
index 0602017d..ccfd5f11 100644
--- a/testing/__init__.py
+++ b/testing/__init__.py
@@ -1,6 +1,3 @@
 import logging
-import os
 
 logging.basicConfig(level=logging.WARN)
-
-sdk_key = os.environ.get('LD_SDK_KEY')
diff --git a/testing/http_util.py b/testing/http_util.py
index 24ae91c8..55842a38 100644
--- a/testing/http_util.py
+++ b/testing/http_util.py
@@ -1,9 +1,10 @@
 import json
-from six import iteritems
+from six import iteritems, string_types
 from six.moves import BaseHTTPServer, queue
 import socket
 import ssl
 from threading import Thread
+import time
 
 def get_available_port():
     s = socket.socket(socket.AF_INET, type = socket.SOCK_STREAM)
@@ -12,14 +13,30 @@ def get_available_port():
     s.close()
     return port
 
+def poll_until_started(port):
+    deadline = time.time() + 1
+    while time.time() < deadline:
+        s = socket.socket()
+        try:
+            s.connect(('localhost', port))
+            return
+        except socket.error:
+            pass
+        finally:
+            s.close()
+        time.sleep(0.05)
+    raise Exception("test server on port %d was not reachable" % port)
+
 def start_server():
     sw = MockServerWrapper(get_available_port(), False)
     sw.start()
+    poll_until_started(sw.port)
     return sw
 
 def start_secure_server():
     sw = MockServerWrapper(get_available_port(), True)
     sw.start()
+    poll_until_started(sw.port)
     return sw
 
 class MockServerWrapper(Thread):
@@ -44,15 +61,11 @@ def close(self):
         self.server.server_close()
 
     def run(self):
-        self.server.serve_forever()
+        self.server.serve_forever(0.1)  # 0.1 seconds is how often it'll check to see if it is shutting down
 
-    def setup_response(self, uri_path, status, body = None, headers = None):
-        self.matchers[uri_path] = MockServerResponse(status, body, headers)
-
-    def setup_json_response(self, uri_path, data, headers = None):
-        final_headers = {} if headers is None else headers.copy()
-        final_headers['Content-Type'] = 'application/json'
-        return self.setup_response(uri_path, 200, json.dumps(data), headers)
+    def for_path(self, uri_path, content):
+        self.matchers[uri_path] = content
+        return self
 
     def await_request(self):
         return self.requests.get()
@@ -60,6 +73,13 @@ def await_request(self):
     def require_request(self):
         return self.requests.get(block=False)
 
+    def should_have_requests(self, count):
+        if self.requests.qsize() != count:
+            rs = []
+            while not self.requests.empty():
+                rs.append(str(self.requests.get(False)))
+            assert False, "expected %d more requests but had %s" % (count, rs)
+
     # enter/exit magic methods allow server to be auto-closed by "with" statement
     def __enter__(self):
         return self
@@ -79,27 +99,97 @@ def do_POST(self):
 
     def _do_request(self):
         server_wrapper = self.server.server_wrapper
-        server_wrapper.requests.put(MockServerRequest(self.command, self.path, self.headers))
-        if self.path in server_wrapper.matchers:
-            resp = server_wrapper.matchers[self.path]
-            self.send_response(resp.status)
-            if resp.headers is not None:
-                for key, value in iteritems(resp.headers):
-                    self.send_header(key, value)
-            self.end_headers()
-            if resp.body is not None:
-                self.wfile.write(resp.body.encode('UTF-8'))
+        server_wrapper.requests.put(MockServerRequest(self))
+        handler = server_wrapper.matchers.get(self.path)
+        if handler:
+            handler.write(self)
         else:
             self.send_error(404)
 
 class MockServerRequest(object):
-    def __init__(self, method, path, headers):
-        self.method = method
-        self.path = path
-        self.headers = headers
+    def __init__(self, request):
+        self.method = request.command
+        self.path = request.path
+        self.headers = request.headers
+        content_length = int(request.headers.get('content-length', 0))
+        if content_length:
+            self.body = request.rfile.read(content_length).decode('UTF-8')
+        else:
+            self.body = None
+
+    def __str__(self):
+        return "%s %s" % (self.method, self.path)
 
-class MockServerResponse(object):
-    def __init__(self, status, body, headers):
+class BasicResponse(object):
+    def __init__(self, status, body = None, headers = None):
         self.status = status
         self.body = body
-        self.headers = headers
+        self.headers = headers or {}
+
+    def add_headers(self, headers):
+        for key, value in iteritems(headers or {}):
+            self.headers[key] = value
+
+    def write(self, request):
+        request.send_response(self.status)
+        for key, value in iteritems(self.headers):
+            request.send_header(key, value)
+        request.end_headers()
+        if self.body:
+            request.wfile.write(self.body.encode('UTF-8'))
+
+class JsonResponse(BasicResponse):
+    def __init__(self, data, headers = None):
+        h = headers or {}
+        h.update({ 'Content-Type': 'application/json' })
+        BasicResponse.__init__(self, 200, json.dumps(data or {}), h)
+
+class ChunkedResponse(object):
+    def __init__(self, headers = None):
+        self.queue = queue.Queue()
+        self.headers = headers or {}
+
+    def push(self, chunk):
+        if chunk is not None:
+            self.queue.put(chunk)
+
+    def close(self):
+        self.queue.put(None)
+
+    def write(self, request):
+        request.send_response(200)
+        request.send_header('Transfer-Encoding', 'chunked')
+        for key, value in iteritems(self.headers):
+            request.send_header(key, value)
+        request.end_headers()
+        request.wfile.flush()
+        while True:
+            chunk = self.queue.get()
+            if chunk is None:
+                request.wfile.write('0\r\n\r\n'.encode('UTF-8'))
+                request.wfile.flush()
+                break
+            else:
+                request.wfile.write(('%x\r\n%s\r\n' % (len(chunk), chunk)).encode('UTF-8'))
+                request.wfile.flush()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+class CauseNetworkError(object):
+    def write(self, request):
+        raise Exception('intentional error')
+
+class SequentialHandler(object):
+    def __init__(self, *argv):
+        self.handlers = argv
+        self.counter = 0
+
+    def write(self, request):
+        handler = self.handlers[self.counter]
+        if self.counter < len(self.handlers) - 1:
+            self.counter += 1
+        handler.write(request)
diff --git a/testing/stub_util.py b/testing/stub_util.py
index 41970edf..a5aada7d 100644
--- a/testing/stub_util.py
+++ b/testing/stub_util.py
@@ -1,9 +1,40 @@
 from email.utils import formatdate
-from requests.structures import CaseInsensitiveDict
+import json
 
+from testing.http_util import ChunkedResponse, JsonResponse
 from ldclient.interfaces import EventProcessor, FeatureRequester, FeatureStore, UpdateProcessor
 
+def make_items_map(items = []):
+    ret = {}
+    for item in items:
+        ret[item['key']] = item
+    return ret
+
+def make_put_event(flags = [], segments = []):
+    data = { "data": { "flags": make_items_map(flags), "segments": make_items_map(segments) } }
+    return 'event:put\ndata: %s\n\n' % json.dumps(data)
+
+def make_patch_event(kind, item):
+    path = '%s%s' % (kind.stream_api_path, item['key'])
+    data = { "path": path, "data": item }
+    return 'event:patch\ndata: %s\n\n' % json.dumps(data)
+
+def make_delete_event(kind, key, version):
+    path = '%s%s' % (kind.stream_api_path, key)
+    data = { "path": path, "version": version }
+    return 'event:delete\ndata: %s\n\n' % json.dumps(data)
+
+def stream_content(event = None):
+    stream = ChunkedResponse({ 'Content-Type': 'text/event-stream' })
+    if event:
+        stream.push(event)
+    return stream
+
+def poll_content(flags = [], segments = []):
+    data = { "flags": make_items_map(flags), "segments": make_items_map(segments) }
+    return JsonResponse(data)
+
 class MockEventProcessor(EventProcessor):
     def __init__(self, *_):
         self._running = False
diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py
index 5b6a8fad..b5f68d4b 100644
--- a/testing/test_event_processor.py
+++ b/testing/test_event_processor.py
@@ -8,7 +8,7 @@
 from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator
 from ldclient.event_processor import DefaultEventProcessor
 from ldclient.util import log
-from testing.http_util import start_server
+from testing.http_util import start_server, BasicResponse
 from testing.stub_util import MockResponse, MockHttp
 
 
@@ -581,7 +581,7 @@ def test_can_use_https_proxy_via_config():
         _verify_https_proxy_is_used(server, config)
 
 def _verify_http_proxy_is_used(server, config):
-    server.setup_response(config.events_uri + '/bulk', 200, None)
+    server.for_path(config.events_uri + '/bulk', BasicResponse(200))
     with DefaultEventProcessor(config) as ep:
         ep.send_event({ 'kind': 'identify', 'user': user })
         ep.flush()
@@ -594,7 +594,7 @@ def _verify_http_proxy_is_used(server, config):
     assert req.method == 'POST'
 
 def _verify_https_proxy_is_used(server, config):
-    server.setup_response(config.events_uri + '/bulk', 200, None)
+    server.for_path(config.events_uri + '/bulk', BasicResponse(200))
    with DefaultEventProcessor(config) as ep:
        ep.send_event({ 'kind': 'identify', 'user': user })
        ep.flush()
diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py
index da72442c..3964ad10 100644
--- a/testing/test_feature_requester.py
+++ b/testing/test_feature_requester.py
@@ -5,7 +5,7 @@
 from ldclient.util import UnsuccessfulResponseException
 from ldclient.version import VERSION
 from ldclient.versioned_data_kind import FEATURES, SEGMENTS
-from testing.http_util import start_server
+from testing.http_util import start_server, BasicResponse, JsonResponse
 
 
 def test_get_all_data_returns_data():
@@ -17,7 +17,7 @@ def test_get_all_data_returns_data():
         segments = { 'segment1': { 'key': 'segment1' } }
         resp_data = { 'flags': flags, 'segments': segments }
         expected_data = { FEATURES: flags, SEGMENTS: segments }
-        server.setup_json_response('/sdk/latest-all', resp_data)
+        server.for_path('/sdk/latest-all', JsonResponse(resp_data))
 
         result = fr.get_all_data()
         assert result == expected_data
@@ -28,7 +28,7 @@ def test_get_all_data_sends_headers():
         fr = FeatureRequesterImpl(config)
 
         resp_data = { 'flags': {}, 'segments': {} }
-        server.setup_json_response('/sdk/latest-all', resp_data)
+        server.for_path('/sdk/latest-all', JsonResponse(resp_data))
 
         fr.get_all_data()
         req = server.require_request()
@@ -43,7 +43,7 @@ def test_get_all_data_sends_wrapper_header():
         fr = FeatureRequesterImpl(config)
 
         resp_data = { 'flags': {}, 'segments': {} }
-        server.setup_json_response('/sdk/latest-all', resp_data)
+        server.for_path('/sdk/latest-all', JsonResponse(resp_data))
 
         fr.get_all_data()
         req = server.require_request()
@@ -56,7 +56,7 @@ def test_get_all_data_sends_wrapper_header_without_version():
         fr = FeatureRequesterImpl(config)
 
         resp_data = { 'flags': {}, 'segments': {} }
-        server.setup_json_response('/sdk/latest-all', resp_data)
+        server.for_path('/sdk/latest-all', JsonResponse(resp_data))
 
         fr.get_all_data()
         req = server.require_request()
@@ -74,28 +74,28 @@ def test_get_all_data_can_use_cached_data():
         expected_data1 = { FEATURES: {}, SEGMENTS: {} }
         expected_data2 = { FEATURES: { 'flag1': { 'key': 'flag1' } }, SEGMENTS: {} }
         req_path = '/sdk/latest-all'
-        server.setup_json_response(req_path, resp_data1, { 'Etag': etag1 })
+        server.for_path(req_path, JsonResponse(resp_data1, { 'Etag': etag1 }))
 
         result = fr.get_all_data()
         assert result == expected_data1
         req = server.require_request()
         assert 'If-None-Match' not in req.headers.keys()
 
-        server.setup_response(req_path, 304, None, { 'Etag': etag1 })
+        server.for_path(req_path, BasicResponse(304, None, { 'Etag': etag1 }))
 
         result = fr.get_all_data()
         assert result == expected_data1
         req = server.require_request()
         assert req.headers['If-None-Match'] == etag1
 
-        server.setup_json_response(req_path, resp_data2, { 'Etag': etag2 })
+        server.for_path(req_path, JsonResponse(resp_data2, { 'Etag': etag2 }))
 
         result = fr.get_all_data()
         assert result == expected_data2
         req = server.require_request()
         assert req.headers['If-None-Match'] == etag1
 
-        server.setup_response(req_path, 304, None, { 'Etag': etag2 })
+        server.for_path(req_path, BasicResponse(304, None, { 'Etag': etag2 }))
 
         result = fr.get_all_data()
         assert result == expected_data2
@@ -108,7 +108,7 @@ def test_get_one_flag_returns_data():
         fr = FeatureRequesterImpl(config)
         key = 'flag1'
         flag_data = { 'key': key }
-        server.setup_json_response('/sdk/latest-flags/' + key, flag_data)
+        server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data))
         result = fr.get_one(FEATURES, key)
         assert result == flag_data
 
@@ -118,7 +118,7 @@ def test_get_one_flag_sends_headers():
         fr = FeatureRequesterImpl(config)
         key = 'flag1'
         flag_data = { 'key': key }
-        server.setup_json_response('/sdk/latest-flags/' + key, flag_data)
+        server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data))
         fr.get_one(FEATURES, key)
         req = server.require_request()
         assert req.headers['Authorization'] == 'sdk-key'
@@ -132,7 +132,7 @@ def test_get_one_flag_sends_wrapper_header():
         fr = FeatureRequesterImpl(config)
         key = 'flag1'
         flag_data = { 'key': key }
-        server.setup_json_response('/sdk/latest-flags/' + key, flag_data)
+        server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data))
         fr.get_one(FEATURES, key)
         req = server.require_request()
         assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0'
@@ -144,7 +144,7 @@ def test_get_one_flag_sends_wrapper_header_without_version():
         fr = FeatureRequesterImpl(config)
         key = 'flag1'
         flag_data = { 'key': key }
-        server.setup_json_response('/sdk/latest-flags/' + key, flag_data)
+        server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data))
         fr.get_one(FEATURES, key)
         req = server.require_request()
         assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask'
@@ -166,7 +166,7 @@ def test_get_one_flag_does_not_use_etags():
         key = 'flag1'
         flag_data = { 'key': key }
         req_path = '/sdk/latest-flags/' + key
-        server.setup_json_response(req_path, flag_data, { 'Etag': etag })
+        server.for_path(req_path, JsonResponse(flag_data, { 'Etag': etag }))
 
         result = fr.get_one(FEATURES, key)
         assert result == flag_data
@@ -205,7 +205,7 @@ def _verify_http_proxy_is_used(server, config):
 
     resp_data = { 'flags': {}, 'segments': {} }
     expected_data = { FEATURES: {}, SEGMENTS: {} }
-    server.setup_json_response(config.base_uri + '/sdk/latest-all', resp_data)
+    server.for_path(config.base_uri + '/sdk/latest-all', JsonResponse(resp_data))
 
     # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the
    # HTTP client, so we should be able to see the request go through. Note that the URI path will
@@ -219,7 +219,7 @@ def _verify_https_proxy_is_used(server, config):
     fr = FeatureRequesterImpl(config)
 
     resp_data = { 'flags': {}, 'segments': {} }
-    server.setup_json_response(config.base_uri + '/sdk/latest-all', resp_data)
+    server.for_path(config.base_uri + '/sdk/latest-all', JsonResponse(resp_data))
 
     # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but
    # it can still record that it *got* the request, which proves that the request went to the proxy.
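
To illustrate how the rewritten http_util handlers compose (a minimal sketch, not part of either patch, assuming only the classes defined in testing/http_util.py above; the path and payload are arbitrary examples):

    from testing.http_util import start_server, BasicResponse, JsonResponse, SequentialHandler

    # SequentialHandler serves one inner handler per request and then sticks on
    # the last one, so this path fails once with a 503 and afterwards always
    # returns an empty poll payload.
    with start_server() as server:
        flaky = SequentialHandler(BasicResponse(503), JsonResponse({ 'flags': {}, 'segments': {} }))
        server.for_path('/sdk/latest-all', flaky)
        # point a client at server.uri and make two requests to observe both behaviors
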
diff --git a/testing/test_integration_init.py b/testing/test_integration_init.py
deleted file mode 100644
index 5665ac0b..00000000
--- a/testing/test_integration_init.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import logging
-import sys
-
-import pytest
-
-import ldclient
-from ldclient import Config
-from testing import sdk_key
-from testing.sync_util import wait_until
-
-logging.basicConfig(level=logging.DEBUG)
-
-
-# skipping for Python 2.6 since it is incompatible with LaunchDarkly's streaming connection due to SNI
-@pytest.mark.skipif(sdk_key is None or sys.version_info < (2, 7),
-                    reason="Requires Python >=2.7 and LD_SDK_KEY environment variable to be set")
-def test_set_sdk_key_before_init():
-    ldclient.set_config(Config.default())
-
-    ldclient.set_sdk_key(sdk_key)
-    wait_until(ldclient.get().is_initialized, timeout=30)
-
-    ldclient.get().close()
-
-
-# skipping for Python 2.6 since it is incompatible with LaunchDarkly's streaming connection due to SNI
-@pytest.mark.skipif(sdk_key is None or sys.version_info < (2, 7),
-                    reason="Requires Python >=2.7 and LD_SDK_KEY environment variable to be set")
-def test_set_sdk_key_after_init():
-    ldclient.set_config(Config.default())
-    assert ldclient.get().is_initialized() is False
-    ldclient.set_sdk_key(sdk_key)
-    wait_until(ldclient.get().is_initialized, timeout=30)
-
-    ldclient.get().close()
-
-
-# skipping for Python 2.6 since it is incompatible with LaunchDarkly's streaming connection due to SNI
-@pytest.mark.skipif(sdk_key is None or sys.version_info < (2, 7),
-                    reason="Requires Python >=2.7 and LD_SDK_KEY environment variable to be set")
-def test_set_config():
-    offline_config = ldclient.Config(offline=True)
-    online_config = ldclient.Config(sdk_key=sdk_key, offline=False)
-
-    ldclient.set_config(offline_config)
-    assert ldclient.get().is_offline() is True
-
-    ldclient.set_config(online_config)
-    assert ldclient.get().is_offline() is False
-    wait_until(ldclient.get().is_initialized, timeout=30)
-
-    ldclient.get().close()
diff --git a/testing/test_integration_ldclient.py b/testing/test_integration_ldclient.py
deleted file mode 100644
index 7dc0ba74..00000000
--- a/testing/test_integration_ldclient.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import logging
-import sys
-
-import pytest
-
-from ldclient import Config
-from ldclient import LDClient
-from testing import sdk_key
-from testing.sync_util import wait_until
-
-logging.basicConfig(level=logging.DEBUG)
-
-
-# skipping for Python 2.6 since it is incompatible with LaunchDarkly's streaming connection due to SNI
-@pytest.mark.skipif(sdk_key is None or sys.version_info < (2, 7),
-                    reason="Requires Python >=2.7 and LD_SDK_KEY environment variable to be set")
-def test_ctor_with_sdk_key():
-    client = LDClient(sdk_key=sdk_key)
-    wait_until(client.is_initialized, timeout=10)
-
-    client.close()
-
-
-# skipping for Python 2.6 since it is incompatible with LaunchDarkly's streaming connection due to SNI
-@pytest.mark.skipif(sdk_key is None or sys.version_info < (2, 7),
-                    reason="Requires Python >=2.7 and LD_SDK_KEY environment variable to be set")
-def test_ctor_with_sdk_key_and_config():
-    client = LDClient(sdk_key=sdk_key, config=Config.default())
-    wait_until(client.is_initialized, timeout=10)
-
-    client.close()
-
-
-# skipping for Python 2.6 since it is incompatible with LaunchDarkly's streaming connection due to SNI
-@pytest.mark.skipif(sdk_key is None or sys.version_info < (2, 7),
-                    reason="Requires Python >=2.7 and LD_SDK_KEY environment variable to be set")
-def test_ctor_with_config():
-    client = LDClient(config=Config(sdk_key=sdk_key))
-    wait_until(client.is_initialized, timeout=10)
-
-    client.close()
-
-
-#polling
-@pytest.mark.skipif(sdk_key is None,
-                    reason="requires LD_SDK_KEY environment variable to be set")
-def test_ctor_with_config_polling():
-    client = LDClient(config=Config(sdk_key=sdk_key, stream=False))
-    wait_until(client.is_initialized, timeout=10)
-
-    client.close()
diff --git a/testing/test_ldclient_end_to_end.py b/testing/test_ldclient_end_to_end.py
new file mode 100644
index 00000000..48968b9f
--- /dev/null
+++ b/testing/test_ldclient_end_to_end.py
@@ -0,0 +1,163 @@
+from ldclient.client import LDClient
+from ldclient.config import Config, HTTPConfig
+from testing.http_util import BasicResponse, SequentialHandler, start_secure_server, start_server
+from testing.stub_util import make_put_event, poll_content, stream_content
+
+import json
+import pytest
+import sys
+
+sdk_key = 'sdk-key'
+user = { 'key': 'userkey' }
+always_true_flag = { 'key': 'flagkey', 'version': 1, 'on': False, 'offVariation': 1, 'variations': [ False, True ] }
+
+def test_client_starts_in_streaming_mode():
+    with start_server() as stream_server:
+        with stream_content(make_put_event([ always_true_flag ])) as stream_handler:
+            stream_server.for_path('/all', stream_handler)
+            config = Config(sdk_key = sdk_key, stream_uri = stream_server.uri, send_events = False)
+
+            with LDClient(config = config) as client:
+                assert client.is_initialized()
+                assert client.variation(always_true_flag['key'], user, False) == True
+
+                r = stream_server.await_request()
+                assert r.headers['Authorization'] == sdk_key
+
+def test_client_fails_to_start_in_streaming_mode_with_401_error():
+    with start_server() as stream_server:
+        stream_server.for_path('/all', BasicResponse(401))
+        config = Config(sdk_key = sdk_key, stream_uri = stream_server.uri, send_events = False)
+
+        with LDClient(config = config) as client:
+            assert not client.is_initialized()
+            assert client.variation(always_true_flag['key'], user, False) == False
+
+def test_client_retries_connection_in_streaming_mode_with_non_fatal_error():
+    with start_server() as stream_server:
+        with stream_content(make_put_event([ always_true_flag ])) as stream_handler:
+            error_then_success = SequentialHandler(BasicResponse(503), stream_handler)
+            stream_server.for_path('/all', error_then_success)
+            config = Config(sdk_key = sdk_key, stream_uri = stream_server.uri, initial_reconnect_delay = 0.001, send_events = False)
+
+            with LDClient(config = config) as client:
+                assert client.is_initialized()
+                assert client.variation(always_true_flag['key'], user, False) == True
+
+                r = stream_server.await_request()
+                assert r.headers['Authorization'] == sdk_key
+
+def test_client_starts_in_polling_mode():
+    with start_server() as poll_server:
+        poll_server.for_path('/sdk/latest-all', poll_content([ always_true_flag ]))
+        config = Config(sdk_key = sdk_key, base_uri = poll_server.uri, stream = False, send_events = False)
+
+        with LDClient(config = config) as client:
+            assert client.is_initialized()
+            assert client.variation(always_true_flag['key'], user, False) == True
+
+            r = poll_server.await_request()
+            assert r.headers['Authorization'] == sdk_key
+
+def test_client_fails_to_start_in_polling_mode_with_401_error():
+    with start_server() as poll_server:
+        poll_server.for_path('/sdk/latest-all', BasicResponse(401))
+        config = Config(sdk_key = sdk_key, base_uri = poll_server.uri, stream = False, send_events = False)
+
+        with LDClient(config = config) as client:
+            assert not client.is_initialized()
+            assert client.variation(always_true_flag['key'], user, False) == False
+
+def test_client_sends_event_without_diagnostics():
+    with start_server() as poll_server:
+        with start_server() as events_server:
+            poll_server.for_path('/sdk/latest-all', poll_content([ always_true_flag ]))
+            events_server.for_path('/bulk', BasicResponse(202))
+
+            config = Config(sdk_key = sdk_key, base_uri = poll_server.uri, events_uri = events_server.uri, stream = False,
+                            diagnostic_opt_out = True)
+            with LDClient(config = config) as client:
+                assert client.is_initialized()
+                client.identify(user)
+                client.flush()
+
+                r = events_server.await_request()
+                assert r.headers['Authorization'] == sdk_key
+                data = json.loads(r.body)
+                assert len(data) == 1
+                assert data[0]['kind'] == 'identify'
+
+def test_client_sends_diagnostics():
+    with start_server() as poll_server:
+        with start_server() as events_server:
+            poll_server.for_path('/sdk/latest-all', poll_content([ always_true_flag ]))
+            events_server.for_path('/diagnostic', BasicResponse(202))
+
+            config = Config(sdk_key = sdk_key, base_uri = poll_server.uri, events_uri = events_server.uri, stream = False)
+            with LDClient(config = config) as client:
+                assert client.is_initialized()
+
+                r = events_server.await_request()
+                assert r.headers['Authorization'] == sdk_key
+                data = json.loads(r.body)
+                assert data['kind'] == 'diagnostic-init'
+
+# The TLS tests are skipped in Python 3.3 because the embedded HTTPS server does not work correctly, causing
+# a TLS handshake failure on the client side. It's unclear whether this is a problem with the self-signed
+# certificate we are using or with some other server settings, but it does not appear to be a client-side
+# problem.
+
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+def test_cannot_connect_with_selfsigned_cert_by_default():
+    with start_secure_server() as server:
+        server.for_path('/sdk/latest-all', poll_content())
+        config = Config(
+            sdk_key = 'sdk_key',
+            base_uri = server.uri,
+            stream = False,
+            send_events = False
+        )
+        with LDClient(config = config, start_wait = 1.5) as client:
+            assert not client.is_initialized()
+
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false():
+    with start_secure_server() as server:
+        server.for_path('/sdk/latest-all', poll_content())
+        config = Config(
+            sdk_key = 'sdk_key',
+            base_uri = server.uri,
+            stream = False,
+            send_events = False,
+            verify_ssl = False
+        )
+        with LDClient(config = config) as client:
+            assert client.is_initialized()
+
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true():
+    with start_secure_server() as server:
+        server.for_path('/sdk/latest-all', poll_content())
+        config = Config(
+            sdk_key = 'sdk_key',
+            base_uri = server.uri,
+            stream = False,
+            send_events = False,
+            http = HTTPConfig(disable_ssl_verification = True)
+        )
+        with LDClient(config = config) as client:
+            assert client.is_initialized()
+
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+def test_can_connect_with_selfsigned_cert_by_setting_ca_certs():
+    with start_secure_server() as server:
+        server.for_path('/sdk/latest-all', poll_content())
+        config = Config(
+            sdk_key = 'sdk_key',
+            base_uri = server.uri,
+            stream = False,
+            send_events = False,
+            http = HTTPConfig(ca_certs = './testing/selfsigned.pem')
+        )
+        with LDClient(config = config) as client:
+            assert client.is_initialized()
diff --git a/testing/test_ldclient_singleton.py b/testing/test_ldclient_singleton.py
new file mode 100644
index 00000000..6dba8262
--- /dev/null
+++ b/testing/test_ldclient_singleton.py
@@ -0,0 +1,70 @@
+import ldclient
+from ldclient import _reset_client
+from ldclient.config import Config
+from testing.http_util import start_server, BasicResponse
+from testing.stub_util import make_put_event, stream_content
+from testing.sync_util import wait_until
+import json
+
+sdk_key = 'sdk-key'
+
+# These are end-to-end tests like test_ldclient_end_to_end, but less detailed in terms of the client's
+# network behavior because what we're really testing is the singleton mechanism.
+
+def test_set_sdk_key_before_init():
+    _reset_client()
+    with start_server() as stream_server:
+        with stream_content(make_put_event()) as stream_handler:
+            try:
+                stream_server.for_path('/all', stream_handler)
+
+                ldclient.set_config(Config(stream_uri = stream_server.uri, send_events = False))
+                ldclient.set_sdk_key(sdk_key)
+                wait_until(ldclient.get().is_initialized, timeout=10)
+
+                r = stream_server.await_request()
+                assert r.headers['Authorization'] == sdk_key
+            finally:
+                _reset_client()
+
+def test_set_sdk_key_after_init():
+    _reset_client()
+    with start_server() as stream_server:
+        with stream_content(make_put_event()) as stream_handler:
+            try:
+                stream_server.for_path('/all', BasicResponse(401))
+
+                ldclient.set_config(Config(stream_uri = stream_server.uri, send_events = False))
+                assert ldclient.get().is_initialized() is False
+
+                r = stream_server.await_request()
+                assert r.headers['Authorization'] == ''
+
+                stream_server.for_path('/all', stream_handler)
+
+                ldclient.set_sdk_key(sdk_key)
+                wait_until(ldclient.get().is_initialized, timeout=30)
+
+                r = stream_server.await_request()
+                assert r.headers['Authorization'] == sdk_key
+            finally:
+                _reset_client()
+
+def test_set_config():
+    _reset_client()
+    with start_server() as stream_server:
+        with stream_content(make_put_event()) as stream_handler:
+            try:
+                stream_server.for_path('/all', stream_handler)
+
+                ldclient.set_config(Config(offline=True))
+                assert ldclient.get().is_offline() is True
+
+                ldclient.set_config(Config(sdk_key = sdk_key, stream_uri = stream_server.uri, send_events = False))
+                assert ldclient.get().is_offline() is False
+                wait_until(ldclient.get().is_initialized, timeout=10)
+
+                r = stream_server.await_request()
+                assert r.headers['Authorization'] == sdk_key
+            finally:
+                _reset_client()
diff --git a/testing/test_ldclient_tls.py b/testing/test_ldclient_tls.py
deleted file mode 100644
index d6accceb..00000000
--- a/testing/test_ldclient_tls.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from ldclient.client import LDClient
-from ldclient.config import Config, HTTPConfig
-from testing.http_util import start_secure_server
-import pytest
-import sys
-
-# These tests are skipped in Python 3.3 because the embedded HTTPS server does not work correctly, causing a
-# TLS handshake failure on the client side. It's unclear whether this is a problem with the self-signed
-# certificate we are using or with some other server settings, but it does not appear to be a client-side
-# problem.
-
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
-def test_cannot_connect_with_selfsigned_cert_by_default():
-    with start_secure_server() as server:
-        server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} })
-        config = Config(
-            sdk_key = 'sdk_key',
-            base_uri = server.uri,
-            stream = False
-        )
-        with LDClient(config = config, start_wait = 1.5) as client:
-            assert not client.is_initialized()
-
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
-def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false():
-    with start_secure_server() as server:
-        server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} })
-        config = Config(
-            sdk_key = 'sdk_key',
-            base_uri = server.uri,
-            stream = False,
-            send_events = False,
-            verify_ssl = False
-        )
-        with LDClient(config = config) as client:
-            assert client.is_initialized()
-
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
-def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true():
-    with start_secure_server() as server:
-        server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} })
-        config = Config(
-            sdk_key = 'sdk_key',
-            base_uri = server.uri,
-            stream = False,
-            send_events = False,
-            http = HTTPConfig(disable_ssl_verification = True)
-        )
-        with LDClient(config = config) as client:
-            assert client.is_initialized()
-
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
-def test_can_connect_with_selfsigned_cert_by_setting_ca_certs():
-    with start_secure_server() as server:
-        server.setup_json_response('/sdk/latest-all', { 'flags': {}, 'segments': {} })
-        config = Config(
-            sdk_key = 'sdk_key',
-            base_uri = server.uri,
-            stream = False,
-            send_events = False,
-            http = HTTPConfig(ca_certs = './testing/selfsigned.pem')
-        )
-        with LDClient(config = config) as client:
-            assert client.is_initialized()
diff --git a/testing/test_retry_delay.py b/testing/test_retry_delay.py
new file mode 100644
index 00000000..0538f34f
--- /dev/null
+++ b/testing/test_retry_delay.py
@@ -0,0 +1,81 @@
+from ldclient.impl.retry_delay import RetryDelayStrategy, DefaultBackoffStrategy, DefaultJitterStrategy
+
+import math
+import time
+
+def test_fixed_retry_delay():
+    d0 = 10
+    r = RetryDelayStrategy(d0, 0, None, None)
+    t0 = time.time() - 60
+    d1 = r.next_retry_delay(t0)
+    d2 = r.next_retry_delay(t0 + 1)
+    d3 = r.next_retry_delay(t0 + 2)
+    assert d1 == d0
+    assert d2 == d0
+    assert d3 == d0
+
+def test_backoff_without_jitter():
+    d0 = 10
+    max = 60
+    r = RetryDelayStrategy(d0, 0, DefaultBackoffStrategy(max), None)
+    t0 = time.time() - 60
+    d1 = r.next_retry_delay(t0)
+    d2 = r.next_retry_delay(t0 + 1)
+    d3 = r.next_retry_delay(t0 + 2)
+    d4 = r.next_retry_delay(t0 + 3)
+    assert d1 == d0
+    assert d2 == d0 * 2
+    assert d3 == d0 * 4
+    assert d4 == max
+
+def test_jitter_without_backoff():
+    d0 = 1
+    seed = 1000
+    r = RetryDelayStrategy(d0, 0, None, DefaultJitterStrategy(0.5, seed))
+    t0 = time.time() - 60
+    d1 = r.next_retry_delay(t0)
+    d2 = r.next_retry_delay(t0 + 1)
+    d3 = r.next_retry_delay(t0 + 2)
+    assert math.trunc(d1 * 1000) == 611  # these are the randomized values we expect from that fixed seed value
+    assert math.trunc(d2 * 1000) == 665
+    assert math.trunc(d3 * 1000) == 950
+
+def test_jitter_with_backoff():
+    d0 = 1
+    max = 60
+    seed = 1000
+    r = RetryDelayStrategy(d0, 0, DefaultBackoffStrategy(max), DefaultJitterStrategy(0.5, seed))
+    t0 = time.time() - 60
+    d1 = r.next_retry_delay(t0)
+    d2 = r.next_retry_delay(t0 + 1)
+    d3 = r.next_retry_delay(t0 + 2)
+    assert math.trunc(d1 * 1000) == 611
+    assert math.trunc(d2 / 2 * 1000) == 665
+    assert math.trunc(d3 / 4 * 1000) == 950
+
+def test_backoff_reset_interval():
+    d0 = 10
+    max = 60
+    reset_interval = 45
+    r = RetryDelayStrategy(d0, reset_interval, DefaultBackoffStrategy(max), None)
+
+    t0 = time.time() - 60
+    r.set_good_since(50)
+
+    t1 = t0 + 1
+    d1 = r.next_retry_delay(t1)
+    assert d1 == d0
+
+    t2 = t1 + 1
+    r.set_good_since(t2)
+
+    t3 = t2 + 10
+    d2 = r.next_retry_delay(t3)
+    assert d2 == d0 * 2
+
+    t4 = t3 + d2
+    r.set_good_since(t4)
+
+    t5 = t4 + reset_interval
+    d3 = r.next_retry_delay(t5)
+    assert d3 == d0  # it's gone back to the initial delay because reset_interval has elapsed since t4
diff --git a/testing/test_streaming.py b/testing/test_streaming.py
index 3f6c166d..75da9ea4 100644
--- a/testing/test_streaming.py
+++ b/testing/test_streaming.py
@@ -1,78 +1,214 @@
+import json
+import pytest
 from threading import Event
+import time
 
 from ldclient.config import Config
 from ldclient.diagnostics import _DiagnosticAccumulator
 from ldclient.feature_store import InMemoryFeatureStore
 from ldclient.streaming import StreamingUpdateProcessor
 from ldclient.version import VERSION
-from testing.http_util import start_server
+from ldclient.versioned_data_kind import FEATURES, SEGMENTS
+from testing.http_util import start_server, BasicResponse, CauseNetworkError, SequentialHandler
+from testing.stub_util import make_delete_event, make_patch_event, make_put_event, stream_content
 
+brief_delay = 0.001
+
+# These long timeouts are necessary because of a problem in the Windows CI environment where HTTP requests to
+# the test server running at localhost are *extremely* slow. It looks like a similar issue to what's
+# described at https://stackoverflow.com/questions/2617615/slow-python-http-server-on-localhost but we had no
+# luck with the advice that was given there.
+start_wait = 5
+update_wait = 3
 
-fake_event = 'event:put\ndata: {"data":{"flags":{},"segments":{}}}\n\n'
-response_headers = { 'Content-Type': 'text/event-stream' }
-
-# Note that our simple HTTP stub server implementation does not actually do streaming responses, so
-# in these tests the connection will get closed after the response, causing the streaming processor
-# to reconnect. For the purposes of the current tests, that's OK because we only care that the initial
-# request and response were handled correctly.
+def test_request_properties(): + store = InMemoryFeatureStore() + ready = Event() + + with start_server() as server: + with stream_content(make_put_event()) as stream: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) + server.for_path('/all', stream) + + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + req = server.await_request() + assert req.method == 'GET' + assert req.headers.get('Authorization') == 'sdk-key' + assert req.headers.get('User-Agent') == 'PythonClient/' + VERSION + assert req.headers.get('X-LaunchDarkly-Wrapper') is None -def test_uses_stream_uri(): +def test_sends_wrapper_header(): store = InMemoryFeatureStore() ready = Event() with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) - server.setup_response('/all', 200, fake_event, response_headers) + with stream_content(make_put_event()) as stream: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, + wrapper_name = 'Flask', wrapper_version = '0.1.0') + server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: - sp.start() - req = server.await_request() - assert req.method == 'GET' - ready.wait(1) - assert sp.initialized() + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + req = server.await_request() + assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' -def test_sends_headers(): +def test_sends_wrapper_header_without_version(): store = InMemoryFeatureStore() ready = Event() with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) - server.setup_response('/all', 200, fake_event, response_headers) + with stream_content(make_put_event()) as stream: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, + wrapper_name = 'Flask') + server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: - sp.start() - req = server.await_request() - assert req.headers.get('Authorization') == 'sdk-key' - assert req.headers.get('User-Agent') == 'PythonClient/' + VERSION - assert req.headers.get('X-LaunchDarkly-Wrapper') is None + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + req = server.await_request() + assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' -def test_sends_wrapper_header(): +def test_receives_put_event(): store = InMemoryFeatureStore() ready = Event() + flag = { 'key': 'flagkey', 'version': 1 } + segment = { 'key': 'segkey', 'version': 1 } with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, - wrapper_name = 'Flask', wrapper_version = '0.1.0') - server.setup_response('/all', 200, fake_event, response_headers) + with stream_content(make_put_event([ flag ], [ segment ])) as stream: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) + server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: - sp.start() - req = server.await_request() - assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + ready.wait(start_wait) + assert sp.initialized() + expect_item(store, FEATURES, flag) + expect_item(store, SEGMENTS, segment) -def test_sends_wrapper_header_without_version(): +def test_receives_patch_events(): store = InMemoryFeatureStore() ready = Event() + flagv1 = { 'key': 'flagkey', 'version': 1 } + 
flagv2 = { 'key': 'flagkey', 'version': 2 }
+    segmentv1 = { 'key': 'segkey', 'version': 1 }
+    segmentv2 = { 'key': 'segkey', 'version': 2 }
 
     with start_server() as server:
-        config = Config(sdk_key = 'sdk-key', stream_uri = server.uri,
-                        wrapper_name = 'Flask')
-        server.setup_response('/all', 200, fake_event, response_headers)
+        with stream_content(make_put_event([ flagv1 ], [ segmentv1 ])) as stream:
+            config = Config(sdk_key = 'sdk-key', stream_uri = server.uri)
+            server.for_path('/all', stream)
 
-        with StreamingUpdateProcessor(config, None, store, ready, None) as sp:
-            sp.start()
-            req = server.await_request()
-            assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask'
+            with StreamingUpdateProcessor(config, None, store, ready, None) as sp:
+                sp.start()
+                ready.wait(start_wait)
+                assert sp.initialized()
+                expect_item(store, FEATURES, flagv1)
+                expect_item(store, SEGMENTS, segmentv1)
+
+                stream.push(make_patch_event(FEATURES, flagv2))
+                expect_update(store, FEATURES, flagv2)
+
+                stream.push(make_patch_event(SEGMENTS, segmentv2))
+                expect_update(store, SEGMENTS, segmentv2)
+
+def test_receives_delete_events():
+    store = InMemoryFeatureStore()
+    ready = Event()
+    flagv1 = { 'key': 'flagkey', 'version': 1 }
+    segmentv1 = { 'key': 'segkey', 'version': 1 }
+
+    with start_server() as server:
+        with stream_content(make_put_event([ flagv1 ], [ segmentv1 ])) as stream:
+            config = Config(sdk_key = 'sdk-key', stream_uri = server.uri)
+            server.for_path('/all', stream)
+
+            with StreamingUpdateProcessor(config, None, store, ready, None) as sp:
+                sp.start()
+                ready.wait(start_wait)
+                assert sp.initialized()
+                expect_item(store, FEATURES, flagv1)
+                expect_item(store, SEGMENTS, segmentv1)
+
+                stream.push(make_delete_event(FEATURES, flagv1['key'], 2))
+                expect_delete(store, FEATURES, flagv1['key'])
+
+                stream.push(make_delete_event(SEGMENTS, segmentv1['key'], 2))
+                expect_delete(store, SEGMENTS, segmentv1['key'])
+
+def test_reconnects_if_stream_is_broken():
+    store = InMemoryFeatureStore()
+    ready = Event()
+    flagv1 = { 'key': 'flagkey', 'version': 1 }
+    flagv2 = { 'key': 'flagkey', 'version': 2 }
+
+    with start_server() as server:
+        with stream_content(make_put_event([ flagv1 ])) as stream1:
+            with stream_content(make_put_event([ flagv2 ])) as stream2:
+                config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay)
+                server.for_path('/all', SequentialHandler(stream1, stream2))
+
+                with StreamingUpdateProcessor(config, None, store, ready, None) as sp:
+                    sp.start()
+                    server.await_request()
+                    ready.wait(start_wait)
+                    assert sp.initialized()
+                    expect_item(store, FEATURES, flagv1)
+
+                    stream1.close()
+                    server.await_request()
+                    expect_update(store, FEATURES, flagv2)
+
+def test_retries_on_network_error():
+    error_handler = CauseNetworkError()
+    store = InMemoryFeatureStore()
+    ready = Event()
+    with start_server() as server:
+        with stream_content(make_put_event()) as stream:
+            two_errors_then_success = SequentialHandler(error_handler, error_handler, stream)
+            config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay)
+            server.for_path('/all', two_errors_then_success)
+
+            with StreamingUpdateProcessor(config, None, store, ready, None) as sp:
+                sp.start()
+                ready.wait(start_wait)
+                assert sp.initialized()
+                server.await_request()
+                server.await_request()
+
+@pytest.mark.parametrize("status", [ 400, 408, 429, 500, 503 ])
+def test_recoverable_http_error(status):
+    error_handler = BasicResponse(status)
+    store = InMemoryFeatureStore()
+    
ready = Event() + with start_server() as server: + with stream_content(make_put_event()) as stream: + two_errors_then_success = SequentialHandler(error_handler, error_handler, stream) + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay) + server.for_path('/all', two_errors_then_success) + + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + ready.wait(start_wait) + assert sp.initialized() + server.should_have_requests(3) + +@pytest.mark.parametrize("status", [ 401, 403, 404 ]) +def test_unrecoverable_http_error(status): + error_handler = BasicResponse(status) + store = InMemoryFeatureStore() + ready = Event() + with start_server() as server: + with stream_content(make_put_event()) as stream: + error_then_success = SequentialHandler(error_handler, stream) + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay) + server.for_path('/all', error_then_success) + + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + ready.wait(5) + assert not sp.initialized() + server.should_have_requests(1) def test_can_use_http_proxy_via_environment_var(monkeypatch): with start_server() as server: @@ -99,57 +235,80 @@ def test_can_use_https_proxy_via_config(): def _verify_http_proxy_is_used(server, config): store = InMemoryFeatureStore() ready = Event() - server.setup_response(config.stream_base_uri + '/all', 200, fake_event, response_headers) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: - sp.start() - # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the - # HTTP client, so we should be able to see the request go through. Note that the URI path will - # actually be an absolute URI for a proxy request. - req = server.await_request() - assert req.method == 'GET' - ready.wait(1) - assert sp.initialized() + with stream_content(make_put_event()) as stream: + server.for_path(config.stream_base_uri + '/all', stream) + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. Note that the URI path will + # actually be an absolute URI for a proxy request. + req = server.await_request() + assert req.method == 'GET' + ready.wait(start_wait) + assert sp.initialized() def _verify_https_proxy_is_used(server, config): store = InMemoryFeatureStore() ready = Event() - server.setup_response(config.stream_base_uri + '/all', 200, fake_event, response_headers) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: - sp.start() - # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but - # it can still record that it *got* the request, which proves that the request went to the proxy. - req = server.await_request() - assert req.method == 'CONNECT' + with stream_content(make_put_event()) as stream: + server.for_path(config.stream_base_uri + '/all', stream) + with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + sp.start() + # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but + # it can still record that it *got* the request, which proves that the request went to the proxy. 
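+            # (CONNECT is the method an HTTP client uses to open a TLS tunnel
+            # through a proxy, which is what the assertion below checks for.)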
+ req = server.await_request() + assert req.method == 'CONNECT' def test_records_diagnostic_on_stream_init_success(): store = InMemoryFeatureStore() ready = Event() with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) - server.setup_response('/all', 200, fake_event, response_headers) - diag_accum = _DiagnosticAccumulator(1) + with stream_content(make_put_event()) as stream: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) + server.for_path('/all', stream) + diag_accum = _DiagnosticAccumulator(1) - with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: - sp.start() - server.await_request() - server.await_request() - recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] + with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: + sp.start() + ready.wait(start_wait) + recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] - assert len(recorded_inits) == 1 - assert recorded_inits[0]['failed'] is False + assert len(recorded_inits) == 1 + assert recorded_inits[0]['failed'] is False def test_records_diagnostic_on_stream_init_failure(): store = InMemoryFeatureStore() ready = Event() with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) - server.setup_response('/all', 200, 'event:put\ndata: {\n\n', response_headers) - diag_accum = _DiagnosticAccumulator(1) + with stream_content(make_put_event()) as stream: + error_then_success = SequentialHandler(BasicResponse(503), stream) + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay) + server.for_path('/all', error_then_success) + diag_accum = _DiagnosticAccumulator(1) - with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: - sp.start() - server.await_request() - server.await_request() - recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] + with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: + sp.start() + ready.wait(start_wait) + recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] + + assert len(recorded_inits) == 2 + assert recorded_inits[0]['failed'] is True + assert recorded_inits[1]['failed'] is False + +def expect_item(store, kind, item): + assert store.get(kind, item['key'], lambda x: x) == item + +def expect_update(store, kind, expected_item): + await_item(store, kind, expected_item['key'], expected_item) + +def expect_delete(store, kind, key): + await_item(store, kind, key, None) - assert recorded_inits[0]['failed'] is True +def await_item(store, kind, key, expected_item): + deadline = time.time() + update_wait + while time.time() < deadline: + time.sleep(0.05) + current_item = store.get(kind, key, lambda x: x) + if current_item == expected_item: + return + assert False, 'expected %s = %s but value was still %s after %d seconds' % (key, json.dumps(expected_item), json.dumps(current_item), update_wait) From 40168879e2d4e6a06fcaa0363b79c0841ac4f075 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 30 Mar 2020 14:47:20 -0700 Subject: [PATCH 194/356] streams shouldn't use the same read timeout as the rest of the SDK (#132) --- ldclient/impl/http.py | 11 +++++++++-- ldclient/sse_client.py | 4 +--- ldclient/streaming.py | 9 ++++++--- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py index eaa82077..bcc97e4e 100644 --- a/ldclient/impl/http.py +++ b/ldclient/impl/http.py @@ -17,15 +17,22 
@@ def _http_factory(config): return HTTPFactory(_base_headers(config), config.http) class HTTPFactory(object): - def __init__(self, base_headers, http_config): + def __init__(self, base_headers, http_config, override_read_timeout=None): self.__base_headers = base_headers self.__http_config = http_config - self.__timeout = urllib3.Timeout(connect=http_config.connect_timeout, read=http_config.read_timeout) + self.__timeout = urllib3.Timeout( + connect=http_config.connect_timeout, + read=http_config.read_timeout if override_read_timeout is None else override_read_timeout + ) @property def base_headers(self): return self.__base_headers + @property + def http_config(self): + return self.__http_config + @property def timeout(self): return self.__timeout diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index b257a443..eca088f6 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -28,8 +28,6 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo self.url = url self.last_id = last_id self.retry = retry - self._connect_timeout = connect_timeout - self._read_timeout = read_timeout self._chunk_size = chunk_size if http_factory: @@ -37,7 +35,7 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo base_headers = http_factory.base_headers else: # for backward compatibility in case anyone else is using this class - self._timeout = urllib3.Timeout(connect=self._connect_timeout, read=self._read_timeout) + self._timeout = urllib3.Timeout(connect=connect_timeout, read=read_timeout) base_headers = {} # Optional support for passing in an HTTP client diff --git a/ldclient/streaming.py b/ldclient/streaming.py index 7e0fd52b..abc54247 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -12,7 +12,7 @@ import math import time -from ldclient.impl.http import _http_factory +from ldclient.impl.http import HTTPFactory, _http_factory from ldclient.impl.retry_delay import RetryDelayStrategy, DefaultBackoffStrategy, DefaultJitterStrategy from ldclient.interfaces import UpdateProcessor from ldclient.sse_client import SSEClient @@ -75,8 +75,8 @@ def run(self): messages = self._connect() for msg in messages: if not self._running: - log.warning("but I'm done") break + self._retry_delay.set_good_since(time.time()) message_ok = self.process_message(self._store, self._requester, msg) if message_ok: self._record_stream_init(False) @@ -104,10 +104,13 @@ def _record_stream_init(self, failed): self._diagnostic_accumulator.record_stream_init(current_time, current_time - self._es_started, failed) def _connect(self): + # We don't want the stream to use the same read timeout as the rest of the SDK. 
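+        # A healthy stream can legitimately go minutes between events, with
+        # only periodic heartbeats in between, so the SDK-wide read_timeout
+        # would keep breaking the connection. stream_read_timeout is presumably
+        # a longer module-level constant defined outside this hunk.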
+ http_factory = _http_factory(self._config) + stream_http_factory = HTTPFactory(http_factory.base_headers, http_factory.http_config, override_read_timeout=stream_read_timeout) return SSEClient( self._uri, retry = None, # we're implementing our own retry - http_factory = _http_factory(self._config) + http_factory = stream_http_factory ) def stop(self): From 02a803f5b12dae3c5df4b7b6890b4d8b38bf1c90 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 8 May 2020 18:11:04 -0700 Subject: [PATCH 195/356] implement our own retry logic & logging for event posts, don't use urllib3.Retry (#133) --- ldclient/event_processor.py | 99 +++++++++++++++++++++++-------------- ldclient/util.py | 19 +++++-- 2 files changed, 79 insertions(+), 39 deletions(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index bbc18076..b94d800a 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -28,9 +28,8 @@ from ldclient.interfaces import EventProcessor from ldclient.repeating_timer import RepeatingTimer from ldclient.util import UnsuccessfulResponseException -from ldclient.util import _headers, _retryable_statuses from ldclient.util import log -from ldclient.util import http_error_message, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response +from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response, _headers from ldclient.diagnostics import create_diagnostic_init __MAX_FLUSH_THREADS__ = 5 @@ -141,18 +140,6 @@ def _get_userkey(self, event): return str(event['user'].get('key')) -class _EventRetry(urllib3.Retry): - def __init__(self): - urllib3.Retry.__init__(self, total=1, - method_whitelist=False, # Enable retry on POST - status_forcelist=_retryable_statuses, - raise_on_status=False) - - # Override backoff time to be flat 1 second - def get_backoff_time(self): - return 1 - - class EventPayloadSendTask(object): def __init__(self, http, config, formatter, payload, response_fn): self._http = http @@ -175,16 +162,17 @@ def _do_send(self, output_events): try: json_body = json.dumps(output_events) log.debug('Sending events payload: ' + json_body) - hdrs = _headers(self._config) - hdrs['X-LaunchDarkly-Event-Schema'] = str(__CURRENT_EVENT_SCHEMA__) - hdrs['X-LaunchDarkly-Payload-ID'] = str(uuid.uuid4()) - uri = self._config.events_uri - r = self._http.request('POST', uri, - headers=hdrs, - timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), - body=json_body, - retries=_EventRetry()) - self._response_fn(r) + payload_id = str(uuid.uuid4()) + r = _post_events_with_retry( + self._http, + self._config, + self._config.events_uri, + payload_id, + json_body, + "%d events" % len(self._payload.events) + ) + if r: + self._response_fn(r) return r except Exception as e: log.warning( @@ -202,13 +190,14 @@ def run(self): try: json_body = json.dumps(self._event_body) log.debug('Sending diagnostic event: ' + json_body) - hdrs = _headers(self._config) - uri = self._config.events_base_uri + '/diagnostic' - r = self._http.request('POST', uri, - headers=hdrs, - timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), - body=json_body, - retries=1) + _post_events_with_retry( + self._http, + self._config, + self._config.events_base_uri + '/diagnostic', + None, + json_body, + "diagnostic event" + ) except Exception as e: log.warning( 'Unhandled exception in event processor. Diagnostic event was not sent. 
[%s]', e) @@ -381,11 +370,9 @@ def _handle_response(self, r): if server_date is not None: timestamp = int(time.mktime(server_date) * 1000) self._last_known_past_time = timestamp - if r.status > 299: - log.error(http_error_message(r.status, "event delivery", "some events were dropped")) - if not is_http_error_recoverable(r.status): - self._disabled = True - return + if r.status > 299 and not is_http_error_recoverable(r.status): + self._disabled = True + return def _send_and_reset_diagnostics(self): if self._diagnostic_accumulator is not None: @@ -472,3 +459,43 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.stop() + + +def _post_events_with_retry( + http_client, + config, + uri, + payload_id, + body, + events_description +): + hdrs = _headers(config) + hdrs['Content-Type'] = 'application/json' + if payload_id: + hdrs['X-LaunchDarkly-Event-Schema'] = str(__CURRENT_EVENT_SCHEMA__) + hdrs['X-LaunchDarkly-Payload-ID'] = payload_id + can_retry = True + context = "posting %s" % events_description + while True: + next_action_message = "will retry" if can_retry else "some events were dropped" + try: + r = http_client.request( + 'POST', + uri, + headers=hdrs, + body=body, + timeout=urllib3.Timeout(connect=config.connect_timeout, read=config.read_timeout), + retries=0 + ) + if r.status < 300: + return r + recoverable = check_if_error_is_recoverable_and_log(context, r.status, None, next_action_message) + if not recoverable: + return r + except Exception as e: + check_if_error_is_recoverable_and_log(context, None, str(e), next_action_message) + if not can_retry: + return None + can_retry = False + # fixed delay of 1 second for event retries + time.sleep(1) diff --git a/ldclient/util.py b/ldclient/util.py index 189247db..3880c330 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -89,15 +89,28 @@ def is_http_error_recoverable(status): return True # all other errors are recoverable +def http_error_description(status): + return "HTTP error %d%s" % (status, " (invalid SDK key)" if (status == 401 or status == 403) else "") + + def http_error_message(status, context, retryable_message = "will retry"): - return "Received HTTP error %d%s for %s - %s" % ( - status, - " (invalid SDK key)" if (status == 401 or status == 403) else "", + return "Received %s for %s - %s" % ( + http_error_description(status), context, retryable_message if is_http_error_recoverable(status) else "giving up permanently" ) +def check_if_error_is_recoverable_and_log(error_context, status_code, error_desc, recoverable_message): + if status_code and (error_desc is None): + error_desc = http_error_description(status_code) + if status_code and not is_http_error_recoverable(status_code): + log.error("Error %s (giving up permanently): %s" % (error_context, error_desc)) + return False + log.warning("Error %s (%s): %s" % (error_context, recoverable_message, error_desc)) + return True + + def stringify_attrs(attrdict, attrs): if attrdict is None: return None From 50f2d94fb85b77d079bac4a825f6fbfb40b02314 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 23 Jun 2020 12:19:00 -0700 Subject: [PATCH 196/356] remove support for indirect/patch and indirect/put --- ldclient/client.py | 11 ++++++----- ldclient/streaming.py | 19 +++---------------- testing/test_streaming.py | 28 ++++++++++++++-------------- 3 files changed, 23 insertions(+), 35 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index c4406b3e..a02a49f5 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -141,17 +141,18 @@ def 
_make_update_processor(self, config, store, ready, diagnostic_accumulator): if config.offline or config.use_ldd: return NullUpdateProcessor(config, store, ready) + if config.stream: + return StreamingUpdateProcessor(config, store, ready, diagnostic_accumulator) + + log.info("Disabling streaming API") + log.warning("You should only disable the streaming API if instructed to do so by LaunchDarkly support") + if config.feature_requester_class: feature_requester = config.feature_requester_class(config) else: feature_requester = FeatureRequesterImpl(config) """ :type: FeatureRequester """ - if config.stream: - return StreamingUpdateProcessor(config, feature_requester, store, ready, diagnostic_accumulator) - - log.info("Disabling streaming API") - log.warning("You should only disable the streaming API if instructed to do so by LaunchDarkly support") return PollingUpdateProcessor(config, feature_requester, store, ready) def get_sdk_key(self): diff --git a/ldclient/streaming.py b/ldclient/streaming.py index abc54247..061bca65 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -33,12 +33,11 @@ class StreamingUpdateProcessor(Thread, UpdateProcessor): - def __init__(self, config, requester, store, ready, diagnostic_accumulator): + def __init__(self, config, store, ready, diagnostic_accumulator): Thread.__init__(self) self.daemon = True self._uri = config.stream_base_uri + STREAM_ALL_PATH self._config = config - self._requester = requester self._store = store self._running = False self._ready = ready @@ -77,7 +76,7 @@ def run(self): if not self._running: break self._retry_delay.set_good_since(time.time()) - message_ok = self.process_message(self._store, self._requester, msg) + message_ok = self.process_message(self._store, msg) if message_ok: self._record_stream_init(False) self._es_started = None @@ -122,7 +121,7 @@ def initialized(self): # Returns True if we initialized the feature store @staticmethod - def process_message(store, requester, msg): + def process_message(store, msg): if msg.event == 'put': all_data = json.loads(msg.data) init_data = { @@ -143,18 +142,6 @@ def process_message(store, requester, msg): store.upsert(target.kind, obj) else: log.warning("Patch for unknown path: %s", path) - elif msg.event == "indirect/patch": - path = msg.data - log.debug("Received indirect/patch event for %s", path) - target = StreamingUpdateProcessor._parse_path(path) - if target is not None: - store.upsert(target.kind, requester.get_one(target.kind, target.key)) - else: - log.warning("Indirect patch for unknown path: %s", path) - elif msg.event == "indirect/put": - log.debug("Received indirect/put event") - store.init(requester.get_all_data()) - return True elif msg.event == 'delete': payload = json.loads(msg.data) path = payload['path'] diff --git a/testing/test_streaming.py b/testing/test_streaming.py index 75da9ea4..dadac824 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -30,7 +30,7 @@ def test_request_properties(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() req = server.await_request() assert req.method == 'GET' @@ -48,7 +48,7 @@ def test_sends_wrapper_header(): wrapper_name = 'Flask', wrapper_version = '0.1.0') server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, 
ready, None) as sp: sp.start() req = server.await_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' @@ -63,7 +63,7 @@ def test_sends_wrapper_header_without_version(): wrapper_name = 'Flask') server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() req = server.await_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' @@ -79,7 +79,7 @@ def test_receives_put_event(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() ready.wait(start_wait) assert sp.initialized() @@ -99,7 +99,7 @@ def test_receives_patch_events(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() ready.wait(start_wait) assert sp.initialized() @@ -123,7 +123,7 @@ def test_receives_delete_events(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri) server.for_path('/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() ready.wait(start_wait) assert sp.initialized() @@ -148,7 +148,7 @@ def test_reconnects_if_stream_is_broken(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay) server.for_path('/all', SequentialHandler(stream1, stream2)) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() server.await_request ready.wait(start_wait) @@ -169,7 +169,7 @@ def test_retries_on_network_error(): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay) server.for_path('/all', two_errors_then_success) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() ready.wait(start_wait) assert sp.initialized() @@ -187,7 +187,7 @@ def test_recoverable_http_error(status): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay) server.for_path('/all', two_errors_then_success) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() ready.wait(start_wait) assert sp.initialized() @@ -204,7 +204,7 @@ def test_unrecoverable_http_error(status): config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, initial_reconnect_delay = brief_delay) server.for_path('/all', error_then_success) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() ready.wait(5) assert not sp.initialized() @@ -237,7 +237,7 @@ def _verify_http_proxy_is_used(server, config): ready = Event() with stream_content(make_put_event()) as stream: server.for_path(config.stream_base_uri + '/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() # For an insecure proxy request, our stub server behaves 
enough like the real thing to satisfy the # HTTP client, so we should be able to see the request go through. Note that the URI path will @@ -252,7 +252,7 @@ def _verify_https_proxy_is_used(server, config): ready = Event() with stream_content(make_put_event()) as stream: server.for_path(config.stream_base_uri + '/all', stream) - with StreamingUpdateProcessor(config, None, store, ready, None) as sp: + with StreamingUpdateProcessor(config, store, ready, None) as sp: sp.start() # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but # it can still record that it *got* the request, which proves that the request went to the proxy. @@ -268,7 +268,7 @@ def test_records_diagnostic_on_stream_init_success(): server.for_path('/all', stream) diag_accum = _DiagnosticAccumulator(1) - with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: + with StreamingUpdateProcessor(config, store, ready, diag_accum) as sp: sp.start() ready.wait(start_wait) recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] @@ -286,7 +286,7 @@ def test_records_diagnostic_on_stream_init_failure(): server.for_path('/all', error_then_success) diag_accum = _DiagnosticAccumulator(1) - with StreamingUpdateProcessor(config, None, store, ready, diag_accum) as sp: + with StreamingUpdateProcessor(config, store, ready, diag_accum) as sp: sp.start() ready.wait(start_wait) recorded_inits = diag_accum.create_event_and_reset(0, 0)['streamInits'] From a9fe2180346b41847a38676c68f2e8a2d01bef69 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 25 Jun 2020 15:34:41 -0700 Subject: [PATCH 197/356] remove unused logic for individual flag/segment poll for indirect/patch --- ldclient/feature_requester.py | 29 +++++------- ldclient/interfaces.py | 7 --- testing/stub_util.py | 3 -- testing/test_feature_requester.py | 76 ------------------------------- 4 files changed, 12 insertions(+), 103 deletions(-) diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 3ab812fe..4557104f 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -27,29 +27,20 @@ def __init__(self, config): self._cache = dict() self._http = _http_factory(config).create_pool_manager(1, config.base_uri) self._config = config + self._poll_uri = config.base_uri + LATEST_ALL_URI def get_all_data(self): - all_data = self._do_request(self._config.base_uri + LATEST_ALL_URI, True) - return { - FEATURES: all_data['flags'], - SEGMENTS: all_data['segments'] - } - - def get_one(self, kind, key): - return self._do_request(self._config.base_uri + kind.request_api_path + '/' + key, False) - - def _do_request(self, uri, allow_cache): + uri = self._poll_uri hdrs = _headers(self._config) - if allow_cache: - cache_entry = self._cache.get(uri) - if cache_entry is not None: - hdrs['If-None-Match'] = cache_entry.etag + cache_entry = self._cache.get(uri) + if cache_entry is not None: + hdrs['If-None-Match'] = cache_entry.etag r = self._http.request('GET', uri, headers=hdrs, timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), retries=1) throw_if_unsuccessful_response(r) - if r.status == 304 and allow_cache and cache_entry is not None: + if r.status == 304 and cache_entry is not None: data = cache_entry.data etag = cache_entry.etag from_cache = True @@ -57,8 +48,12 @@ def _do_request(self, uri, allow_cache): data = json.loads(r.data.decode('UTF-8')) etag = r.getheader('ETag') from_cache = False - if allow_cache and etag is not None: + if 
etag is not None: self._cache[uri] = CacheEntry(data=data, etag=etag) log.debug("%s response status:[%d] From cache? [%s] ETag:[%s]", uri, r.status, from_cache, etag) - return data + + return { + FEATURES: data['flags'], + SEGMENTS: data['segments'] + } diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 1a319494..6b49782c 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -263,13 +263,6 @@ def get_all(self): """ pass - def get_one(self, kind, key): - """ - Gets one Feature flag - :return: - """ - pass - class DiagnosticDescription(object): """ diff --git a/testing/stub_util.py b/testing/stub_util.py index a5aada7d..a5bd6b9f 100644 --- a/testing/stub_util.py +++ b/testing/stub_util.py @@ -67,9 +67,6 @@ def get_all_data(self): raise self.exception return self.all_data - def get_one(self, kind, key): - pass - class MockResponse(object): def __init__(self, status, headers): self._status = status diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py index 3964ad10..10f8d11e 100644 --- a/testing/test_feature_requester.py +++ b/testing/test_feature_requester.py @@ -102,82 +102,6 @@ def test_get_all_data_can_use_cached_data(): req = server.require_request() assert req.headers['If-None-Match'] == etag2 -def test_get_one_flag_returns_data(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = server.uri) - fr = FeatureRequesterImpl(config) - key = 'flag1' - flag_data = { 'key': key } - server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data)) - result = fr.get_one(FEATURES, key) - assert result == flag_data - -def test_get_one_flag_sends_headers(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = server.uri) - fr = FeatureRequesterImpl(config) - key = 'flag1' - flag_data = { 'key': key } - server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data)) - fr.get_one(FEATURES, key) - req = server.require_request() - assert req.headers['Authorization'] == 'sdk-key' - assert req.headers['User-Agent'] == 'PythonClient/' + VERSION - assert req.headers.get('X-LaunchDarkly-Wrapper') is None - -def test_get_one_flag_sends_wrapper_header(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = server.uri, - wrapper_name = 'Flask', wrapper_version = '0.1.0') - fr = FeatureRequesterImpl(config) - key = 'flag1' - flag_data = { 'key': key } - server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data)) - fr.get_one(FEATURES, key) - req = server.require_request() - assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask/0.1.0' - -def test_get_one_flag_sends_wrapper_header_without_version(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = server.uri, - wrapper_name = 'Flask') - fr = FeatureRequesterImpl(config) - key = 'flag1' - flag_data = { 'key': key } - server.for_path('/sdk/latest-flags/' + key, JsonResponse(flag_data)) - fr.get_one(FEATURES, key) - req = server.require_request() - assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' - -def test_get_one_flag_throws_on_error(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = server.uri) - fr = FeatureRequesterImpl(config) - with pytest.raises(UnsuccessfulResponseException) as e: - fr.get_one(FEATURES, 'didnt-set-up-a-response-for-this-flag') - assert e.value.status == 404 - -def test_get_one_flag_does_not_use_etags(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = 
server.uri) - fr = FeatureRequesterImpl(config) - - etag = 'my-etag' - key = 'flag1' - flag_data = { 'key': key } - req_path = '/sdk/latest-flags/' + key - server.for_path(req_path, JsonResponse(flag_data, { 'Etag': etag })) - - result = fr.get_one(FEATURES, key) - assert result == flag_data - req = server.require_request() - assert 'If-None-Match' not in req.headers.keys() - - result = fr.get_one(FEATURES, key) - assert result == flag_data - req = server.require_request() - assert 'If-None-Match' not in req.headers.keys() # did not send etag from previous request - def test_can_use_http_proxy_via_environment_var(monkeypatch): with start_server() as server: monkeypatch.setenv('http_proxy', server.uri) From e1c93da19de419a076e0c343b9652823d5f6f680 Mon Sep 17 00:00:00 2001 From: Elliot <35050275+Apache-HB@users.noreply.github.com> Date: Thu, 30 Jul 2020 10:45:03 -0400 Subject: [PATCH 198/356] Ehaisley/84082/remove python2 (#136) * remove all references to six and remove queue fallback imports * remove NullHandler logger backwards compat * update circleci config to remove python 2.7 tests * remove ordereddict backwards compat * update setup.py to no longer list python 2.7 as compatible * no longer inherit from object for python 2 backwards compat * update readme and manifest to reflect python 2.7 removal * remove unicode type compatibility * remove 2.7 support from circleci --- .circleci/config.yml | 27 ++-------- MANIFEST.in | 1 - README.md | 4 +- docs/requirements.txt | 1 - ldclient/__init__.py | 27 +--------- ldclient/client.py | 52 ++++++++----------- ldclient/config.py | 6 +-- ldclient/diagnostics.py | 2 +- ldclient/event_processor.py | 37 ++++++------- ldclient/event_summarizer.py | 2 +- ldclient/feature_store.py | 19 ++++--- ldclient/fixed_thread_pool.py | 18 +++---- ldclient/flag.py | 26 ++++------ ldclient/flags_state.py | 12 ++--- ldclient/impl/event_factory.py | 10 ++-- ldclient/impl/http.py | 10 ++-- .../dynamodb/dynamodb_feature_store.py | 28 +++++----- .../integrations/files/file_data_source.py | 37 +++++++------ ldclient/impl/retry_delay.py | 16 +++--- ldclient/integrations.py | 16 +++--- ldclient/interfaces.py | 18 +++---- ldclient/lru_cache.py | 26 +--------- ldclient/memoized_value.py | 2 +- ldclient/operators.py | 9 +--- ldclient/repeating_timer.py | 2 +- ldclient/sse_client.py | 17 ++---- ldclient/user_filter.py | 7 +-- ldclient/util.py | 24 ++------- requirements.txt | 1 - runtests.py | 2 +- setup.py | 4 +- testing/http_util.py | 46 ++++++++-------- testing/stub_util.py | 16 +++--- testing/test_diagnostics.py | 2 +- testing/test_feature_store.py | 16 +++--- testing/test_file_data_source.py | 3 +- testing/test_ldclient.py | 6 +-- 37 files changed, 208 insertions(+), 344 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6b0e096a..d6894b79 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,10 +6,6 @@ orbs: workflows: test: jobs: - - test-linux: - name: Python 2.7 - docker-image: circleci/python:2.7-jessie - test-with-codeclimate: true # we only need to run CodeClimate in one job - test-linux: name: Python 3.3 docker-image: circleci/python:3.3-jessie @@ -32,9 +28,6 @@ workflows: - test-linux: name: Python 3.8 docker-image: circleci/python:3.8-buster - - test-windows: - name: Windows Py2.7 - py3: false - test-windows: name: Windows Py3.3 py3: true @@ -115,21 +108,9 @@ jobs: type: boolean steps: - checkout - - when: - condition: <> - steps: - - run: - name: install Python 3 - command: choco install python --no-progress - - unless: - 
condition: <> - steps: - - run: - name: install Python 2.7 - command: | - $ProgressPreference = "SilentlyContinue" # prevents console errors from CircleCI host - iwr -outf python-2.7.16.amd64.msi https://www.python.org/ftp/python/2.7.16/python-2.7.16.amd64.msi - Start-Process msiexec.exe -Wait -ArgumentList '/I python-2.7.16.amd64.msi /quiet' + - run: + name: install Python 3 + command: choco install python --no-progress - run: name: set up DynamoDB command: | @@ -165,7 +146,6 @@ jobs: - run: name: install requirements command: | - $env:Path += ";C:\Python27\;C:\Python27\Scripts\" # has no effect if 2.7 isn't installed python --version pip install -r test-requirements.txt pip install -r consul-requirements.txt @@ -174,7 +154,6 @@ jobs: name: run tests command: | mkdir test-reports - $env:Path += ";C:\Python27\;C:\Python27\Scripts\" # has no effect if 2.7 isn't installed python -m pytest -s --junitxml=test-reports/junit.xml testing; - store_test_results: path: test-reports diff --git a/MANIFEST.in b/MANIFEST.in index 1a398256..35367703 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,4 +4,3 @@ include test-requirements.txt include consul-requirements.txt include dynamodb-requirements.txt include redis-requirements.txt -include python2.6-requirements.txt \ No newline at end of file diff --git a/README.md b/README.md index 7858bbc9..e2252f4e 100644 --- a/README.md +++ b/README.md @@ -8,12 +8,12 @@ ## LaunchDarkly overview [LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/docs/getting-started) using LaunchDarkly today! - + [![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly) ## Supported Python versions -This version of the LaunchDarkly SDK is compatible with Python 2.7 and 3.3 through 3.7. It is tested with the most recent patch releases of those versions. Python 2.6 is no longer supported. +This version of the LaunchDarkly SDK is compatible with Python 3.3 through 3.7. It is tested with the most recent patch releases of those versions. Python 2.7 is no longer supported. ## Getting started diff --git a/docs/requirements.txt b/docs/requirements.txt index f6c80357..15b59476 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -4,7 +4,6 @@ sphinx_rtd_theme backoff>=1.4.3 certifi>=2018.4.16 expiringdict>=1.1.4 -six>=1.10.0 pyRFC3339>=1.0 jsonpickle==0.9.3 semver>=2.7.9 diff --git a/ldclient/__init__.py b/ldclient/__init__.py index e05dfd97..773d6374 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -2,8 +2,6 @@ The ldclient module contains the most common top-level entry points for the SDK. """ -import logging - from ldclient.rwlock import ReadWriteLock from ldclient.version import VERSION from .client import * @@ -136,27 +134,4 @@ def _reset_client(): c.close() -# currently hidden from documentation - see docs/README.md -class NullHandler(logging.Handler): - """A :class:`logging.Handler` implementation that does nothing. - - .. deprecated:: 6.0.0 - You should not need to use this class. It was originally used in order to support Python 2.6, - which requires that at least one logging handler must always be configured. However, the SDK - no longer supports Python 2.6. 
- """ - def emit(self, record): - pass - - -if not log.handlers: - log.addHandler(NullHandler()) - -try: - # noinspection PyUnresolvedReferences - unicode -except NameError: - __BASE_TYPES__ = (str, float, int, bool) -else: - # noinspection PyUnresolvedReferences - __BASE_TYPES__ = (str, float, int, bool, unicode) +__BASE_TYPES__ = (str, float, int, bool) diff --git a/ldclient/client.py b/ldclient/client.py index a02a49f5..34340c22 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -21,13 +21,7 @@ from ldclient.streaming import StreamingUpdateProcessor from ldclient.util import check_uwsgi, log from ldclient.versioned_data_kind import FEATURES, SEGMENTS - -# noinspection PyBroadException -try: - import queue -except: - # noinspection PyUnresolvedReferences,PyPep8Naming - import Queue as queue # Python 3 +import queue from threading import Lock @@ -40,7 +34,7 @@ class _FeatureStoreClientWrapper(FeatureStore): def __init__(self, store): self.store = store - + def init(self, all_data): return self.store.init(_FeatureStoreDataSetSorter.sort_all_collections(all_data)) @@ -61,14 +55,14 @@ def initialized(self): return self.store.initialized -class LDClient(object): +class LDClient: """The LaunchDarkly SDK client object. Applications should configure the client at startup time and continue to use it throughout the lifetime of the application, rather than creating instances on the fly. The best way to do this is with the singleton methods :func:`ldclient.set_sdk_key()`, :func:`ldclient.set_config()`, and :func:`ldclient.get()`. However, you may also call the constructor directly if you need to maintain multiple instances. - + Client instances are thread-safe. """ def __init__(self, sdk_key=None, config=None, start_wait=5): @@ -140,7 +134,7 @@ def _make_update_processor(self, config, store, ready, diagnostic_accumulator): if config.offline or config.use_ldd: return NullUpdateProcessor(config, store, ready) - + if config.stream: return StreamingUpdateProcessor(config, store, ready, diagnostic_accumulator) @@ -164,7 +158,7 @@ def get_sdk_key(self): def close(self): """Releases all threads and network connections used by the LaunchDarkly client. - + Do not attempt to use the client after calling this method. """ log.info("Closing LaunchDarkly client..") @@ -174,10 +168,10 @@ def close(self): # These magic methods allow a client object to be automatically cleaned up by the "with" scope operator def __enter__(self): return self - + def __exit__(self, type, value, traceback): self.close() - + def _send_event(self, event): self._event_processor.send_event(event) @@ -263,15 +257,15 @@ def variation(self, key, user, default): :return: one of the flag's variation values, or the default value """ return self._evaluate_internal(key, user, default, self._event_factory_default).value - + def variation_detail(self, key, user, default): """Determines the variation of a feature flag for a user, like :func:`variation()`, but also provides additional information about how this value was calculated, in the form of an :class:`ldclient.flag.EvaluationDetail` object. - + Calling this method also causes the "reason" data to be included in analytics events, if you are capturing detailed event data for this flag. 
- + :param string key: the unique key for the feature flag :param dict user: a dictionary containing parameters for the end user requesting the flag :param object default: the default value of the flag, to be used if the value is not @@ -280,13 +274,13 @@ def variation_detail(self, key, user, default): :rtype: EvaluationDetail """ return self._evaluate_internal(key, user, default, self._event_factory_with_reasons) - + def _evaluate_internal(self, key, user, default, event_factory): default = self._config.get_default(key, default) if self._config.offline: return EvaluationDetail(default, None, error_reason('CLIENT_NOT_READY')) - + if not self.is_initialized(): if self._store.initialized: log.warning("Feature Flag evaluation attempted before client has initialized - using last known values from feature store for feature key: " + key) @@ -296,7 +290,7 @@ def _evaluate_internal(self, key, user, default, event_factory): reason = error_reason('CLIENT_NOT_READY') self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason)) return EvaluationDetail(default, None, reason) - + if user is not None and user.get('key', "") == "": log.warning("User key is blank. Flag evaluation will proceed, but the user will not be stored in LaunchDarkly.") @@ -333,10 +327,10 @@ def _evaluate_internal(self, key, user, default, event_factory): reason = error_reason('EXCEPTION') self._send_event(event_factory.new_default_event(flag, user, default, reason)) return EvaluationDetail(default, None, reason) - + def all_flags(self, user): """Returns all feature flag values for the given user. - + This method is deprecated - please use :func:`all_flags_state()` instead. Current versions of the client-side SDK will not generate analytics events correctly if you pass the result of ``all_flags``. @@ -349,13 +343,13 @@ def all_flags(self, user): if not state.valid: return None return state.to_values_map() - + def all_flags_state(self, user, **kwargs): """Returns an object that encapsulates the state of all feature flags for a given user, including the flag values and also metadata that can be used on the front end. See the JavaScript SDK Reference Guide on `Bootstrapping `_. - + This method does not send analytics events back to LaunchDarkly. :param dict user: the end user requesting the feature flags @@ -390,7 +384,7 @@ def all_flags_state(self, user, **kwargs): if user is None or user.get('key') is None: log.warning("User or user key is None when calling all_flags_state(). Returning empty state.") return FeatureFlagsState(False) - + state = FeatureFlagsState(True) client_only = kwargs.get('client_side_only', False) with_reasons = kwargs.get('with_reasons', False) @@ -402,7 +396,7 @@ def all_flags_state(self, user, **kwargs): except Exception as e: log.error("Unable to read flags for all_flag_state: %s" % repr(e)) return FeatureFlagsState(False) - + for key, flag in flags_map.items(): if client_only and not flag.get('clientSide', False): continue @@ -415,16 +409,16 @@ def all_flags_state(self, user, **kwargs): log.debug(traceback.format_exc()) reason = {'kind': 'ERROR', 'errorKind': 'EXCEPTION'} state.add_flag(flag, None, None, reason if with_reasons else None, details_only_if_tracked) - + return state - + def secure_mode_hash(self, user): """Computes an HMAC signature of a user signed with the client's SDK key, for use with the JavaScript SDK. For more information, see the JavaScript SDK Reference Guide on `Secure mode `_. 
- + :param dict user: the attributes of the user :return: a hash string that can be passed to the front end :rtype: string diff --git a/ldclient/config.py b/ldclient/config.py index 675d5f28..7df25dd7 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -11,7 +11,7 @@ STREAM_FLAGS_PATH = '/flags' -class HTTPConfig(object): +class HTTPConfig: """Advanced HTTP configuration options for the SDK client. This class groups together HTTP/HTTPS-related configuration properties that rarely need to be changed. @@ -76,7 +76,7 @@ def cert_file(self): def disable_ssl_verification(self): return self.__disable_ssl_verification -class Config(object): +class Config: """Advanced configuration options for the SDK client. To use these options, create an instance of ``Config`` and pass it to either :func:`ldclient.set_config()` @@ -428,7 +428,7 @@ def http(self): disable_ssl_verification=not self.__verify_ssl ) return self.__http - + def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key == '': log.warning("Missing or blank sdk_key.") diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 2890ca3a..fc3486b5 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -10,7 +10,7 @@ from ldclient.version import VERSION -class _DiagnosticAccumulator(object): +class _DiagnosticAccumulator: def __init__(self, diagnostic_id): self.diagnostic_id = diagnostic_id self.data_since_date = int(time.time() * 1000) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index b94d800a..de5f8107 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -8,17 +8,10 @@ import errno import json from threading import Event, Lock, Thread -import six import time -import urllib3 import uuid - -# noinspection PyBroadException -try: - import queue -except: - # noinspection PyUnresolvedReferences,PyPep8Naming - import Queue as queue +import queue +import urllib3 from ldclient.event_summarizer import EventSummarizer from ldclient.fixed_thread_pool import FixedThreadPool @@ -40,7 +33,7 @@ EventProcessorMessage = namedtuple('EventProcessorMessage', ['type', 'param']) -class EventOutputFormatter(object): +class EventOutputFormatter: def __init__(self, config): self._inline_users = config.inline_users_in_events self._user_filter = UserFilter(config) @@ -50,7 +43,7 @@ def make_output_events(self, events, summary): if len(summary.counters) > 0: events_out.append(self.make_summary_event(summary)) return events_out - + def make_output_event(self, e): kind = e['kind'] if kind == 'feature': @@ -131,16 +124,16 @@ def make_summary_event(self, summary): 'endDate': summary.end_date, 'features': flags_out } - + def _process_user(self, event): filtered = self._user_filter.filter_user_props(event['user']) return stringify_attrs(filtered, __USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__) - + def _get_userkey(self, event): return str(event['user'].get('key')) -class EventPayloadSendTask(object): +class EventPayloadSendTask: def __init__(self, http, config, formatter, payload, response_fn): self._http = http self._config = config @@ -179,7 +172,7 @@ def _do_send(self, output_events): 'Unhandled exception in event processor. Analytics events were not processed. 
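The CacheConfig docstrings above describe how caching is tuned for wrapped feature stores. A brief usage sketch, assuming the constructor accepts expiration and capacity keyword arguments matching the properties (the numeric values here are arbitrary examples):

    from ldclient.feature_store import CacheConfig

    default_caching = CacheConfig.default()            # caching on, default TTL and capacity
    no_caching = CacheConfig.disabled()                # expiration = 0 turns caching off
    tuned = CacheConfig(expiration=30, capacity=500)   # assumed keyword arguments

    assert default_caching.enabled
    assert not no_caching.enabled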
[%s]', e) -class DiagnosticEventSendTask(object): +class DiagnosticEventSendTask: def __init__(self, http, config, event_body): self._http = http self._config = config @@ -206,14 +199,14 @@ def run(self): FlushPayload = namedtuple('FlushPayload', ['events', 'summary']) -class EventBuffer(object): +class EventBuffer: def __init__(self, capacity): self._capacity = capacity self._events = [] self._summarizer = EventSummarizer() self._exceeded_capacity = False self._dropped_events = 0 - + def add_event(self, event): if len(self._events) >= self._capacity: self._dropped_events += 1 @@ -223,7 +216,7 @@ def add_event(self, event): else: self._events.append(event) self._exceeded_capacity = False - + def add_to_summary(self, event): self._summarizer.summarize_event(event) @@ -234,13 +227,13 @@ def get_and_clear_dropped_count(self): def get_payload(self): return FlushPayload(self._events, self._summarizer.snapshot()) - + def clear(self): self._events = [] self._summarizer.clear() -class EventDispatcher(object): +class EventDispatcher: def __init__(self, inbox, config, http_client, diagnostic_accumulator=None): self._inbox = inbox self._config = config @@ -291,7 +284,7 @@ def _run_main_loop(self): return except Exception: log.error('Unhandled exception in event processor', exc_info=True) - + def _process_event(self, event): if self._disabled: return @@ -456,7 +449,7 @@ def _post_message_and_wait(self, type): # These magic methods allow use of the "with" block in tests def __enter__(self): return self - + def __exit__(self, type, value, traceback): self.stop() diff --git a/ldclient/event_summarizer.py b/ldclient/event_summarizer.py index e046a347..c0b10eef 100644 --- a/ldclient/event_summarizer.py +++ b/ldclient/event_summarizer.py @@ -9,7 +9,7 @@ EventSummary = namedtuple('EventSummary', ['start_date', 'end_date', 'counters']) -class EventSummarizer(object): +class EventSummarizer: def __init__(self): self.start_date = 0 self.end_date = 0 diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index 501d8667..df443510 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -10,7 +10,6 @@ from ldclient.util import log from ldclient.interfaces import DiagnosticDescription, FeatureStore from ldclient.rwlock import ReadWriteLock -from six import iteritems class CacheConfig: @@ -41,15 +40,15 @@ def default(): :rtype: ldclient.feature_store.CacheConfig """ return CacheConfig() - + @staticmethod def disabled(): """Returns an instance of CacheConfig specifying that caching should be disabled. - + :rtype: ldclient.feature_store.CacheConfig """ return CacheConfig(expiration = 0) - + @property def enabled(self): """Returns True if caching is enabled in this configuration. @@ -57,7 +56,7 @@ def enabled(self): :rtype: bool """ return self._expiration > 0 - + @property def expiration(self): """Returns the configured cache TTL, in seconds. @@ -65,7 +64,7 @@ def expiration(self): :rtype: float """ return self._expiration - + @property def capacity(self): """Returns the configured maximum number of cacheable items. 
@@ -163,7 +162,7 @@ def initialized(self): return self._initialized finally: self._lock.runlock() - + def describe_configuration(self, config): return 'memory' @@ -191,7 +190,7 @@ def priority_order(kind): items = all_data[kind] outer_hash[kind] = _FeatureStoreDataSetSorter._sort_collection(kind, items) return outer_hash - + @staticmethod def _sort_collection(kind, input): if len(input) == 0 or not hasattr(kind, 'get_dependency_keys'): @@ -203,11 +202,11 @@ def _sort_collection(kind, input): items_out = OrderedDict() while len(remaining_items) > 0: # pick a random item that hasn't been updated yet - for key, item in iteritems(remaining_items): + for key, item in remaining_items.items(): _FeatureStoreDataSetSorter._add_with_dependencies_first(item, dependency_fn, remaining_items, items_out) break return items_out - + @staticmethod def _add_with_dependencies_first(item, dependency_fn, remaining_items, items_out): key = item.get('key') diff --git a/ldclient/fixed_thread_pool.py b/ldclient/fixed_thread_pool.py index 0a8a7c0e..3428daa8 100644 --- a/ldclient/fixed_thread_pool.py +++ b/ldclient/fixed_thread_pool.py @@ -4,20 +4,14 @@ # currently excluded from documentation - see docs/README.md from threading import Event, Lock, Thread - -# noinspection PyBroadException -try: - import queue -except: - # noinspection PyUnresolvedReferences,PyPep8Naming - import Queue as queue +import queue from ldclient.util import log """ A simple fixed-size thread pool that rejects jobs when its limit is reached. """ -class FixedThreadPool(object): +class FixedThreadPool: def __init__(self, size, name): self._size = size self._lock = Lock() @@ -29,7 +23,7 @@ def __init__(self, size, name): thread.name = "%s.%d" % (name, i + 1) thread.daemon = True thread.start() - + """ Schedules a job for execution if there is an available worker thread, and returns true if successful; returns false if all threads are busy. @@ -41,7 +35,7 @@ def execute(self, jobFn): self._busy_count = self._busy_count + 1 self._job_queue.put(jobFn) return True - + """ Waits until all currently busy worker threads have completed their jobs. """ @@ -52,14 +46,14 @@ def wait(self): return self._event.clear() self._event.wait() - + """ Tells all the worker threads to terminate once all active jobs have completed. """ def stop(self): for i in range(0, self._size): self._job_queue.put('stop') - + def _run_worker(self): while True: item = self._job_queue.get(block = True) diff --git a/ldclient/flag.py b/ldclient/flag.py index 422a56f0..dbf63b45 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -6,7 +6,6 @@ import hashlib import logging -import six import sys from ldclient import operators @@ -25,7 +24,7 @@ log = logging.getLogger(sys.modules[__name__].__name__) -class EvaluationDetail(object): +class EvaluationDetail: """ The return type of :func:`ldclient.client.LDClient.variation_detail()`, combining the result of a flag evaluation with information about how it was calculated. @@ -36,7 +35,7 @@ def __init__(self, value, variation_index, reason): self.__value = value self.__variation_index = variation_index self.__reason = reason - + @property def value(self): """The result of the flag evaluation. This will be either one of the flag's @@ -44,7 +43,7 @@ def value(self): :func:`ldclient.client.LDClient.variation_detail()` method. """ return self.__value - + @property def variation_index(self): """The index of the returned value within the flag's list of variations, e.g. 
@@ -53,14 +52,14 @@ def variation_index(self): :rtype: int or None """ return self.__variation_index - + @property def reason(self): """A dictionary describing the main factor that influenced the flag evaluation value. It contains the following properties: * ``kind``: The general category of reason, as follows: - + * ``"OFF"``: the flag was off * ``"FALLTHROUGH"`` -- the flag was on but the user did not match any targets or rules * ``"TARGET_MATCH"`` -- the user was specifically targeted for this flag @@ -81,7 +80,7 @@ def reason(self): :rtype: dict """ return self.__reason - + def is_default_value(self): """Returns True if the flag evaluated to the default value rather than one of its variations. @@ -89,13 +88,13 @@ def is_default_value(self): :rtype: bool """ return self.__variation_index is None - + def __eq__(self, other): return self.value == other.value and self.variation_index == other.variation_index and self.reason == other.reason def __ne__(self, other): return not self.__eq__(other) - + def __str__(self): return "(value=%s, variation_index=%s, reason=%s)" % (self.value, self.variation_index, self.reason) @@ -119,7 +118,7 @@ def evaluate(flag, user, store, event_factory): def _evaluate(flag, user, store, prereq_events, event_factory): if not flag.get('on', False): return _get_off_value(flag, {'kind': 'OFF'}) - + prereq_failure_reason = _check_prerequisites(flag, user, store, prereq_events, event_factory) if prereq_failure_reason is not None: return _get_off_value(flag, prereq_failure_reason) @@ -240,12 +239,7 @@ def _bucket_user(user, key, salt, bucket_by): def _bucketable_string_value(u_value): - if isinstance(u_value, six.string_types): - return u_value - if isinstance(u_value, six.integer_types): - return str(u_value) - return None - + return str(u_value) if isinstance(u_value, (str, int)) else None def _rule_matches_user(rule, user, store): for clause in rule.get('clauses') or []: diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index 2f611aa6..4ea41aaa 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -5,7 +5,7 @@ import json import time -class FeatureFlagsState(object): +class FeatureFlagsState: """ A snapshot of the state of all feature flags with regard to a specific user, generated by calling the :func:`ldclient.client.LDClient.all_flags_state()` method. Serializing this @@ -39,7 +39,7 @@ def add_flag(self, flag, value, variation, reason, details_only_if_tracked): if flag.get('debugEventsUntilDate') is not None: meta['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') self.__flag_metadata[key] = meta - + @property def valid(self): """True if this object contains a valid snapshot of feature flag state, or False if the @@ -48,7 +48,7 @@ def valid(self): :rtype: bool """ return self.__valid - + def get_flag_value(self, key): """Returns the value of an individual feature flag at the time the state was recorded. @@ -56,7 +56,7 @@ def get_flag_value(self, key): :return: the flag's value; None if the flag returned the default value, or if there was no such flag """ return self.__flag_values.get(key) - + def get_flag_reason(self, key): """Returns the evaluation reason for an individual feature flag at the time the state was recorded. @@ -67,7 +67,7 @@ def get_flag_reason(self, key): """ meta = self.__flag_metadata.get(key) return None if meta is None else meta.get('reason') - + def to_values_map(self): """Returns a dictionary of flag keys to flag values. If the flag would have evaluated to the default value, its value will be None. 
@@ -90,7 +90,7 @@ def to_json_dict(self): ret['$flagsState'] = self.__flag_metadata ret['$valid'] = self.__valid return ret - + def to_json_string(self): """Same as to_json_dict, but serializes the JSON structure into a string. diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index d2a62ad8..c35d3bbe 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -6,10 +6,10 @@ # Note that none of these methods fill in the "creationDate" property, because in the Python # client, that is done by DefaultEventProcessor.send_event(). -class _EventFactory(object): +class _EventFactory: def __init__(self, with_reasons): self._with_reasons = with_reasons - + def new_eval_event(self, flag, user, detail, default_value, prereq_of_flag = None): add_experiment_data = self._is_experiment(flag, detail.reason) e = { @@ -31,7 +31,7 @@ def new_eval_event(self, flag, user, detail, default_value, prereq_of_flag = Non if add_experiment_data or self._with_reasons: e['reason'] = detail.reason return e - + def new_default_event(self, flag, user, default_value, reason): e = { 'kind': 'feature', @@ -49,7 +49,7 @@ def new_default_event(self, flag, user, default_value, reason): if self._with_reasons: e['reason'] = reason return e - + def new_unknown_flag_event(self, key, user, default_value, reason): e = { 'kind': 'feature', @@ -61,7 +61,7 @@ def new_unknown_flag_event(self, key, user, default_value, reason): if self._with_reasons: e['reason'] = reason return e - + def new_identify_event(self, user): return { 'kind': 'identify', diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py index bcc97e4e..7b6693a0 100644 --- a/ldclient/impl/http.py +++ b/ldclient/impl/http.py @@ -16,7 +16,7 @@ def _base_headers(config): def _http_factory(config): return HTTPFactory(_base_headers(config), config.http) -class HTTPFactory(object): +class HTTPFactory: def __init__(self, base_headers, http_config, override_read_timeout=None): self.__base_headers = base_headers self.__http_config = http_config @@ -24,19 +24,19 @@ def __init__(self, base_headers, http_config, override_read_timeout=None): connect=http_config.connect_timeout, read=http_config.read_timeout if override_read_timeout is None else override_read_timeout ) - + @property def base_headers(self): return self.__base_headers - + @property def http_config(self): return self.__http_config - + @property def timeout(self): return self.__timeout - + def create_pool_manager(self, num_pools, target_base_uri): proxy_url = self.__http_config.http_proxy or _get_proxy_url(target_base_uri) diff --git a/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py index 79842ef6..ae6bef59 100644 --- a/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py +++ b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py @@ -12,22 +12,22 @@ from ldclient.feature_store_helpers import CachingStoreWrapper from ldclient.interfaces import DiagnosticDescription, FeatureStore, FeatureStoreCore -# +# # Internal implementation of the DynamoDB feature store. -# +# # Implementation notes: -# +# # * Feature flags, segments, and any other kind of entity the LaunchDarkly client may wish # to store, are all put in the same table. The only two required attributes are "key" (which # is present in all storeable entities) and "namespace" (a parameter from the client that is # used to disambiguate between flags and segments). 
-# +# # * Because of DynamoDB's restrictions on attribute values (e.g. empty strings are not # allowed), the standard DynamoDB marshaling mechanism with one attribute per object property # is not used. Instead, the entire object is serialized to JSON and stored in a single # attribute, "item". The "version" property is also stored as a separate attribute since it # is used for updates. -# +# # * Since DynamoDB doesn't have transactions, the init() method - which replaces the entire data # store - is not atomic, so there can be a race condition if another process is adding new data # via upsert(). To minimize this, we don't delete all the data at the start; instead, we update @@ -35,10 +35,10 @@ # deleting new data from another process, but that would be the case anyway if the init() # happened to execute later than the upsert(); we are relying on the fact that normally the # process that did the init() will also receive the new data shortly and do its own upsert(). -# +# # * DynamoDB has a maximum item size of 400KB. Since each feature flag or user segment is # stored as a single item, this mechanism will not work for extremely large flags or segments. -# +# class _DynamoDBFeatureStoreCore(FeatureStoreCore): PARTITION_KEY = 'namespace' @@ -73,7 +73,7 @@ def init_internal(self, all_data): for combined_key in unused_old_keys: if combined_key[0] != inited_key: requests.append({ 'DeleteRequest': { 'Key': self._make_keys(combined_key[0], combined_key[1]) } }) - + # Now set the special key that we check in initialized_internal() requests.append({ 'PutRequest': { 'Item': self._make_keys(inited_key, inited_key) } }) @@ -122,7 +122,7 @@ def initialized_internal(self): def describe_configuration(self, config): return 'DynamoDB' - + def _prefixed_namespace(self, base): return base if self._prefix is None else (self._prefix + ':' + base) @@ -131,13 +131,13 @@ def _namespace_for_kind(self, kind): def _inited_key(self): return self._prefixed_namespace('$inited') - + def _make_keys(self, namespace, key): return { self.PARTITION_KEY: { 'S': namespace }, self.SORT_KEY: { 'S': key } } - + def _make_query_for_kind(self, kind): return { 'TableName': self._table_name, @@ -171,14 +171,14 @@ def _read_existing_keys(self, kinds): key = item[self.SORT_KEY]['S'] keys.add((namespace, key)) return keys - + def _marshal_item(self, kind, item): json_str = json.dumps(item) ret = self._make_keys(self._namespace_for_kind(kind), item['key']) ret[self.VERSION_ATTRIBUTE] = { 'N': str(item['version']) } ret[self.ITEM_JSON_ATTRIBUTE] = { 'S': json_str } return ret - + def _unmarshal_item(self, item): if item is None: return None @@ -186,7 +186,7 @@ def _unmarshal_item(self, item): return None if json_attr is None else json.loads(json_attr['S']) -class _DynamoDBHelpers(object): +class _DynamoDBHelpers: @staticmethod def batch_write_requests(client, table_name, requests): batch_size = 25 diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py index 9f9f3eaf..8e197a6e 100644 --- a/ldclient/impl/integrations/files/file_data_source.py +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -1,6 +1,5 @@ import json import os -import six import traceback have_yaml = False @@ -30,22 +29,22 @@ def __init__(self, store, ready, paths, auto_update, poll_interval, force_pollin self._ready = ready self._inited = False self._paths = paths - if isinstance(self._paths, six.string_types): + if isinstance(self._paths, str): self._paths = [ self._paths ] self._auto_update = 
auto_update self._auto_updater = None self._poll_interval = poll_interval self._force_polling = force_polling - + def start(self): self._load_all() if self._auto_update: self._auto_updater = self._start_auto_updater() - + # We will signal readiness immediately regardless of whether the file load succeeded or failed - # the difference can be detected by checking initialized() - self._ready.set() + self._ready.set() def stop(self): if self._auto_updater: @@ -65,24 +64,24 @@ def _load_all(self): return self._store.init(all_data) self._inited = True - + def _load_file(self, path, all_data): content = None with open(path, 'r') as f: content = f.read() parsed = self._parse_content(content) - for key, flag in six.iteritems(parsed.get('flags', {})): + for key, flag in parsed.get('flags', {}).items(): self._add_item(all_data, FEATURES, flag) - for key, value in six.iteritems(parsed.get('flagValues', {})): + for key, value in parsed.get('flagValues', {}).items(): self._add_item(all_data, FEATURES, self._make_flag_with_value(key, value)) - for key, segment in six.iteritems(parsed.get('segments', {})): + for key, segment in parsed.get('segments', {}).items(): self._add_item(all_data, SEGMENTS, segment) - + def _parse_content(self, content): if have_yaml: return yaml.safe_load(content) # pyyaml correctly parses JSON too return json.loads(content) - + def _add_item(self, all_data, kind, item): items = all_data[kind] key = item.get('key') @@ -112,10 +111,10 @@ def _start_auto_updater(self): return _FileDataSource.WatchdogAutoUpdater(resolved_paths, self._load_all) else: return _FileDataSource.PollingAutoUpdater(resolved_paths, self._load_all, self._poll_interval) - + # Watch for changes to data files using the watchdog package. This uses native OS filesystem notifications # if available for the current platform. - class WatchdogAutoUpdater(object): + class WatchdogAutoUpdater: def __init__(self, resolved_paths, reloader): watched_files = set(resolved_paths) @@ -123,11 +122,11 @@ class LDWatchdogHandler(watchdog.events.FileSystemEventHandler): def on_any_event(self, event): if event.src_path in watched_files: reloader() - + dir_paths = set() for path in resolved_paths: dir_paths.add(os.path.dirname(path)) - + self._observer = watchdog.observers.Observer() handler = LDWatchdogHandler() for path in dir_paths: @@ -140,21 +139,21 @@ def stop(self): # Watch for changes to data files by polling their modification times. This is used if auto-update is # on but the watchdog package is not installed. - class PollingAutoUpdater(object): + class PollingAutoUpdater: def __init__(self, resolved_paths, reloader, interval): self._paths = resolved_paths self._reloader = reloader self._file_times = self._check_file_times() self._timer = RepeatingTimer(interval, self._poll) self._timer.start() - + def stop(self): self._timer.stop() - + def _poll(self): new_times = self._check_file_times() changed = False - for file_path, file_time in six.iteritems(self._file_times): + for file_path, file_time in self._file_times.items(): if new_times.get(file_path) is not None and new_times.get(file_path) != file_time: changed = True break diff --git a/ldclient/impl/retry_delay.py b/ldclient/impl/retry_delay.py index 6ede21ab..f07d8405 100644 --- a/ldclient/impl/retry_delay.py +++ b/ldclient/impl/retry_delay.py @@ -2,7 +2,7 @@ # This implementation is based on the equivalent code in the Go eventsource library. 
-class RetryDelayStrategy(object): +class RetryDelayStrategy: """Encapsulation of configurable backoff/jitter behavior, used for stream connections. - The system can either be in a "good" state or a "bad" state. The initial state is "bad"; the @@ -24,7 +24,7 @@ def __init__(self, base_delay, reset_interval, backoff_strategy, jitter_strategy self.__jitter = jitter_strategy self.__retry_count = 0 self.__good_since = None - + def next_retry_delay(self, current_time): """Computes the next retry interval. This also sets the current state to "bad". @@ -43,14 +43,14 @@ def next_retry_delay(self, current_time): if self.__jitter: delay = self.__jitter.apply_jitter(delay) return delay - + def set_good_since(self, good_since): """Marks the current state as "good" and records the time. :param float good_since: the time that the state became "good", in seconds """ self.__good_since = good_since - + def set_base_delay(self, base_delay): """Changes the initial retry delay and resets the backoff (if any) so the next retry will use that value. @@ -62,7 +62,7 @@ def set_base_delay(self, base_delay): self.__base_delay = base_delay self.__retry_count = 0 -class DefaultBackoffStrategy(object): +class DefaultBackoffStrategy: """The default implementation of exponential backoff, which doubles the delay each time up to the specified maximum. @@ -72,12 +72,12 @@ class DefaultBackoffStrategy(object): """ def __init__(self, max_delay): self.__max_delay = max_delay - + def apply_backoff(self, delay, retry_count): d = delay * (2 ** retry_count) return d if d <= self.__max_delay else self.__max_delay -class DefaultJitterStrategy(object): +class DefaultJitterStrategy: """The default implementation of jitter, which subtracts a pseudo-random amount from each delay. """ def __init__(self, ratio, rand_seed = None): @@ -88,6 +88,6 @@ def __init__(self, ratio, rand_seed = None): """ self.__ratio = ratio self.__random = Random(rand_seed) - + def apply_jitter(self, delay): return delay - (self.__random.random() * self.__ratio * delay) diff --git a/ldclient/integrations.py b/ldclient/integrations.py index a1e9d2f8..15816f72 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -11,10 +11,10 @@ from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore -class Consul(object): +class Consul: """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. """ - + """The key prefix that is used if you do not specify one.""" DEFAULT_PREFIX = "launchdarkly" @@ -53,10 +53,10 @@ def new_feature_store(host=None, return CachingStoreWrapper(core, caching) -class DynamoDB(object): +class DynamoDB: """Provides factory methods for integrations between the LaunchDarkly SDK and DynamoDB. """ - + @staticmethod def new_feature_store(table_name, prefix=None, @@ -95,13 +95,13 @@ def new_feature_store(table_name, return CachingStoreWrapper(core, caching) -class Redis(object): +class Redis: """Provides factory methods for integrations between the LaunchDarkly SDK and Redis. """ DEFAULT_URL = 'redis://localhost:6379/0' DEFAULT_PREFIX = 'launchdarkly' DEFAULT_MAX_CONNECTIONS = 16 - + @staticmethod def new_feature_store(url='redis://localhost:6379/0', prefix='launchdarkly', @@ -134,7 +134,7 @@ def new_feature_store(url='redis://localhost:6379/0', return wrapper -class Files(object): +class Files: """Provides factory methods for integrations with filesystem data. 
""" @@ -162,7 +162,7 @@ def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=Fal Note that in order to use YAML, you will need to install the ``pyyaml`` package. If the data source encounters any error in any file-- malformed content, a missing file, or a - duplicate key-- it will not load flags from any of the files. + duplicate key-- it will not load flags from any of the files. :param array paths: the paths of the source files for loading flag data. These may be absolute paths or relative to the current working directory. Files will be parsed as JSON unless the ``pyyaml`` diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 6b49782c..ea3c9fbd 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -7,7 +7,7 @@ from abc import ABCMeta, abstractmethod, abstractproperty -class FeatureStore(object): +class FeatureStore: """ Interface for a versioned store for feature flags and related objects received from LaunchDarkly. Implementations should permit concurrent access and updates. @@ -15,10 +15,10 @@ class FeatureStore(object): An "object", for ``FeatureStore``, is simply a dict of arbitrary data which must have at least three properties: ``key`` (its unique key), ``version`` (the version number provided by LaunchDarkly), and ``deleted`` (True if this is a placeholder for a deleted object). - + Delete and upsert requests are versioned: if the version number in the request is less than the currently stored version of the object, the request should be ignored. - + These semantics support the primary use case for the store, which synchronizes a collection of objects based on update messages that may be received out-of-order. """ @@ -101,7 +101,7 @@ def initialized(self): """ -class FeatureStoreCore(object): +class FeatureStoreCore: """ Interface for a simplified subset of the functionality of :class:`FeatureStore`, to be used in conjunction with :class:`ldclient.feature_store_helpers.CachingStoreWrapper`. This allows @@ -181,7 +181,7 @@ def initialized_internal(self): # Internal use only. Common methods for components that perform a task in the background. -class BackgroundOperation(object): +class BackgroundOperation: # noinspection PyMethodMayBeStatic def start(self): @@ -221,7 +221,7 @@ def initialized(self): """ -class EventProcessor(object): +class EventProcessor: """ Interface for the component that buffers analytics events and sends them to LaunchDarkly. The default implementation can be replaced for testing purposes. @@ -242,7 +242,7 @@ def flush(self): until a later time. However, calling ``stop()`` will synchronously deliver any events that were not yet delivered prior to shutting down. """ - + @abstractmethod def stop(self): """ @@ -250,7 +250,7 @@ def stop(self): """ -class FeatureRequester(object): +class FeatureRequester: """ Interface for the component that acquires feature flag data in polling mode. The default implementation can be replaced for testing purposes. @@ -264,7 +264,7 @@ def get_all(self): pass -class DiagnosticDescription(object): +class DiagnosticDescription: """ Optional interface for components to describe their own configuration. 
""" diff --git a/ldclient/lru_cache.py b/ldclient/lru_cache.py index f8f18e37..d65c59c4 100644 --- a/ldclient/lru_cache.py +++ b/ldclient/lru_cache.py @@ -5,35 +5,13 @@ from collections import OrderedDict - -# Backport of Python 3.2 move_to_end method which doesn't exist in 2.7 -class _OrderedDictWithReordering(OrderedDict): - if not hasattr(OrderedDict, 'move_to_end'): - # backport of Python 3.2 logic - def move_to_end(self, key, last=True): - link_prev, link_next, key = link = self._OrderedDict__map[key] - link_prev[1] = link_next - link_next[0] = link_prev - root = self._OrderedDict__root - if last: - last = root[0] - link[0] = last - link[1] = root - last[1] = root[0] = link - else: - first = root[1] - link[0] = root - link[1] = first - root[1] = first[0] = link - - -class SimpleLRUCache(object): +class SimpleLRUCache: """A dictionary-based cache that removes the oldest entries when its limit is exceeded. Values are only refreshed by writing, not by reading. Not thread-safe. """ def __init__(self, capacity): self.capacity = capacity - self.cache = _OrderedDictWithReordering() + self.cache = OrderedDict() def get(self, key): return self.cache.get(key) diff --git a/ldclient/memoized_value.py b/ldclient/memoized_value.py index 7abc944f..3cf2dd22 100644 --- a/ldclient/memoized_value.py +++ b/ldclient/memoized_value.py @@ -5,7 +5,7 @@ from threading import RLock -class MemoizedValue(object): +class MemoizedValue """Simple implementation of a thread-safe memoized value whose generator function will never be run more than once, and whose value can be overridden by explicit assignment. diff --git a/ldclient/operators.py b/ldclient/operators.py index 158455ca..bf083a06 100644 --- a/ldclient/operators.py +++ b/ldclient/operators.py @@ -11,18 +11,13 @@ from collections import defaultdict from numbers import Number -import six import pyrfc3339 log = logging.getLogger(sys.modules[__name__].__name__) def _string_operator(u, c, fn): - if isinstance(u, six.string_types): - if isinstance(c, six.string_types): - return fn(u, c) - return False - + return fn(u, c) if isinstance(u, str) and isinstance(c, str) else False def _numeric_operator(u, c, fn): # bool is a subtype of int, and we don't want to try and compare it as a number. 
@@ -50,7 +45,7 @@ def _parse_time(input): if isinstance(input, Number): return float(input) - if isinstance(input, six.string_types): + if isinstance(input, str): try: parsed_time = pyrfc3339.parse(input) timestamp = (parsed_time - epoch).total_seconds() diff --git a/ldclient/repeating_timer.py b/ldclient/repeating_timer.py index eb8aa771..91a0f52d 100644 --- a/ldclient/repeating_timer.py +++ b/ldclient/repeating_timer.py @@ -5,7 +5,7 @@ from threading import Event, Thread -class RepeatingTimer(object): +class RepeatingTimer: def __init__(self, interval, callable): self._interval = interval self._action = callable diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py index eca088f6..e1531f8c 100644 --- a/ldclient/sse_client.py +++ b/ldclient/sse_client.py @@ -8,8 +8,6 @@ import re import time -import six - import urllib3 from ldclient.config import HTTPConfig @@ -22,7 +20,7 @@ end_of_field = re.compile(r'\r\n\r\n|\r\r|\n\n') -class SSEClient(object): +class SSEClient: def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeout=300, chunk_size=10000, verify_ssl=False, http=None, http_proxy=None, http_factory=None, **kwargs): self.url = url @@ -37,7 +35,7 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo # for backward compatibility in case anyone else is using this class self._timeout = urllib3.Timeout(connect=connect_timeout, read=read_timeout) base_headers = {} - + # Optional support for passing in an HTTP client if http: self.http = http @@ -59,7 +57,7 @@ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeo # The SSE spec requires making requests with Cache-Control: nocache if 'headers' not in self.requests_kwargs: self.requests_kwargs['headers'] = {} - + self.requests_kwargs['headers'].update(base_headers) self.requests_kwargs['headers']['Cache-Control'] = 'no-cache' @@ -141,15 +139,8 @@ def __next__(self): return msg - # The following two lines make our iterator class compatible with both Python 2.x and 3.x, - # even though they expect different magic method names. We could accomplish the same thing - # by importing builtins.object and deriving from that, but this way it's easier to see - # what we're doing. 
- if six.PY2: - next = __next__ - -class Event(object): +class Event: sse_line_pattern = re.compile('(?P[^:]*):?( ?(?P.*))?') diff --git a/ldclient/user_filter.py b/ldclient/user_filter.py index fe5baa39..acca254f 100644 --- a/ldclient/user_filter.py +++ b/ldclient/user_filter.py @@ -3,9 +3,6 @@ """ # currently excluded from documentation - see docs/README.md -import six - - class UserFilter: IGNORE_ATTRS = frozenset(['key', 'custom', 'anonymous']) ALLOWED_TOP_LEVEL_ATTRS = frozenset(['key', 'secondary', 'ip', 'country', 'email', @@ -14,7 +11,7 @@ class UserFilter: def __init__(self, config): self._private_attribute_names = config.private_attribute_names self._all_attributes_private = config.all_attributes_private - + def _is_private_attr(self, name, user_private_attrs): if name in UserFilter.IGNORE_ATTRS: return False @@ -28,7 +25,7 @@ def filter_user_props(self, user_props): user_private_attrs = user_props.get('privateAttributeNames', []) def filter_private_attrs(attrs, allowed_attrs = frozenset()): - for key, value in six.iteritems(attrs): + for key, value in attrs.items(): if (not allowed_attrs) or (key in allowed_attrs): if self._is_private_attr(key, user_private_attrs): all_private_attrs.add(key) diff --git a/ldclient/util.py b/ldclient/util.py index 3880c330..a3e6cfbc 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -5,7 +5,6 @@ import logging from os import environ -import six import sys import urllib3 @@ -13,13 +12,7 @@ log = logging.getLogger(sys.modules[__name__].__name__) - -# noinspection PyBroadException -try: - import queue -except: - # noinspection PyUnresolvedReferences,PyPep8Naming - import Queue as queue +import queue __LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF) @@ -27,14 +20,8 @@ __BUILTINS__ = ["key", "ip", "country", "email", "firstName", "lastName", "avatar", "name", "anonymous"] -try: - # noinspection PyUnresolvedReferences - unicode -except NameError: - __BASE_TYPES__ = (str, float, int, bool) -else: - # noinspection PyUnresolvedReferences - __BASE_TYPES__ = (str, float, int, bool, unicode) +__BASE_TYPES__ = (str, float, int, bool) + _retryable_statuses = [400, 408, 429] @@ -56,8 +43,7 @@ def check_uwsgi(): 'To learn more, see http://docs.launchdarkly.com/v1.0/docs/python-sdk-reference#configuring-uwsgi') -class Event(object): - +class Event: def __init__(self, data='', event='message', event_id=None, retry=None): self.data = data self.event = event @@ -117,7 +103,7 @@ def stringify_attrs(attrdict, attrs): newdict = None for attr in attrs: val = attrdict.get(attr) - if val is not None and not isinstance(val, six.string_types): + if val is not None and not isinstance(val, str): if newdict is None: newdict = attrdict.copy() newdict[attr] = str(val) diff --git a/requirements.txt b/requirements.txt index 76cd9de6..97dddee5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ certifi>=2018.4.16 expiringdict>=1.1.4,<1.2.0 -six>=1.10.0 pyRFC3339>=1.0 semver>=2.7.9 urllib3>=1.22.0 diff --git a/runtests.py b/runtests.py index 474f5e22..6fc85fe3 100644 --- a/runtests.py +++ b/runtests.py @@ -3022,7 +3022,7 @@ import zlib -class DictImporter(object): +class DictImporter: def __init__(self, sources): self.sources = sources diff --git a/setup.py b/setup.py index a0f4452f..e717a5b9 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ with open('./ldclient/version.py') as f: exec(f.read(), version_module_globals) ldclient_version = version_module_globals['VERSION'] - + def parse_requirements(filename): """ load requirements from a pip requirements file """ 
lineiter = (line.strip() for line in open(filename)) @@ -61,8 +61,6 @@ def run(self): 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', diff --git a/testing/http_util.py b/testing/http_util.py index 55842a38..0c6ee52d 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -1,10 +1,10 @@ import json -from six import iteritems, string_types -from six.moves import BaseHTTPServer, queue import socket import ssl from threading import Thread import time +import queue +from http.server import HTTPServer, BaseHTTPRequestHandler def get_available_port(): s = socket.socket(socket.AF_INET, type = socket.SOCK_STREAM) @@ -44,9 +44,9 @@ def __init__(self, port, secure): Thread.__init__(self) self.port = port self.uri = '%s://localhost:%d' % ('https' if secure else 'http', port) - self.server = BaseHTTPServer.HTTPServer(('localhost', port), MockServerRequestHandler) + self.server = HTTPServer(('localhost', port), MockServerRequestHandler) if secure: - self.server.socket = ssl.wrap_socket( + self.server.socket = ssl.wrap_socket( self.server.socket, certfile='./testing/selfsigned.pem', # this is a pre-generated self-signed cert that is valid for 100 years keyfile='./testing/selfsigned.key', @@ -55,24 +55,24 @@ def __init__(self, port, secure): self.server.server_wrapper = self self.matchers = {} self.requests = queue.Queue() - + def close(self): self.server.shutdown() self.server.server_close() - + def run(self): self.server.serve_forever(0.1) # 0.1 seconds is how often it'll check to see if it is shutting down - + def for_path(self, uri_path, content): self.matchers[uri_path] = content return self def await_request(self): return self.requests.get() - + def require_request(self): return self.requests.get(block=False) - + def should_have_requests(self, count): if self.requests.qsize() != count: rs = [] @@ -87,7 +87,7 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.close() -class MockServerRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): +class MockServerRequestHandler(BaseHTTPRequestHandler): def do_CONNECT(self): self._do_request() @@ -106,7 +106,7 @@ def _do_request(self): else: self.send_error(404) -class MockServerRequest(object): +class MockServerRequest: def __init__(self, request): self.method = request.command self.path = request.path @@ -116,23 +116,23 @@ def __init__(self, request): self.body = request.rfile.read(content_length).decode('UTF-8') else: self.body = None - + def __str__(self): return "%s %s" % (self.method, self.path) -class BasicResponse(object): +class BasicResponse: def __init__(self, status, body = None, headers = None): self.status = status self.body = body self.headers = headers or {} def add_headers(self, headers): - for key, value in iteritems(headers or {}): + for key, value in (headers or {}).items(): self.headers[key] = value - + def write(self, request): request.send_response(self.status) - for key, value in iteritems(self.headers): + for key, value in self.headers.items(): request.send_header(key, value) request.end_headers() if self.body: @@ -144,22 +144,22 @@ def __init__(self, data, headers = None): h.update({ 'Content-Type': 'application/json' }) BasicResponse.__init__(self, 200, json.dumps(data or {}), h) -class ChunkedResponse(object): +class 
ChunkedResponse: def __init__(self, headers = None): self.queue = queue.Queue() self.headers = headers or {} - + def push(self, chunk): if chunk is not None: self.queue.put(chunk) - + def close(self): self.queue.put(None) - + def write(self, request): request.send_response(200) request.send_header('Transfer-Encoding', 'chunked') - for key, value in iteritems(self.headers): + for key, value in self.headers.items(): request.send_header(key, value) request.end_headers() request.wfile.flush() @@ -179,11 +179,11 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.close() -class CauseNetworkError(object): +class CauseNetworkError: def write(self, request): raise Exception('intentional error') -class SequentialHandler(object): +class SequentialHandler: def __init__(self, *argv): self.handlers = argv self.counter = 0 diff --git a/testing/stub_util.py b/testing/stub_util.py index a5bd6b9f..5a7e99ad 100644 --- a/testing/stub_util.py +++ b/testing/stub_util.py @@ -67,7 +67,7 @@ def get_all_data(self): raise self.exception return self.all_data -class MockResponse(object): +class MockResponse: def __init__(self, status, headers): self._status = status self._headers = headers @@ -79,7 +79,7 @@ def status(self): def getheader(self, name): return self._headers.get(name.lower()) -class MockHttp(object): +class MockHttp: def __init__(self): self._recorded_requests = [] self._request_data = None @@ -146,22 +146,22 @@ class CapturingFeatureStore(FeatureStore): def init(self, all_data): self.data = all_data - def get(self, kind, key, callback=lambda x: x): + def get(self, kind, key, callback=lambda x: x): pass - + def all(self, kind, callback=lambda x: x): pass - + def delete(self, kind, key, version): pass - + def upsert(self, kind, item): pass - + @property def initialized(self): return True - + @property def received_data(self): return self.data diff --git a/testing/test_diagnostics.py b/testing/test_diagnostics.py index 8bff0055..6fd8e90d 100644 --- a/testing/test_diagnostics.py +++ b/testing/test_diagnostics.py @@ -90,7 +90,7 @@ def test_create_diagnostic_config_custom(): assert diag_config['diagnosticRecordingIntervalMillis'] == 60000 assert diag_config['dataStoreType'] == 'MyFavoriteStore' -class _TestStoreForDiagnostics(object): +class _TestStoreForDiagnostics: def describe_configuration(self, config): return 'MyFavoriteStore' diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index f1211264..a5c0a4c9 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -22,7 +22,7 @@ skip_db_tests = os.environ.get('LD_SKIP_DATABASE_TESTS') == '1' -class InMemoryTester(object): +class InMemoryTester: def init_store(self): return InMemoryFeatureStore() @@ -31,13 +31,13 @@ def supports_prefix(self): return False -class RedisTester(object): +class RedisTester: redis_host = 'localhost' redis_port = 6379 def __init__(self, cache_config): self._cache_config = cache_config - + def init_store(self, prefix=None): self._clear_data() return Redis.new_feature_store(caching=self._cache_config, prefix=prefix) @@ -61,7 +61,7 @@ def supports_prefix(self): return True -class ConsulTester(object): +class ConsulTester: def __init__(self, cache_config): self._cache_config = cache_config @@ -80,7 +80,7 @@ def _clear_data(self, prefix): client.kv.delete(key) -class DynamoDBTester(object): +class DynamoDBTester: table_name = 'LD_DYNAMODB_TEST_TABLE' table_created = False options = { @@ -92,7 +92,7 @@ class DynamoDBTester(object): def __init__(self, cache_config): 
        self._cache_config = cache_config
-    
+
     def init_store(self, prefix=None):
         self._create_table()
         self._clear_data()
@@ -148,7 +148,7 @@ def _create_table(self):
                 return
             except client.exceptions.ResourceNotFoundException:
                 time.sleep(0.5)
-    
+
     def _clear_data(self):
         client = boto3.client('dynamodb', **self.options)
         delete_requests = []
@@ -226,7 +226,7 @@ def base_initialized_store(self, store):
 
     def test_not_initialized_before_init(self, store):
         assert store.initialized is False
-    
+
     def test_initialized(self, store):
         store = self.base_initialized_store(store)
         assert store.initialized is True
diff --git a/testing/test_file_data_source.py b/testing/test_file_data_source.py
index 7b13cf9b..191309d7 100644
--- a/testing/test_file_data_source.py
+++ b/testing/test_file_data_source.py
@@ -1,7 +1,6 @@
 import json
 import os
 import pytest
-import six
 import tempfile
 import threading
 import time
@@ -106,7 +105,7 @@ def make_data_source(**kwargs):
 
 def make_temp_file(content):
     f, path = tempfile.mkstemp()
-    os.write(f, six.b(content))
+    os.write(f, content.encode("latin-1"))
     os.close(f)
     return path
diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py
index a6789e4d..e16af123 100644
--- a/testing/test_ldclient.py
+++ b/testing/test_ldclient.py
@@ -11,11 +11,7 @@
 import pytest
 from testing.stub_util import CapturingFeatureStore, MockEventProcessor, MockUpdateProcessor
 from testing.sync_util import wait_until
-
-try:
-    import queue
-except:
-    import Queue as queue
+import queue
 
 unreachable_uri="http://fake"

From 3095315fd53c3bf723b0f16b0c18acadef4dfb3e Mon Sep 17 00:00:00 2001
From: Gabor Angeli
Date: Mon, 14 Sep 2020 22:44:24 -0700
Subject: [PATCH 199/356] Allow authenticating with proxy

This commit allows authenticating with a proxy configured via the
`http_proxy` environment variable. The credentials must be sent as a
Proxy-Authorization header; urllib3 does not parse them out of the
proxy URL on its own.
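
For illustration, a setup this patch is meant to support might look like the
sketch below (hypothetical host and credentials; the snippet itself is not part
of the change):

    import os
    # Credentials embedded in the proxy URL, in the usual user:password form:
    os.environ['http_proxy'] = 'http://user:secret@proxy.example.com:8080'
    # When the SDK builds its connection pool, it extracts 'user:secret' and
    # sends it as a Proxy-Authorization header on each proxied request.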
--- ldclient/impl/http.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py index bcc97e4e..b46fec44 100644 --- a/ldclient/impl/http.py +++ b/ldclient/impl/http.py @@ -54,11 +54,18 @@ def create_pool_manager(self, num_pools, target_base_uri): ca_certs=ca_certs ) else: + # Get proxy authentication, if provided + url = urllib3.util.parse_url(proxy_url) + proxy_headers = None + if url.auth != None: + proxy_headers = urllib3.util.make_headers(proxy_basic_auth=url.auth) + # Create a proxied connection return urllib3.ProxyManager( proxy_url, num_pools=num_pools, cert_reqs=cert_reqs, - ca_certs = ca_certs + ca_certs = ca_certs, + proxy_headers=proxy_headers ) def _get_proxy_url(target_base_uri): From 1de076943b23db2ea3249eaf09d67f801d7282ca Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 21 Sep 2020 15:50:52 -0700 Subject: [PATCH 200/356] reimplement proxy tests for DRY and add test of proxy auth params --- testing/http_util.py | 6 ++- testing/proxy_test_util.py | 55 +++++++++++++++++++++++++ testing/test_event_processor.py | 54 ++++-------------------- testing/test_feature_requester.py | 68 ++++++++----------------------- testing/test_streaming.py | 67 +++++++++--------------------- 5 files changed, 103 insertions(+), 147 deletions(-) create mode 100644 testing/proxy_test_util.py diff --git a/testing/http_util.py b/testing/http_util.py index 55842a38..bdaeca04 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -72,7 +72,11 @@ def await_request(self): def require_request(self): return self.requests.get(block=False) - + + def wait_until_request_received(self): + req = self.requests.get() + self.requests.put(req) + def should_have_requests(self, count): if self.requests.qsize() != count: rs = [] diff --git a/testing/proxy_test_util.py b/testing/proxy_test_util.py new file mode 100644 index 00000000..b9483f7b --- /dev/null +++ b/testing/proxy_test_util.py @@ -0,0 +1,55 @@ +from ldclient.config import Config, HTTPConfig +from testing.http_util import start_server, BasicResponse, JsonResponse + +# Runs tests of all of our supported proxy server configurations: secure or insecure, configured +# by Config.http_proxy or by an environment variable, with or without authentication. The action +# parameter is a function that takes three parameters: server, config, secure; the expectation is +# that it causes an HTTP/HTTPS request to be made via the configured proxy. The caller must pass +# in the monkeypatch fixture from pytest. +def do_proxy_tests(action, action_method, monkeypatch): + # We'll test each permutation of use_env_vars, secure, and use_auth, except that if secure is + # true then we'll only test with use_auth=false because we don't have a way to test proxy + # authorization over HTTPS (even though we believe it works). 
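+    #
+    # A hypothetical caller would look roughly like this (sketch only; the real
+    # callers live in test_event_processor.py, test_feature_requester.py, and
+    # test_streaming.py):
+    #
+    #     def my_action(server, config, secure):
+    #         ...  # make one request through the proxy using `config`
+    #
+    #     do_proxy_tests(my_action, 'GET', monkeypatch)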
+ for (use_env_vars, secure, use_auth) in [ + (False, False, False), + (False, False, True), + (False, True, False), + (True, False, False), + (True, False, True), + (True, True, False)]: + test_desc = "%s, %s, %s" % ( + "using env vars" if use_env_vars else "using Config", + "secure" if secure else "insecure", + "with auth" if use_auth else "no auth") + with start_server() as server: + proxy_uri = server.uri.replace('http://', 'http://user:pass@') if use_auth else server.uri + target_uri = 'https://not-real' if secure else 'http://not-real' + if use_env_vars: + monkeypatch.setenv('https_proxy' if secure else 'http_proxy', proxy_uri) + config = Config( + sdk_key = 'sdk_key', + base_uri = target_uri, + events_uri = target_uri, + stream_uri = target_uri, + http = None if use_env_vars else HTTPConfig(http_proxy=proxy_uri), + diagnostic_opt_out = True) + try: + action(server, config, secure) + except: + print("test action failed (%s)" % test_desc) + raise + # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the + # HTTP client, so we should be able to see the request go through. Note that the URI path will + # actually be an absolute URI for a proxy request. + try: + req = server.require_request() + except: + print("server did not receive a request (%s)" % test_desc) + raise + expected_method = 'CONNECT' if secure else action_method + assert req.method == expected_method, "method should be %s, was %s (%s)" % (expected_method, req.method, test_desc) + if use_auth: + expected_auth = 'Basic dXNlcjpwYXNz' + actual_auth = req.headers.get('Proxy-Authorization') + assert actual_auth == expected_auth, "auth header should be %s, was %s (%s)" % (expected_auth, actual_auth, test_desc) + print("do_proxy_tests succeeded for: %s" % test_desc) diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index b5f68d4b..76208784 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -9,6 +9,7 @@ from ldclient.event_processor import DefaultEventProcessor from ldclient.util import log from testing.http_util import start_server, BasicResponse +from testing.proxy_test_util import do_proxy_tests from testing.stub_util import MockResponse, MockHttp @@ -558,52 +559,13 @@ def start_consuming_events(): assert message1.param == event1 assert had_no_more -def test_can_use_http_proxy_via_environment_var(monkeypatch): - with start_server() as server: - monkeypatch.setenv('http_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', diagnostic_opt_out = True) - _verify_http_proxy_is_used(server, config) - -def test_can_use_https_proxy_via_environment_var(monkeypatch): - with start_server() as server: - monkeypatch.setenv('https_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', diagnostic_opt_out = True) - _verify_https_proxy_is_used(server, config) - -def test_can_use_http_proxy_via_config(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', events_uri = 'http://not-real', http_proxy=server.uri, diagnostic_opt_out = True) - _verify_http_proxy_is_used(server, config) - -def test_can_use_https_proxy_via_config(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', events_uri = 'https://not-real', http_proxy=server.uri, diagnostic_opt_out = True) - _verify_https_proxy_is_used(server, config) - -def _verify_http_proxy_is_used(server, config): - server.for_path(config.events_uri + '/bulk', BasicResponse(200)) - with 
DefaultEventProcessor(config) as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - - # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the - # HTTP client, so we should be able to see the request go through. Note that the URI path will - # actually be an absolute URI for a proxy request. - req = server.require_request() - assert req.method == 'POST' - -def _verify_https_proxy_is_used(server, config): - server.for_path(config.events_uri + '/bulk', BasicResponse(200)) - with DefaultEventProcessor(config) as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) - ep.flush() - ep._wait_until_inactive() - - # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but - # it can still record that it *got* the request, which proves that the request went to the proxy. - req = server.require_request() - assert req.method == 'CONNECT' +def test_http_proxy(monkeypatch): + def _event_processor_proxy_test(server, config, secure): + with DefaultEventProcessor(config) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + do_proxy_tests(_event_processor_proxy_test, 'POST', monkeypatch) def verify_unrecoverable_http_error(status): with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py index 10f8d11e..db18f555 100644 --- a/testing/test_feature_requester.py +++ b/testing/test_feature_requester.py @@ -6,7 +6,7 @@ from ldclient.version import VERSION from ldclient.versioned_data_kind import FEATURES, SEGMENTS from testing.http_util import start_server, BasicResponse, JsonResponse - +from testing.proxy_test_util import do_proxy_tests def test_get_all_data_returns_data(): with start_server() as server: @@ -102,54 +102,18 @@ def test_get_all_data_can_use_cached_data(): req = server.require_request() assert req.headers['If-None-Match'] == etag2 -def test_can_use_http_proxy_via_environment_var(monkeypatch): - with start_server() as server: - monkeypatch.setenv('http_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', base_uri = 'http://not-real') - _verify_http_proxy_is_used(server, config) - -def test_can_use_https_proxy_via_environment_var(monkeypatch): - with start_server() as server: - monkeypatch.setenv('https_proxy', server.uri) - config = Config(sdk_key = 'sdk-key', base_uri = 'https://not-real') - _verify_https_proxy_is_used(server, config) - -def test_can_use_http_proxy_via_config(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = 'http://not-real', http_proxy = server.uri) - _verify_http_proxy_is_used(server, config) - -def test_can_use_https_proxy_via_config(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', base_uri = 'https://not-real', http_proxy = server.uri) - _verify_https_proxy_is_used(server, config) - -def _verify_http_proxy_is_used(server, config): - fr = FeatureRequesterImpl(config) - - resp_data = { 'flags': {}, 'segments': {} } - expected_data = { FEATURES: {}, SEGMENTS: {} } - server.for_path(config.base_uri + '/sdk/latest-all', JsonResponse(resp_data)) - - # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the - # HTTP client, so we should be able to see the request go through. Note that the URI path will - # actually be an absolute URI for a proxy request. 
- result = fr.get_all_data() - assert result == expected_data - req = server.require_request() - assert req.method == 'GET' - -def _verify_https_proxy_is_used(server, config): - fr = FeatureRequesterImpl(config) - - resp_data = { 'flags': {}, 'segments': {} } - server.for_path(config.base_uri + '/sdk/latest-all', JsonResponse(resp_data)) - - # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but - # it can still record that it *got* the request, which proves that the request went to the proxy. - try: - fr.get_all_data() - except: - pass - req = server.require_request() - assert req.method == 'CONNECT' +def test_http_proxy(monkeypatch): + def _feature_requester_proxy_test(server, config, secure): + resp_data = { 'flags': {}, 'segments': {} } + expected_data = { FEATURES: {}, SEGMENTS: {} } + server.for_path(config.base_uri + '/sdk/latest-all', JsonResponse(resp_data)) + fr = FeatureRequesterImpl(config) + if secure: + try: + fr.get_all_data() + except: + pass # we expect this to fail because we don't have a real HTTPS proxy server + else: + result = fr.get_all_data() + assert result == expected_data + do_proxy_tests(_feature_requester_proxy_test, 'GET', monkeypatch) diff --git a/testing/test_streaming.py b/testing/test_streaming.py index dadac824..1838e500 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -10,6 +10,7 @@ from ldclient.version import VERSION from ldclient.versioned_data_kind import FEATURES, SEGMENTS from testing.http_util import start_server, BasicResponse, CauseNetworkError, SequentialHandler +from testing.proxy_test_util import do_proxy_tests from testing.stub_util import make_delete_event, make_patch_event, make_put_event, stream_content brief_delay = 0.001 @@ -210,54 +211,24 @@ def test_unrecoverable_http_error(status): assert not sp.initialized() server.should_have_requests(1) -def test_can_use_http_proxy_via_environment_var(monkeypatch): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = 'http://not-real') - monkeypatch.setenv('http_proxy', server.uri) - _verify_http_proxy_is_used(server, config) - -def test_can_use_https_proxy_via_environment_var(monkeypatch): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = 'https://not-real') - monkeypatch.setenv('https_proxy', server.uri) - _verify_https_proxy_is_used(server, config) - -def test_can_use_http_proxy_via_config(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = 'http://not-real', http_proxy=server.uri) - _verify_http_proxy_is_used(server, config) - -def test_can_use_https_proxy_via_config(): - with start_server() as server: - config = Config(sdk_key = 'sdk-key', stream_uri = 'https://not-real', http_proxy=server.uri) - _verify_https_proxy_is_used(server, config) - -def _verify_http_proxy_is_used(server, config): - store = InMemoryFeatureStore() - ready = Event() - with stream_content(make_put_event()) as stream: - server.for_path(config.stream_base_uri + '/all', stream) - with StreamingUpdateProcessor(config, store, ready, None) as sp: - sp.start() - # For an insecure proxy request, our stub server behaves enough like the real thing to satisfy the - # HTTP client, so we should be able to see the request go through. Note that the URI path will - # actually be an absolute URI for a proxy request. 
- req = server.await_request() - assert req.method == 'GET' - ready.wait(start_wait) - assert sp.initialized() - -def _verify_https_proxy_is_used(server, config): - store = InMemoryFeatureStore() - ready = Event() - with stream_content(make_put_event()) as stream: - server.for_path(config.stream_base_uri + '/all', stream) - with StreamingUpdateProcessor(config, store, ready, None) as sp: - sp.start() - # Our simple stub server implementation can't really do HTTPS proxying, so the request will fail, but - # it can still record that it *got* the request, which proves that the request went to the proxy. - req = server.await_request() - assert req.method == 'CONNECT' +def test_http_proxy(monkeypatch): + def _stream_processor_proxy_test(server, config, secure): + store = InMemoryFeatureStore() + ready = Event() + with stream_content(make_put_event()) as stream: + server.for_path(config.stream_base_uri + '/all', stream) + with StreamingUpdateProcessor(config, store, ready, None) as sp: + sp.start() + # Wait till the server has received a request. We need to do this even though do_proxy_tests also + # does it, because if we return too soon out of this block, the object returned by stream_content + # could be closed and the test server would no longer work. + server.wait_until_request_received() + if not secure: + # We only do this part with HTTP, because with HTTPS we don't have a real enough proxy server + # for the stream connection to work correctly - we can only detect the request. + ready.wait(start_wait) + assert sp.initialized() + do_proxy_tests(_stream_processor_proxy_test, 'GET', monkeypatch) def test_records_diagnostic_on_stream_init_success(): store = InMemoryFeatureStore() From ed01be037629990c3549fe4498306c7c960e2914 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 15 Sep 2020 19:54:59 -0700 Subject: [PATCH 201/356] doc comment on auth params in proxy URL --- ldclient/config.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ldclient/config.py b/ldclient/config.py index 675d5f28..9ece6154 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -37,6 +37,7 @@ def __init__(self, variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly service uses HTTPS, but a Relay Proxy instance could use HTTP). Setting this Config parameter will override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. + The URL may contain authentication parameters in the form http://username:password@host:port. :param string ca_certs: If using a custom certificate authority, set this to the file path of the certificate bundle. :param string cert_file: If using a custom client certificate, set this to the file path of the From c35fa6184ce1a274fd5c6d226cb3f1f7a795901a Mon Sep 17 00:00:00 2001 From: Elliot Date: Mon, 28 Sep 2020 11:38:07 -0700 Subject: [PATCH 202/356] add type hints to some of the public facing api. 
update some docs --- .circleci/config.yml | 13 +-- ldclient/__init__.py | 6 +- ldclient/client.py | 22 ++--- ldclient/config.py | 94 ++++++++++--------- ldclient/feature_store.py | 11 +-- ldclient/feature_store_helpers.py | 10 +- ldclient/flag.py | 10 +- ldclient/flags_state.py | 18 ++-- .../integrations/redis/redis_feature_store.py | 2 +- ldclient/integrations.py | 29 +++--- ldclient/interfaces.py | 24 ++--- test-requirements.txt | 1 - 12 files changed, 120 insertions(+), 120 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d6894b79..17886070 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,16 +6,6 @@ orbs: workflows: test: jobs: - - test-linux: - name: Python 3.3 - docker-image: circleci/python:3.3-jessie - consul-supported: false # Consul isn't supported in 3.3 - filesource-supported: false # FileDataSource isn't supported in 3.3 - test-packaging: false # packaging test requires virtualenv, which isn't supported in 3.3 - - test-linux: - name: Python 3.4 - docker-image: circleci/python:3.4-jessie - consul-supported: false # Consul isn't supported in 3.4 - test-linux: name: Python 3.5 docker-image: circleci/python:3.5-jessie @@ -28,6 +18,9 @@ workflows: - test-linux: name: Python 3.8 docker-image: circleci/python:3.8-buster + - test-linux: + name: Python 3.9 prerelease + docker-image: circleci/python:3.9.0rc2 - test-windows: name: Windows Py3.3 py3: true diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 773d6374..24dcfc67 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -22,7 +22,7 @@ __lock = ReadWriteLock() -def set_config(config): +def set_config(config: Config): """Sets the configuration for the shared SDK client instance. If this is called prior to :func:`ldclient.get()`, it stores the configuration that will be used when the @@ -48,7 +48,7 @@ def set_config(config): __lock.unlock() -def set_sdk_key(sdk_key): +def set_sdk_key(sdk_key: str): """Sets the SDK key for the shared SDK client instance. If this is called prior to :func:`ldclient.get()`, it stores the SDK key that will be used when the client is @@ -87,7 +87,7 @@ def set_sdk_key(sdk_key): __lock.unlock() -def get(): +def get() -> LDClient: """Returns the shared SDK client instance, using the current global configuration. To use the SDK as a singleton, first make sure you have called :func:`ldclient.set_sdk_key()` or diff --git a/ldclient/client.py b/ldclient/client.py index 34340c22..6e23fbdc 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -65,7 +65,7 @@ class LDClient: Client instances are thread-safe. """ - def __init__(self, sdk_key=None, config=None, start_wait=5): + def __init__(self, sdk_key: str=None, config: Config=None, start_wait: float=5): """Constructs a new LDClient instance. :param string sdk_key: the SDK key for your LaunchDarkly environment @@ -115,7 +115,7 @@ def __init__(self, sdk_key=None, config=None, start_wait=5): log.warning("Initialization timeout exceeded for LaunchDarkly Client or an error occurred. 
" "Feature Flags may not yet be available.") - def _set_event_processor(self, config): + def _set_event_processor(self, config: Config): if config.offline or not config.send_events: self._event_processor = NullEventProcessor() return None @@ -175,7 +175,7 @@ def __exit__(self, type, value, traceback): def _send_event(self, event): self._event_processor.send_event(event) - def track(self, event_name, user, data=None, metric_value=None): + def track(self, event_name: str, user: dict, data=None, metric_value=None): """Tracks that a user performed an event. LaunchDarkly automatically tracks pageviews and clicks that are specified in the Goals @@ -194,7 +194,7 @@ def track(self, event_name, user, data=None, metric_value=None): else: self._send_event(self._event_factory_default.new_custom_event(event_name, user, data, metric_value)) - def identify(self, user): + def identify(self, user: dict): """Registers the user. This simply creates an analytics event that will transmit the given user properties to @@ -208,14 +208,14 @@ def identify(self, user): else: self._send_event(self._event_factory_default.new_identify_event(user)) - def is_offline(self): + def is_offline(self) -> bool: """Returns true if the client is in offline mode. :rtype: bool """ return self._config.offline - def is_initialized(self): + def is_initialized(self) -> bool: """Returns true if the client has successfully connected to LaunchDarkly. If this returns false, it means that the client has not yet successfully connected to LaunchDarkly. @@ -247,7 +247,7 @@ def toggle(self, key, user, default): log.warning("Deprecated method: toggle() called. Use variation() instead.") return self.variation(key, user, default) - def variation(self, key, user, default): + def variation(self, key: str, user: dict, default: object) -> object: """Determines the variation of a feature flag for a user. :param string key: the unique key for the feature flag @@ -258,7 +258,7 @@ def variation(self, key, user, default): """ return self._evaluate_internal(key, user, default, self._event_factory_default).value - def variation_detail(self, key, user, default): + def variation_detail(self, key: str, user: dict, default: object) -> EvaluationDetail: """Determines the variation of a feature flag for a user, like :func:`variation()`, but also provides additional information about how this value was calculated, in the form of an :class:`ldclient.flag.EvaluationDetail` object. @@ -328,7 +328,7 @@ def _evaluate_internal(self, key, user, default, event_factory): self._send_event(event_factory.new_default_event(flag, user, default, reason)) return EvaluationDetail(default, None, reason) - def all_flags(self, user): + def all_flags(self, user: dict) -> dict: """Returns all feature flag values for the given user. This method is deprecated - please use :func:`all_flags_state()` instead. Current versions of the @@ -344,7 +344,7 @@ def all_flags(self, user): return None return state.to_values_map() - def all_flags_state(self, user, **kwargs): + def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: """Returns an object that encapsulates the state of all feature flags for a given user, including the flag values and also metadata that can be used on the front end. 
See the JavaScript SDK Reference Guide on @@ -412,7 +412,7 @@ def all_flags_state(self, user, **kwargs): return state - def secure_mode_hash(self, user): + def secure_mode_hash(self, user: dict) -> str: """Computes an HMAC signature of a user signed with the client's SDK key, for use with the JavaScript SDK. diff --git a/ldclient/config.py b/ldclient/config.py index a14505f7..3e4acab3 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -3,10 +3,14 @@ Note that the same class can also be imported from the ``ldclient.client`` submodule. """ +from __future__ import annotations + from ldclient.feature_store import InMemoryFeatureStore from ldclient.util import log +from typing import Callable + GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' STREAM_FLAGS_PATH = '/flags' @@ -23,12 +27,12 @@ class HTTPConfig: corresponding `Config` properties will be ignored. """ def __init__(self, - connect_timeout=10, - read_timeout=15, - http_proxy=None, - ca_certs=None, - cert_file=None, - disable_ssl_verification=False): + connect_timeout: float=10, + read_timeout: float=15, + http_proxy: str=None, + ca_certs: str=None, + cert_file: str=None, + disable_ssl_verification: bool=False): """ :param float connect_timeout: The connect timeout for network connections in seconds. :param float read_timeout: The read timeout for network connections in seconds. @@ -54,27 +58,27 @@ def __init__(self, self.__disable_ssl_verification = disable_ssl_verification @property - def connect_timeout(self): + def connect_timeout(self) -> float: return self.__connect_timeout @property - def read_timeout(self): + def read_timeout(self) -> float: return self.__read_timeout @property - def http_proxy(self): + def http_proxy(self) -> str: return self.__http_proxy @property - def ca_certs(self): + def ca_certs(self) -> str: return self.__ca_certs @property - def cert_file(self): + def cert_file(self) -> str: return self.__cert_file @property - def disable_ssl_verification(self): + def disable_ssl_verification(self) -> bool: return self.__disable_ssl_verification class Config: @@ -84,38 +88,38 @@ class Config: if you are using the singleton client, or the :class:`ldclient.client.LDClient` constructor otherwise. 
""" def __init__(self, - sdk_key=None, - base_uri='https://app.launchdarkly.com', - events_uri='https://events.launchdarkly.com', - connect_timeout=10, - read_timeout=15, - events_max_pending=10000, - flush_interval=5, - stream_uri='https://stream.launchdarkly.com', - stream=True, - initial_reconnect_delay=1, - verify_ssl=True, - defaults=None, - send_events=None, - events_enabled=True, - update_processor_class=None, - poll_interval=30, - use_ldd=False, - feature_store=None, - feature_requester_class=None, - event_processor_class=None, + sdk_key: str=None, + base_uri: str='https://app.launchdarkly.com', + events_uri: str='https://events.launchdarkly.com', + connect_timeout: float=10, + read_timeout: float=15, + events_max_pending: int=10000, + flush_interval: float=5, + stream_uri: str='https://stream.launchdarkly.com', + stream: bool=True, + initial_reconnect_delay: float=1, + verify_ssl: bool=True, + defaults: dict=None, + send_events: bool=None, + events_enabled: bool=True, + update_processor_class: Callable[[str, Config, FeatureStore], UpdateProcessor]=None, + poll_interval: float=30, + use_ldd: bool=False, + feature_store: FeatureStore=None, + feature_requester_class: Callable[[str, Config, FeatureStore], FeatureRequester]=None, + event_processor_class: Callable[[Config], EventProcessor]=None, private_attribute_names=(), - all_attributes_private=False, - offline=False, - user_keys_capacity=1000, - user_keys_flush_interval=300, - inline_users_in_events=False, - http_proxy=None, - diagnostic_opt_out=False, - diagnostic_recording_interval=900, - wrapper_name=None, - wrapper_version=None, - http=None): + all_attributes_private: bool=False, + offline: bool=False, + user_keys_capacity: int=1000, + user_keys_flush_interval: float=300, + inline_users_in_events: bool=False, + http_proxy=None, # deprecated: will not type hint + diagnostic_opt_out: bool=False, + diagnostic_recording_interval: int=900, + wrapper_name: str=None, + wrapper_version: str=None, + http: HTTPConfig=None): """ :param string sdk_key: The SDK key for your LaunchDarkly account. :param string base_uri: The base URL for the LaunchDarkly server. Most users should use the default @@ -233,14 +237,14 @@ def __init__(self, self.__http = http @classmethod - def default(cls): + def default(cls) -> Config: """Returns a ``Config`` instance with default values for all properties. :rtype: ldclient.config.Config """ return cls() - def copy_with_new_sdk_key(self, new_sdk_key): + def copy_with_new_sdk_key(self, new_sdk_key: str) -> Config: """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. :param string new_sdk_key: the new SDK key diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index df443510..09d331a4 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -11,7 +11,6 @@ from ldclient.interfaces import DiagnosticDescription, FeatureStore from ldclient.rwlock import ReadWriteLock - class CacheConfig: """Encapsulates caching parameters for feature store implementations that support local caching. """ @@ -20,8 +19,8 @@ class CacheConfig: DEFAULT_CAPACITY = 1000 def __init__(self, - expiration = DEFAULT_EXPIRATION, - capacity = DEFAULT_CAPACITY): + expiration: float = DEFAULT_EXPIRATION, + capacity: int = DEFAULT_CAPACITY): """Constructs an instance of CacheConfig. :param float expiration: the cache TTL, in seconds. 
Items will be evicted from the cache after @@ -50,7 +49,7 @@ def disabled(): return CacheConfig(expiration = 0) @property - def enabled(self): + def enabled(self) -> bool: """Returns True if caching is enabled in this configuration. :rtype: bool @@ -58,7 +57,7 @@ def enabled(self): return self._expiration > 0 @property - def expiration(self): + def expiration(self) -> float: """Returns the configured cache TTL, in seconds. :rtype: float @@ -66,7 +65,7 @@ def expiration(self): return self._expiration @property - def capacity(self): + def capacity(self) -> int: """Returns the configured maximum number of cacheable items. :rtype: int diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index 0f371f7b..13d8b3cb 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -4,8 +4,8 @@ from expiringdict import ExpiringDict -from ldclient.interfaces import DiagnosticDescription, FeatureStore - +from ldclient.interfaces import DiagnosticDescription, FeatureStore, FeatureStoreCore +from ldclient.feature_store import CacheConfig class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """A partial implementation of :class:`ldclient.interfaces.FeatureStore`. @@ -17,7 +17,7 @@ class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """ __INITED_CACHE_KEY__ = "$inited" - def __init__(self, core, cache_config): + def __init__(self, core: FeatureStoreCore, cache_config: CacheConfig): """Constructs an instance by wrapping a core implementation object. :param FeatureStoreCore core: the implementation object @@ -84,7 +84,7 @@ def upsert(self, kind, item): self._cache.pop(self._all_cache_key(kind), None) @property - def initialized(self): + def initialized(self) -> bool: """ """ if self._inited: @@ -100,7 +100,7 @@ def initialized(self): self._inited = True return result - def describe_configuration(self, config): + def describe_configuration(self, config) -> str: if callable(getattr(self._core, 'describe_configuration', None)): return self._core.describe_configuration(config) return "custom" diff --git a/ldclient/flag.py b/ldclient/flag.py index dbf63b45..8ba12c60 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -81,7 +81,7 @@ def reason(self): """ return self.__reason - def is_default_value(self): + def is_default_value(self) -> bool: """Returns True if the flag evaluated to the default value rather than one of its variations. @@ -89,16 +89,16 @@ def is_default_value(self): """ return self.__variation_index is None - def __eq__(self, other): + def __eq__(self, other) -> bool: return self.value == other.value and self.variation_index == other.variation_index and self.reason == other.reason - def __ne__(self, other): + def __ne__(self, other) -> bool: return not self.__eq__(other) - def __str__(self): + def __str__(self) -> str: return "(value=%s, variation_index=%s, reason=%s)" % (self.value, self.variation_index, self.reason) - def __repr__(self): + def __repr__(self) -> str: return self.__str__() diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index 4ea41aaa..198567ce 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -5,6 +5,8 @@ import json import time +from typing import Optional + class FeatureFlagsState: """ A snapshot of the state of all feature flags with regard to a specific user, generated by @@ -13,7 +15,7 @@ class FeatureFlagsState: appropriate data structure for bootstrapping the LaunchDarkly JavaScript client. See the JavaScript SDK Reference Guide on `Bootstrapping `_. 
""" - def __init__(self, valid): + def __init__(self, valid: bool): self.__flag_values = {} self.__flag_metadata = {} self.__valid = valid @@ -41,7 +43,7 @@ def add_flag(self, flag, value, variation, reason, details_only_if_tracked): self.__flag_metadata[key] = meta @property - def valid(self): + def valid(self) -> bool: """True if this object contains a valid snapshot of feature flag state, or False if the state could not be computed (for instance, because the client was offline or there was no user). @@ -49,7 +51,7 @@ def valid(self): """ return self.__valid - def get_flag_value(self, key): + def get_flag_value(self, key: str) -> object: """Returns the value of an individual feature flag at the time the state was recorded. :param string key: the feature flag key @@ -57,7 +59,7 @@ def get_flag_value(self, key): """ return self.__flag_values.get(key) - def get_flag_reason(self, key): + def get_flag_reason(self, key: str) -> Optional[dict]: """Returns the evaluation reason for an individual feature flag at the time the state was recorded. :param string key: the feature flag key @@ -68,7 +70,7 @@ def get_flag_reason(self, key): meta = self.__flag_metadata.get(key) return None if meta is None else meta.get('reason') - def to_values_map(self): + def to_values_map(self) -> dict: """Returns a dictionary of flag keys to flag values. If the flag would have evaluated to the default value, its value will be None. @@ -79,7 +81,7 @@ def to_values_map(self): """ return self.__flag_values - def to_json_dict(self): + def to_json_dict(self) -> dict: """Returns a dictionary suitable for passing as JSON, in the format used by the LaunchDarkly JavaScript SDK. Use this method if you are passing data to the front end in order to "bootstrap" the JavaScript client. @@ -91,14 +93,14 @@ def to_json_dict(self): ret['$valid'] = self.__valid return ret - def to_json_string(self): + def to_json_string(self) -> str: """Same as to_json_dict, but serializes the JSON structure into a string. :rtype: string """ return json.dumps(self.to_json_dict()) - def __getstate__(self): + def __getstate__(self) -> dict: """Equivalent to to_json_dict() - used if you are serializing the object with jsonpickle. """ return self.to_json_dict() diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py index eebe205d..0c676580 100644 --- a/ldclient/impl/integrations/redis/redis_feature_store.py +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -13,7 +13,7 @@ class _RedisFeatureStoreCore(DiagnosticDescription, FeatureStoreCore): - def __init__(self, url, prefix, max_connections): + def __init__(self, url: str, prefix: str, max_connections: int): if not have_redis: raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") self._prefix = prefix or 'launchdarkly' diff --git a/ldclient/integrations.py b/ldclient/integrations.py index 15816f72..f55e0bed 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -10,6 +10,7 @@ from ldclient.impl.integrations.files.file_data_source import _FileDataSource from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore +from typing import List class Consul: """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. 
@@ -19,11 +20,11 @@ class Consul: DEFAULT_PREFIX = "launchdarkly" @staticmethod - def new_feature_store(host=None, - port=None, - prefix=None, - consul_opts=None, - caching=CacheConfig.default()): + def new_feature_store(host: str=None, + port: int=None, + prefix: str=None, + consul_opts: dict=None, + caching: CacheConfig=CacheConfig.default()): """Creates a Consul-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -58,10 +59,10 @@ class DynamoDB: """ @staticmethod - def new_feature_store(table_name, - prefix=None, - dynamodb_opts={}, - caching=CacheConfig.default()): + def new_feature_store(table_name: str, + prefix: str=None, + dynamodb_opts: dict={}, + caching: CacheConfig=CacheConfig.default()): """Creates a DynamoDB-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -103,10 +104,10 @@ class Redis: DEFAULT_MAX_CONNECTIONS = 16 @staticmethod - def new_feature_store(url='redis://localhost:6379/0', - prefix='launchdarkly', - max_connections=16, - caching=CacheConfig.default()): + def new_feature_store(url: str='redis://localhost:6379/0', + prefix: str='launchdarkly', + max_connections: int=16, + caching: CacheConfig=CacheConfig.default()): """Creates a Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -139,7 +140,7 @@ class Files: """ @staticmethod - def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=False): + def new_data_source(paths: List[str], auto_update: bool=False, poll_interval: float=1, force_polling: bool=False): """Provides a way to use local files as a source of feature flag state. This would typically be used in a test environment, to operate using a predetermined feature flag state without an actual LaunchDarkly connection. diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index ea3c9fbd..16926609 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -5,7 +5,9 @@ """ from abc import ABCMeta, abstractmethod, abstractproperty +from .versioned_data_kind import VersionedDataKind +from typing import Mapping class FeatureStore: """ @@ -41,7 +43,7 @@ def get(self, kind, key, callback=lambda x: x): """ @abstractmethod - def all(self, kind, callback=lambda x: x): + def all(self, kind: VersionedDataKind, callback=lambda x: x): """ Retrieves a dictionary of all associated objects of a given kind. The retrieved dict of keys to objects can be transformed by the specified callback. @@ -54,7 +56,7 @@ def all(self, kind, callback=lambda x: x): """ @abstractmethod - def init(self, all_data): + def init(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]]): """ Initializes (or re-initializes) the store with the specified set of objects. Any existing entries will be removed. Implementations can assume that this set of objects is up to date-- there is no @@ -65,7 +67,7 @@ def init(self, all_data): """ @abstractmethod - def delete(self, kind, key, version): + def delete(self, kind: VersionedDataKind, key: str, version: int): """ Deletes the object associated with the specified key, if it exists and its version is less than the specified version. 
The object should be replaced in the data store by a @@ -80,7 +82,7 @@ def delete(self, kind, key, version): """ @abstractmethod - def upsert(self, kind, item): + def upsert(self, kind: VersionedDataKind, item: dict): """ Updates or inserts the object associated with the specified key. If an item with the same key already exists, it should update it only if the new item's version property is greater than @@ -89,11 +91,11 @@ def upsert(self, kind, item): :param kind: The kind of object to update :type kind: VersionedDataKind :param item: The object to update or insert - :type feature: dict + :type item: dict """ @abstractproperty - def initialized(self): + def initialized(self) -> bool: """ Returns whether the store has been initialized yet or not @@ -112,7 +114,7 @@ class FeatureStoreCore: __metaclass__ = ABCMeta @abstractmethod - def get_internal(self, kind, key): + def get_internal(self, kind: VersionedDataKind, key: str) -> dict: """ Returns the object to which the specified key is mapped, or None if no such item exists. The method should not attempt to filter out any items based on their deleted property, @@ -127,7 +129,7 @@ def get_internal(self, kind, key): """ @abstractmethod - def get_all_internal(self, callback): + def get_all_internal(self, callback: VersionedDataKind) -> Mapping[str, dict]: """ Returns a dictionary of all associated objects of a given kind. The method should not attempt to filter out any items based on their deleted property, nor to cache any items. @@ -139,7 +141,7 @@ def get_all_internal(self, callback): """ @abstractmethod - def init_internal(self, all_data): + def init_internal(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]]): """ Initializes (or re-initializes) the store with the specified set of objects. Any existing entries will be removed. Implementations can assume that this set of objects is up to date-- there is no @@ -151,7 +153,7 @@ def init_internal(self, all_data): """ @abstractmethod - def upsert_internal(self, kind, item): + def upsert_internal(self, kind: VersionedDataKind, item: dict) -> dict: """ Updates or inserts the object associated with the specified key. If an item with the same key already exists, it should update it only if the new item's version property is greater than @@ -169,7 +171,7 @@ def upsert_internal(self, kind, item): """ @abstractmethod - def initialized_internal(self): + def initialized_internal(self) -> bool: """ Returns true if this store has been initialized. In a shared data store, it should be able to detect this even if initInternal was called in a different process, i.e. the test should be diff --git a/test-requirements.txt b/test-requirements.txt index bc5b43f2..015898af 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,6 +4,5 @@ redis>=2.10.5 boto3>=1.9.71 coverage>=4.4 jsonpickle==0.9.3 -pytest-capturelog>=0.7 pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 From 77458815e9f1e2a6299e08df503c819261f516b3 Mon Sep 17 00:00:00 2001 From: Elliot Date: Mon, 28 Sep 2020 12:16:11 -0700 Subject: [PATCH 203/356] Revert "add type hints to some of the public facing api." This reverts commit c35fa6184ce1a274fd5c6d226cb3f1f7a795901a. 
--- .circleci/config.yml | 13 ++- ldclient/__init__.py | 6 +- ldclient/client.py | 22 ++--- ldclient/config.py | 94 +++++++++---------- ldclient/feature_store.py | 11 ++- ldclient/feature_store_helpers.py | 10 +- ldclient/flag.py | 10 +- ldclient/flags_state.py | 18 ++-- .../integrations/redis/redis_feature_store.py | 2 +- ldclient/integrations.py | 29 +++--- ldclient/interfaces.py | 24 +++-- test-requirements.txt | 1 + 12 files changed, 120 insertions(+), 120 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 17886070..d6894b79 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,6 +6,16 @@ orbs: workflows: test: jobs: + - test-linux: + name: Python 3.3 + docker-image: circleci/python:3.3-jessie + consul-supported: false # Consul isn't supported in 3.3 + filesource-supported: false # FileDataSource isn't supported in 3.3 + test-packaging: false # packaging test requires virtualenv, which isn't supported in 3.3 + - test-linux: + name: Python 3.4 + docker-image: circleci/python:3.4-jessie + consul-supported: false # Consul isn't supported in 3.4 - test-linux: name: Python 3.5 docker-image: circleci/python:3.5-jessie @@ -18,9 +28,6 @@ workflows: - test-linux: name: Python 3.8 docker-image: circleci/python:3.8-buster - - test-linux: - name: Python 3.9 prerelease - docker-image: circleci/python:3.9.0rc2 - test-windows: name: Windows Py3.3 py3: true diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 24dcfc67..773d6374 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -22,7 +22,7 @@ __lock = ReadWriteLock() -def set_config(config: Config): +def set_config(config): """Sets the configuration for the shared SDK client instance. If this is called prior to :func:`ldclient.get()`, it stores the configuration that will be used when the @@ -48,7 +48,7 @@ def set_config(config: Config): __lock.unlock() -def set_sdk_key(sdk_key: str): +def set_sdk_key(sdk_key): """Sets the SDK key for the shared SDK client instance. If this is called prior to :func:`ldclient.get()`, it stores the SDK key that will be used when the client is @@ -87,7 +87,7 @@ def set_sdk_key(sdk_key: str): __lock.unlock() -def get() -> LDClient: +def get(): """Returns the shared SDK client instance, using the current global configuration. To use the SDK as a singleton, first make sure you have called :func:`ldclient.set_sdk_key()` or diff --git a/ldclient/client.py b/ldclient/client.py index 6e23fbdc..34340c22 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -65,7 +65,7 @@ class LDClient: Client instances are thread-safe. """ - def __init__(self, sdk_key: str=None, config: Config=None, start_wait: float=5): + def __init__(self, sdk_key=None, config=None, start_wait=5): """Constructs a new LDClient instance. :param string sdk_key: the SDK key for your LaunchDarkly environment @@ -115,7 +115,7 @@ def __init__(self, sdk_key: str=None, config: Config=None, start_wait: float=5): log.warning("Initialization timeout exceeded for LaunchDarkly Client or an error occurred. 
" "Feature Flags may not yet be available.") - def _set_event_processor(self, config: Config): + def _set_event_processor(self, config): if config.offline or not config.send_events: self._event_processor = NullEventProcessor() return None @@ -175,7 +175,7 @@ def __exit__(self, type, value, traceback): def _send_event(self, event): self._event_processor.send_event(event) - def track(self, event_name: str, user: dict, data=None, metric_value=None): + def track(self, event_name, user, data=None, metric_value=None): """Tracks that a user performed an event. LaunchDarkly automatically tracks pageviews and clicks that are specified in the Goals @@ -194,7 +194,7 @@ def track(self, event_name: str, user: dict, data=None, metric_value=None): else: self._send_event(self._event_factory_default.new_custom_event(event_name, user, data, metric_value)) - def identify(self, user: dict): + def identify(self, user): """Registers the user. This simply creates an analytics event that will transmit the given user properties to @@ -208,14 +208,14 @@ def identify(self, user: dict): else: self._send_event(self._event_factory_default.new_identify_event(user)) - def is_offline(self) -> bool: + def is_offline(self): """Returns true if the client is in offline mode. :rtype: bool """ return self._config.offline - def is_initialized(self) -> bool: + def is_initialized(self): """Returns true if the client has successfully connected to LaunchDarkly. If this returns false, it means that the client has not yet successfully connected to LaunchDarkly. @@ -247,7 +247,7 @@ def toggle(self, key, user, default): log.warning("Deprecated method: toggle() called. Use variation() instead.") return self.variation(key, user, default) - def variation(self, key: str, user: dict, default: object) -> object: + def variation(self, key, user, default): """Determines the variation of a feature flag for a user. :param string key: the unique key for the feature flag @@ -258,7 +258,7 @@ def variation(self, key: str, user: dict, default: object) -> object: """ return self._evaluate_internal(key, user, default, self._event_factory_default).value - def variation_detail(self, key: str, user: dict, default: object) -> EvaluationDetail: + def variation_detail(self, key, user, default): """Determines the variation of a feature flag for a user, like :func:`variation()`, but also provides additional information about how this value was calculated, in the form of an :class:`ldclient.flag.EvaluationDetail` object. @@ -328,7 +328,7 @@ def _evaluate_internal(self, key, user, default, event_factory): self._send_event(event_factory.new_default_event(flag, user, default, reason)) return EvaluationDetail(default, None, reason) - def all_flags(self, user: dict) -> dict: + def all_flags(self, user): """Returns all feature flag values for the given user. This method is deprecated - please use :func:`all_flags_state()` instead. Current versions of the @@ -344,7 +344,7 @@ def all_flags(self, user: dict) -> dict: return None return state.to_values_map() - def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: + def all_flags_state(self, user, **kwargs): """Returns an object that encapsulates the state of all feature flags for a given user, including the flag values and also metadata that can be used on the front end. 
See the JavaScript SDK Reference Guide on @@ -412,7 +412,7 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: return state - def secure_mode_hash(self, user: dict) -> str: + def secure_mode_hash(self, user): """Computes an HMAC signature of a user signed with the client's SDK key, for use with the JavaScript SDK. diff --git a/ldclient/config.py b/ldclient/config.py index 3e4acab3..a14505f7 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -3,14 +3,10 @@ Note that the same class can also be imported from the ``ldclient.client`` submodule. """ -from __future__ import annotations - from ldclient.feature_store import InMemoryFeatureStore from ldclient.util import log -from typing import Callable - GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' STREAM_FLAGS_PATH = '/flags' @@ -27,12 +23,12 @@ class HTTPConfig: corresponding `Config` properties will be ignored. """ def __init__(self, - connect_timeout: float=10, - read_timeout: float=15, - http_proxy: str=None, - ca_certs: str=None, - cert_file: str=None, - disable_ssl_verification: bool=False): + connect_timeout=10, + read_timeout=15, + http_proxy=None, + ca_certs=None, + cert_file=None, + disable_ssl_verification=False): """ :param float connect_timeout: The connect timeout for network connections in seconds. :param float read_timeout: The read timeout for network connections in seconds. @@ -58,27 +54,27 @@ def __init__(self, self.__disable_ssl_verification = disable_ssl_verification @property - def connect_timeout(self) -> float: + def connect_timeout(self): return self.__connect_timeout @property - def read_timeout(self) -> float: + def read_timeout(self): return self.__read_timeout @property - def http_proxy(self) -> str: + def http_proxy(self): return self.__http_proxy @property - def ca_certs(self) -> str: + def ca_certs(self): return self.__ca_certs @property - def cert_file(self) -> str: + def cert_file(self): return self.__cert_file @property - def disable_ssl_verification(self) -> bool: + def disable_ssl_verification(self): return self.__disable_ssl_verification class Config: @@ -88,38 +84,38 @@ class Config: if you are using the singleton client, or the :class:`ldclient.client.LDClient` constructor otherwise. 
""" def __init__(self, - sdk_key: str=None, - base_uri: str='https://app.launchdarkly.com', - events_uri: str='https://events.launchdarkly.com', - connect_timeout: float=10, - read_timeout: float=15, - events_max_pending: int=10000, - flush_interval: float=5, - stream_uri: str='https://stream.launchdarkly.com', - stream: bool=True, - initial_reconnect_delay: float=1, - verify_ssl: bool=True, - defaults: dict=None, - send_events: bool=None, - events_enabled: bool=True, - update_processor_class: Callable[[str, Config, FeatureStore], UpdateProcessor]=None, - poll_interval: float=30, - use_ldd: bool=False, - feature_store: FeatureStore=None, - feature_requester_class: Callable[[str, Config, FeatureStore], FeatureRequester]=None, - event_processor_class: Callable[[Config], EventProcessor]=None, + sdk_key=None, + base_uri='https://app.launchdarkly.com', + events_uri='https://events.launchdarkly.com', + connect_timeout=10, + read_timeout=15, + events_max_pending=10000, + flush_interval=5, + stream_uri='https://stream.launchdarkly.com', + stream=True, + initial_reconnect_delay=1, + verify_ssl=True, + defaults=None, + send_events=None, + events_enabled=True, + update_processor_class=None, + poll_interval=30, + use_ldd=False, + feature_store=None, + feature_requester_class=None, + event_processor_class=None, private_attribute_names=(), - all_attributes_private: bool=False, - offline: bool=False, - user_keys_capacity: int=1000, - user_keys_flush_interval: float=300, - inline_users_in_events: bool=False, - http_proxy=None, # deprecated: will not type hint - diagnostic_opt_out: bool=False, - diagnostic_recording_interval: int=900, - wrapper_name: str=None, - wrapper_version: str=None, - http: HTTPConfig=None): + all_attributes_private=False, + offline=False, + user_keys_capacity=1000, + user_keys_flush_interval=300, + inline_users_in_events=False, + http_proxy=None, + diagnostic_opt_out=False, + diagnostic_recording_interval=900, + wrapper_name=None, + wrapper_version=None, + http=None): """ :param string sdk_key: The SDK key for your LaunchDarkly account. :param string base_uri: The base URL for the LaunchDarkly server. Most users should use the default @@ -237,14 +233,14 @@ def __init__(self, self.__http = http @classmethod - def default(cls) -> Config: + def default(cls): """Returns a ``Config`` instance with default values for all properties. :rtype: ldclient.config.Config """ return cls() - def copy_with_new_sdk_key(self, new_sdk_key: str) -> Config: + def copy_with_new_sdk_key(self, new_sdk_key): """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. :param string new_sdk_key: the new SDK key diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index 09d331a4..df443510 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -11,6 +11,7 @@ from ldclient.interfaces import DiagnosticDescription, FeatureStore from ldclient.rwlock import ReadWriteLock + class CacheConfig: """Encapsulates caching parameters for feature store implementations that support local caching. """ @@ -19,8 +20,8 @@ class CacheConfig: DEFAULT_CAPACITY = 1000 def __init__(self, - expiration: float = DEFAULT_EXPIRATION, - capacity: int = DEFAULT_CAPACITY): + expiration = DEFAULT_EXPIRATION, + capacity = DEFAULT_CAPACITY): """Constructs an instance of CacheConfig. :param float expiration: the cache TTL, in seconds. 
Items will be evicted from the cache after @@ -49,7 +50,7 @@ def disabled(): return CacheConfig(expiration = 0) @property - def enabled(self) -> bool: + def enabled(self): """Returns True if caching is enabled in this configuration. :rtype: bool @@ -57,7 +58,7 @@ def enabled(self) -> bool: return self._expiration > 0 @property - def expiration(self) -> float: + def expiration(self): """Returns the configured cache TTL, in seconds. :rtype: float @@ -65,7 +66,7 @@ def expiration(self) -> float: return self._expiration @property - def capacity(self) -> int: + def capacity(self): """Returns the configured maximum number of cacheable items. :rtype: int diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index 13d8b3cb..0f371f7b 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -4,8 +4,8 @@ from expiringdict import ExpiringDict -from ldclient.interfaces import DiagnosticDescription, FeatureStore, FeatureStoreCore -from ldclient.feature_store import CacheConfig +from ldclient.interfaces import DiagnosticDescription, FeatureStore + class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """A partial implementation of :class:`ldclient.interfaces.FeatureStore`. @@ -17,7 +17,7 @@ class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """ __INITED_CACHE_KEY__ = "$inited" - def __init__(self, core: FeatureStoreCore, cache_config: CacheConfig): + def __init__(self, core, cache_config): """Constructs an instance by wrapping a core implementation object. :param FeatureStoreCore core: the implementation object @@ -84,7 +84,7 @@ def upsert(self, kind, item): self._cache.pop(self._all_cache_key(kind), None) @property - def initialized(self) -> bool: + def initialized(self): """ """ if self._inited: @@ -100,7 +100,7 @@ def initialized(self) -> bool: self._inited = True return result - def describe_configuration(self, config) -> str: + def describe_configuration(self, config): if callable(getattr(self._core, 'describe_configuration', None)): return self._core.describe_configuration(config) return "custom" diff --git a/ldclient/flag.py b/ldclient/flag.py index 8ba12c60..dbf63b45 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -81,7 +81,7 @@ def reason(self): """ return self.__reason - def is_default_value(self) -> bool: + def is_default_value(self): """Returns True if the flag evaluated to the default value rather than one of its variations. @@ -89,16 +89,16 @@ def is_default_value(self) -> bool: """ return self.__variation_index is None - def __eq__(self, other) -> bool: + def __eq__(self, other): return self.value == other.value and self.variation_index == other.variation_index and self.reason == other.reason - def __ne__(self, other) -> bool: + def __ne__(self, other): return not self.__eq__(other) - def __str__(self) -> str: + def __str__(self): return "(value=%s, variation_index=%s, reason=%s)" % (self.value, self.variation_index, self.reason) - def __repr__(self) -> str: + def __repr__(self): return self.__str__() diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index 198567ce..4ea41aaa 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -5,8 +5,6 @@ import json import time -from typing import Optional - class FeatureFlagsState: """ A snapshot of the state of all feature flags with regard to a specific user, generated by @@ -15,7 +13,7 @@ class FeatureFlagsState: appropriate data structure for bootstrapping the LaunchDarkly JavaScript client. 
See the JavaScript SDK Reference Guide on `Bootstrapping `_. """ - def __init__(self, valid: bool): + def __init__(self, valid): self.__flag_values = {} self.__flag_metadata = {} self.__valid = valid @@ -43,7 +41,7 @@ def add_flag(self, flag, value, variation, reason, details_only_if_tracked): self.__flag_metadata[key] = meta @property - def valid(self) -> bool: + def valid(self): """True if this object contains a valid snapshot of feature flag state, or False if the state could not be computed (for instance, because the client was offline or there was no user). @@ -51,7 +49,7 @@ def valid(self) -> bool: """ return self.__valid - def get_flag_value(self, key: str) -> object: + def get_flag_value(self, key): """Returns the value of an individual feature flag at the time the state was recorded. :param string key: the feature flag key @@ -59,7 +57,7 @@ def get_flag_value(self, key: str) -> object: """ return self.__flag_values.get(key) - def get_flag_reason(self, key: str) -> Optional[dict]: + def get_flag_reason(self, key): """Returns the evaluation reason for an individual feature flag at the time the state was recorded. :param string key: the feature flag key @@ -70,7 +68,7 @@ def get_flag_reason(self, key: str) -> Optional[dict]: meta = self.__flag_metadata.get(key) return None if meta is None else meta.get('reason') - def to_values_map(self) -> dict: + def to_values_map(self): """Returns a dictionary of flag keys to flag values. If the flag would have evaluated to the default value, its value will be None. @@ -81,7 +79,7 @@ def to_values_map(self) -> dict: """ return self.__flag_values - def to_json_dict(self) -> dict: + def to_json_dict(self): """Returns a dictionary suitable for passing as JSON, in the format used by the LaunchDarkly JavaScript SDK. Use this method if you are passing data to the front end in order to "bootstrap" the JavaScript client. @@ -93,14 +91,14 @@ def to_json_dict(self) -> dict: ret['$valid'] = self.__valid return ret - def to_json_string(self) -> str: + def to_json_string(self): """Same as to_json_dict, but serializes the JSON structure into a string. :rtype: string """ return json.dumps(self.to_json_dict()) - def __getstate__(self) -> dict: + def __getstate__(self): """Equivalent to to_json_dict() - used if you are serializing the object with jsonpickle. """ return self.to_json_dict() diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py index 0c676580..eebe205d 100644 --- a/ldclient/impl/integrations/redis/redis_feature_store.py +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -13,7 +13,7 @@ class _RedisFeatureStoreCore(DiagnosticDescription, FeatureStoreCore): - def __init__(self, url: str, prefix: str, max_connections: int): + def __init__(self, url, prefix, max_connections): if not have_redis: raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") self._prefix = prefix or 'launchdarkly' diff --git a/ldclient/integrations.py b/ldclient/integrations.py index f55e0bed..15816f72 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -10,7 +10,6 @@ from ldclient.impl.integrations.files.file_data_source import _FileDataSource from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore -from typing import List class Consul: """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. 
@@ -20,11 +19,11 @@ class Consul: DEFAULT_PREFIX = "launchdarkly" @staticmethod - def new_feature_store(host: str=None, - port: int=None, - prefix: str=None, - consul_opts: dict=None, - caching: CacheConfig=CacheConfig.default()): + def new_feature_store(host=None, + port=None, + prefix=None, + consul_opts=None, + caching=CacheConfig.default()): """Creates a Consul-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -59,10 +58,10 @@ class DynamoDB: """ @staticmethod - def new_feature_store(table_name: str, - prefix: str=None, - dynamodb_opts: dict={}, - caching: CacheConfig=CacheConfig.default()): + def new_feature_store(table_name, + prefix=None, + dynamodb_opts={}, + caching=CacheConfig.default()): """Creates a DynamoDB-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -104,10 +103,10 @@ class Redis: DEFAULT_MAX_CONNECTIONS = 16 @staticmethod - def new_feature_store(url: str='redis://localhost:6379/0', - prefix: str='launchdarkly', - max_connections: int=16, - caching: CacheConfig=CacheConfig.default()): + def new_feature_store(url='redis://localhost:6379/0', + prefix='launchdarkly', + max_connections=16, + caching=CacheConfig.default()): """Creates a Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -140,7 +139,7 @@ class Files: """ @staticmethod - def new_data_source(paths: List[str], auto_update: bool=False, poll_interval: float=1, force_polling: bool=False): + def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=False): """Provides a way to use local files as a source of feature flag state. This would typically be used in a test environment, to operate using a predetermined feature flag state without an actual LaunchDarkly connection. diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 16926609..ea3c9fbd 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -5,9 +5,7 @@ """ from abc import ABCMeta, abstractmethod, abstractproperty -from .versioned_data_kind import VersionedDataKind -from typing import Mapping class FeatureStore: """ @@ -43,7 +41,7 @@ def get(self, kind, key, callback=lambda x: x): """ @abstractmethod - def all(self, kind: VersionedDataKind, callback=lambda x: x): + def all(self, kind, callback=lambda x: x): """ Retrieves a dictionary of all associated objects of a given kind. The retrieved dict of keys to objects can be transformed by the specified callback. @@ -56,7 +54,7 @@ def all(self, kind: VersionedDataKind, callback=lambda x: x): """ @abstractmethod - def init(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]]): + def init(self, all_data): """ Initializes (or re-initializes) the store with the specified set of objects. Any existing entries will be removed. Implementations can assume that this set of objects is up to date-- there is no @@ -67,7 +65,7 @@ def init(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]]): """ @abstractmethod - def delete(self, kind: VersionedDataKind, key: str, version: int): + def delete(self, kind, key, version): """ Deletes the object associated with the specified key, if it exists and its version is less than the specified version. 
The object should be replaced in the data store by a @@ -82,7 +80,7 @@ def delete(self, kind: VersionedDataKind, key: str, version: int): """ @abstractmethod - def upsert(self, kind: VersionedDataKind, item: dict): + def upsert(self, kind, item): """ Updates or inserts the object associated with the specified key. If an item with the same key already exists, it should update it only if the new item's version property is greater than @@ -91,11 +89,11 @@ def upsert(self, kind: VersionedDataKind, item: dict): :param kind: The kind of object to update :type kind: VersionedDataKind :param item: The object to update or insert - :type item: dict + :type feature: dict """ @abstractproperty - def initialized(self) -> bool: + def initialized(self): """ Returns whether the store has been initialized yet or not @@ -114,7 +112,7 @@ class FeatureStoreCore: __metaclass__ = ABCMeta @abstractmethod - def get_internal(self, kind: VersionedDataKind, key: str) -> dict: + def get_internal(self, kind, key): """ Returns the object to which the specified key is mapped, or None if no such item exists. The method should not attempt to filter out any items based on their deleted property, @@ -129,7 +127,7 @@ def get_internal(self, kind: VersionedDataKind, key: str) -> dict: """ @abstractmethod - def get_all_internal(self, callback: VersionedDataKind) -> Mapping[str, dict]: + def get_all_internal(self, callback): """ Returns a dictionary of all associated objects of a given kind. The method should not attempt to filter out any items based on their deleted property, nor to cache any items. @@ -141,7 +139,7 @@ def get_all_internal(self, callback: VersionedDataKind) -> Mapping[str, dict]: """ @abstractmethod - def init_internal(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]]): + def init_internal(self, all_data): """ Initializes (or re-initializes) the store with the specified set of objects. Any existing entries will be removed. Implementations can assume that this set of objects is up to date-- there is no @@ -153,7 +151,7 @@ def init_internal(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]] """ @abstractmethod - def upsert_internal(self, kind: VersionedDataKind, item: dict) -> dict: + def upsert_internal(self, kind, item): """ Updates or inserts the object associated with the specified key. If an item with the same key already exists, it should update it only if the new item's version property is greater than @@ -171,7 +169,7 @@ def upsert_internal(self, kind: VersionedDataKind, item: dict) -> dict: """ @abstractmethod - def initialized_internal(self) -> bool: + def initialized_internal(self): """ Returns true if this store has been initialized. In a shared data store, it should be able to detect this even if initInternal was called in a different process, i.e. 
the test should be diff --git a/test-requirements.txt b/test-requirements.txt index 015898af..bc5b43f2 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,5 +4,6 @@ redis>=2.10.5 boto3>=1.9.71 coverage>=4.4 jsonpickle==0.9.3 +pytest-capturelog>=0.7 pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 From 43b4c315c6c2643bf072bd68b0b743c629a4d79e Mon Sep 17 00:00:00 2001 From: Elliot <35050275+Apache-HB@users.noreply.github.com> Date: Thu, 15 Oct 2020 09:42:21 -0400 Subject: [PATCH 204/356] Ehaisley/ch86857/type hints (#138) * add typehints to the public API * validate typehints in the public api and tests with mypy --- .circleci/config.yml | 41 +++--- README.md | 2 +- docs/Makefile | 2 +- docs/conf.py | 3 +- docs/requirements.txt | 3 +- ldclient/__init__.py | 18 ++- ldclient/client.py | 81 +++++------ ldclient/config.py | 226 ++++++++++++++---------------- ldclient/feature_store.py | 39 +++--- ldclient/feature_store_helpers.py | 16 ++- ldclient/flag.py | 32 ++--- ldclient/flags_state.py | 35 ++--- ldclient/impl/__init__.py | 3 + ldclient/integrations.py | 68 ++++----- ldclient/interfaces.py | 62 +++----- ldclient/memoized_value.py | 2 +- ldclient/operators.py | 8 +- mypy.ini | 3 + requirements.txt | 2 +- runtests.py | 3 +- setup.py | 3 +- test-requirements.txt | 2 +- testing/http_util.py | 2 +- testing/test_feature_store.py | 17 ++- testing/test_user_filter.py | 2 +- 25 files changed, 312 insertions(+), 363 deletions(-) create mode 100644 mypy.ini diff --git a/.circleci/config.yml b/.circleci/config.yml index d6894b79..87453577 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,16 +6,6 @@ orbs: workflows: test: jobs: - - test-linux: - name: Python 3.3 - docker-image: circleci/python:3.3-jessie - consul-supported: false # Consul isn't supported in 3.3 - filesource-supported: false # FileDataSource isn't supported in 3.3 - test-packaging: false # packaging test requires virtualenv, which isn't supported in 3.3 - - test-linux: - name: Python 3.4 - docker-image: circleci/python:3.4-jessie - consul-supported: false # Consul isn't supported in 3.4 - test-linux: name: Python 3.5 docker-image: circleci/python:3.5-jessie @@ -28,8 +18,11 @@ workflows: - test-linux: name: Python 3.8 docker-image: circleci/python:3.8-buster + - test-linux: + name: Python 3.9 + docker-image: circleci/python:3.9-rc-buster - test-windows: - name: Windows Py3.3 + name: Windows Python 3 py3: true jobs: @@ -37,18 +30,15 @@ jobs: parameters: docker-image: type: string - consul-supported: - type: boolean - default: true - filesource-supported: - type: boolean - default: true test-packaging: type: boolean default: true test-with-codeclimate: type: boolean default: false + test-with-mypy: + type: boolean + default: true docker: - image: <> - image: redis @@ -61,12 +51,8 @@ jobs: command: | sudo pip install --upgrade pip virtualenv; sudo pip install -r test-requirements.txt; - if [[ "<>" == "true" ]]; then - sudo pip install -r test-filesource-optional-requirements.txt; - fi; - if [[ "<>" == "true" ]]; then - sudo pip install -r consul-requirements.txt; - fi; + sudo pip install -r test-filesource-optional-requirements.txt; + sudo pip install -r consul-requirements.txt; sudo python setup.py install; pip freeze - when: @@ -94,6 +80,15 @@ jobs: command: | sudo rm -rf dist *.egg-info ./test-packaging/test-packaging.sh + - when: + condition: <> + steps: + - run: + name: verify typehints + command: | + pip install mypy + export PATH="/home/circleci/.local/bin:$PATH" + mypy --config-file mypy.ini 
--python-version 3.5 ldclient/*.py testing/*.py - store_test_results: path: test-reports - store_artifacts: diff --git a/README.md b/README.md index e2252f4e..80bd937b 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ ## Supported Python versions -This version of the LaunchDarkly SDK is compatible with Python 3.3 through 3.7. It is tested with the most recent patch releases of those versions. Python 2.7 is no longer supported. +This version of the LaunchDarkly SDK is compatible with Python 3.5 through 3.9. It is tested with the most recent patch releases of those versions. Python versions 2.7 to 3.4 are no longer supported. ## Getting started diff --git a/docs/Makefile b/docs/Makefile index aea5aff6..fb0093da 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -13,7 +13,7 @@ help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) install: - pip install -r requirements.txt + pip3 install -r requirements.txt html: install @$(SPHINXBUILD) -M html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/conf.py b/docs/conf.py index b93d3c36..c4442b65 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# +# type: ignore # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a @@ -46,6 +46,7 @@ # ones. extensions = [ 'sphinx.ext.autodoc', + 'sphinx_autodoc_typehints', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', ] diff --git a/docs/requirements.txt b/docs/requirements.txt index 15b59476..3d7f6394 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,5 +1,6 @@ -sphinx<2.0 +sphinx sphinx_rtd_theme +sphinx-autodoc-typehints backoff>=1.4.3 certifi>=2018.4.16 diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 773d6374..8eb9ac36 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -22,7 +22,7 @@ __lock = ReadWriteLock() -def set_config(config): +def set_config(config: Config): """Sets the configuration for the shared SDK client instance. If this is called prior to :func:`ldclient.get()`, it stores the configuration that will be used when the @@ -30,7 +30,7 @@ def set_config(config): re-initialized with the new configuration (this will result in the next call to :func:`ldclient.get()` returning a new client instance). - :param ldclient.config.Config config: the client configuration + :param config: the client configuration """ global __config global __client @@ -38,7 +38,7 @@ def set_config(config): try: __lock.lock() if __client: - log.info("Reinitializing LaunchDarkly Client " + version.VERSION + " with new config") + log.info("Reinitializing LaunchDarkly Client " + VERSION + " with new config") new_client = LDClient(config=config, start_wait=start_wait) old_client = __client __client = new_client @@ -48,7 +48,7 @@ def set_config(config): __lock.unlock() -def set_sdk_key(sdk_key): +def set_sdk_key(sdk_key: str): """Sets the SDK key for the shared SDK client instance. If this is called prior to :func:`ldclient.get()`, it stores the SDK key that will be used when the client is @@ -58,7 +58,7 @@ def set_sdk_key(sdk_key): If you need to set any configuration options other than the SDK key, use :func:`ldclient.set_config()` instead. 
- :param string sdk_key: the new SDK key + :param sdk_key: the new SDK key """ global __config global __client @@ -78,7 +78,7 @@ def set_sdk_key(sdk_key): __lock.lock() __config = __config.copy_with_new_sdk_key(new_sdk_key=sdk_key) if __client: - log.info("Reinitializing LaunchDarkly Client " + version.VERSION + " with new sdk key") + log.info("Reinitializing LaunchDarkly Client " + VERSION + " with new sdk key") new_client = LDClient(config=__config, start_wait=start_wait) old_client = __client __client = new_client @@ -87,7 +87,7 @@ def set_sdk_key(sdk_key): __lock.unlock() -def get(): +def get() -> LDClient: """Returns the shared SDK client instance, using the current global configuration. To use the SDK as a singleton, first make sure you have called :func:`ldclient.set_sdk_key()` or @@ -97,8 +97,6 @@ def get(): If you need to create multiple client instances with different configurations, instead of this singleton approach you can call the :class:`ldclient.client.LDClient` constructor directly instead. - - :rtype: ldclient.client.LDClient """ global __config global __client @@ -113,7 +111,7 @@ def get(): try: __lock.lock() if not __client: - log.info("Initializing LaunchDarkly Client " + version.VERSION) + log.info("Initializing LaunchDarkly Client " + VERSION) __client = LDClient(config=__config, start_wait=start_wait) return __client finally: diff --git a/ldclient/client.py b/ldclient/client.py index 34340c22..4d8b9600 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -2,6 +2,9 @@ This submodule contains the client class that provides most of the SDK functionality. """ +from typing import Optional, Any, Dict, Mapping +from .impl import AnyNum + import hashlib import hmac import threading @@ -20,7 +23,8 @@ from ldclient.polling import PollingUpdateProcessor from ldclient.streaming import StreamingUpdateProcessor from ldclient.util import check_uwsgi, log -from ldclient.versioned_data_kind import FEATURES, SEGMENTS +from ldclient.versioned_data_kind import FEATURES, SEGMENTS, VersionedDataKind +from ldclient.feature_store import FeatureStore import queue from threading import Lock @@ -32,10 +36,10 @@ class _FeatureStoreClientWrapper(FeatureStore): to provide an update listener capability. """ - def __init__(self, store): + def __init__(self, store: FeatureStore): self.store = store - def init(self, all_data): + def init(self, all_data: Mapping[VersionedDataKind, Mapping[str, Dict[Any, Any]]]): return self.store.init(_FeatureStoreDataSetSorter.sort_all_collections(all_data)) def get(self, kind, key, callback): @@ -51,7 +55,7 @@ def upsert(self, kind, item): return self.store.upsert(kind, item) @property - def initialized(self): + def initialized(self) -> bool: return self.store.initialized @@ -65,12 +69,12 @@ class LDClient: Client instances are thread-safe. """ - def __init__(self, sdk_key=None, config=None, start_wait=5): + def __init__(self, sdk_key: str=None, config: Config=None, start_wait: float=5): """Constructs a new LDClient instance. 
- :param string sdk_key: the SDK key for your LaunchDarkly environment - :param ldclient.config.Config config: optional custom configuration - :param float start_wait: the number of seconds to wait for a successful connection to LaunchDarkly + :param sdk_key: the SDK key for your LaunchDarkly environment + :param config: optional custom configuration + :param start_wait: the number of seconds to wait for a successful connection to LaunchDarkly """ check_uwsgi() @@ -149,10 +153,8 @@ def _make_update_processor(self, config, store, ready, diagnostic_accumulator): return PollingUpdateProcessor(config, feature_requester, store, ready) - def get_sdk_key(self): + def get_sdk_key(self) -> Optional[str]: """Returns the configured SDK key. - - :rtype: string """ return self._config.sdk_key @@ -175,15 +177,15 @@ def __exit__(self, type, value, traceback): def _send_event(self, event): self._event_processor.send_event(event) - def track(self, event_name, user, data=None, metric_value=None): + def track(self, event_name: str, user: dict, data: Optional[Any]=None, metric_value: Optional[AnyNum]=None): """Tracks that a user performed an event. LaunchDarkly automatically tracks pageviews and clicks that are specified in the Goals section of the dashboard. This can be used to track custom goals or other events that do not currently have goals. - :param string event_name: the name of the event, which may correspond to a goal in A/B tests - :param dict user: the attributes of the user + :param event_name: the name of the event, which may correspond to a goal in A/B tests + :param user: the attributes of the user :param data: optional additional data associated with the event :param metric_value: a numeric value used by the LaunchDarkly experimentation feature in numeric custom metrics. Can be omitted if this event is used by only non-numeric metrics. @@ -194,36 +196,32 @@ def track(self, event_name, user, data=None, metric_value=None): else: self._send_event(self._event_factory_default.new_custom_event(event_name, user, data, metric_value)) - def identify(self, user): + def identify(self, user: dict): """Registers the user. This simply creates an analytics event that will transmit the given user properties to LaunchDarkly, so that the user will be visible on your dashboard even if you have not evaluated any flags for that user. It has no other effect. - :param dict user: attributes of the user to register + :param user: attributes of the user to register """ if user is None or user.get('key') is None: log.warning("Missing user or user key when calling identify().") else: self._send_event(self._event_factory_default.new_identify_event(user)) - def is_offline(self): + def is_offline(self) -> bool: """Returns true if the client is in offline mode. - - :rtype: bool """ return self._config.offline - def is_initialized(self): + def is_initialized(self) -> bool: """Returns true if the client has successfully connected to LaunchDarkly. If this returns false, it means that the client has not yet successfully connected to LaunchDarkly. It might still be in the process of starting up, or it might be attempting to reconnect after an unsuccessful attempt, or it might have received an unrecoverable error (such as an invalid SDK key) and given up. - - :rtype: bool """ return self.is_offline() or self._config.use_ldd or self._update_processor.initialized() @@ -247,18 +245,18 @@ def toggle(self, key, user, default): log.warning("Deprecated method: toggle() called. 
Use variation() instead.") return self.variation(key, user, default) - def variation(self, key, user, default): + def variation(self, key: str, user: dict, default: Any) -> Any: """Determines the variation of a feature flag for a user. - :param string key: the unique key for the feature flag - :param dict user: a dictionary containing parameters for the end user requesting the flag - :param object default: the default value of the flag, to be used if the value is not + :param key: the unique key for the feature flag + :param user: a dictionary containing parameters for the end user requesting the flag + :param default: the default value of the flag, to be used if the value is not available from LaunchDarkly :return: one of the flag's variation values, or the default value """ return self._evaluate_internal(key, user, default, self._event_factory_default).value - def variation_detail(self, key, user, default): + def variation_detail(self, key: str, user: dict, default: Any) -> EvaluationDetail: """Determines the variation of a feature flag for a user, like :func:`variation()`, but also provides additional information about how this value was calculated, in the form of an :class:`ldclient.flag.EvaluationDetail` object. @@ -266,12 +264,11 @@ def variation_detail(self, key, user, default): Calling this method also causes the "reason" data to be included in analytics events, if you are capturing detailed event data for this flag. - :param string key: the unique key for the feature flag - :param dict user: a dictionary containing parameters for the end user requesting the flag - :param object default: the default value of the flag, to be used if the value is not + :param key: the unique key for the feature flag + :param user: a dictionary containing parameters for the end user requesting the flag + :param default: the default value of the flag, to be used if the value is not available from LaunchDarkly :return: an object describing the result - :rtype: EvaluationDetail """ return self._evaluate_internal(key, user, default, self._event_factory_with_reasons) @@ -328,23 +325,22 @@ def _evaluate_internal(self, key, user, default, event_factory): self._send_event(event_factory.new_default_event(flag, user, default, reason)) return EvaluationDetail(default, None, reason) - def all_flags(self, user): + def all_flags(self, user: dict) -> Optional[dict]: """Returns all feature flag values for the given user. This method is deprecated - please use :func:`all_flags_state()` instead. Current versions of the client-side SDK will not generate analytics events correctly if you pass the result of ``all_flags``. - :param dict user: the end user requesting the feature flags + :param user: the end user requesting the feature flags :return: a dictionary of feature flag keys to values; returns None if the client is offline, has not been initialized, or the user is None or has no key - :rtype: dict """ state = self.all_flags_state(user) if not state.valid: return None return state.to_values_map() - def all_flags_state(self, user, **kwargs): + def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: """Returns an object that encapsulates the state of all feature flags for a given user, including the flag values and also metadata that can be used on the front end. See the JavaScript SDK Reference Guide on @@ -352,7 +348,7 @@ def all_flags_state(self, user, **kwargs): This method does not send analytics events back to LaunchDarkly. 
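A minimal bootstrapping sketch (assumes ``client`` is an initialized LDClient; the user attributes are illustrative):

    state = client.all_flags_state({"key": "user-123"}, client_side_only=True)
    if state.valid:
        bootstrap_json = state.to_json_string()  # embed in the page for the JavaScript client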
- :param dict user: the end user requesting the feature flags + :param user: the end user requesting the feature flags :param kwargs: optional parameters affecting how the state is computed - see below :Keyword Arguments: @@ -368,7 +364,6 @@ def all_flags_state(self, user, **kwargs): :return: a FeatureFlagsState object (will never be None; its ``valid`` property will be False if the client is offline, has not been initialized, or the user is None or has no key) - :rtype: FeatureFlagsState """ if self._config.offline: log.warning("all_flags_state() called, but client is in offline mode. Returning empty state") @@ -412,20 +407,20 @@ def all_flags_state(self, user, **kwargs): return state - def secure_mode_hash(self, user): + def secure_mode_hash(self, user: dict) -> str: """Computes an HMAC signature of a user signed with the client's SDK key, for use with the JavaScript SDK. For more information, see the JavaScript SDK Reference Guide on `Secure mode `_. - - :param dict user: the attributes of the user + + :param user: the attributes of the user :return: a hash string that can be passed to the front end - :rtype: string """ - if user.get('key') is None or self._config.sdk_key is None: + key = user.get('key') + if key is None or self._config.sdk_key is None: return "" - return hmac.new(self._config.sdk_key.encode(), user.get('key').encode(), hashlib.sha256).hexdigest() + return hmac.new(self._config.sdk_key.encode(), key.encode(), hashlib.sha256).hexdigest() __all__ = ['LDClient', 'Config'] diff --git a/ldclient/config.py b/ldclient/config.py index a14505f7..5a0d25e2 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -4,8 +4,11 @@ Note that the same class can also be imported from the ``ldclient.client`` submodule. """ +from typing import Optional, Callable, List, Any, Set + from ldclient.feature_store import InMemoryFeatureStore from ldclient.util import log +from ldclient.interfaces import EventProcessor, FeatureStore, UpdateProcessor, FeatureRequester GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' STREAM_FLAGS_PATH = '/flags' @@ -23,26 +26,25 @@ class HTTPConfig: corresponding `Config` properties will be ignored. """ def __init__(self, - connect_timeout=10, - read_timeout=15, - http_proxy=None, - ca_certs=None, - cert_file=None, - disable_ssl_verification=False): + connect_timeout: float=10, + read_timeout: float=15, + http_proxy: Optional[str]=None, + ca_certs: Optional[str]=None, + cert_file: Optional[str]=None, + disable_ssl_verification: bool=False): """ - :param float connect_timeout: The connect timeout for network connections in seconds. - :param float read_timeout: The read timeout for network connections in seconds. + :param connect_timeout: The connect timeout for network connections in seconds. + :param read_timeout: The read timeout for network connections in seconds. :param http_proxy: Use a proxy when connecting to LaunchDarkly. This is the full URI of the proxy; for example: http://my-proxy.com:1234. Note that unlike the standard `http_proxy` environment variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly service uses HTTPS, but a Relay Proxy instance could use HTTP). Setting this Config parameter will override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. - The URL may contain authentication parameters in the form http://username:password@host:port. 
- :param string ca_certs: If using a custom certificate authority, set this to the file path of the + :param ca_certs: If using a custom certificate authority, set this to the file path of the certificate bundle. - :param string cert_file: If using a custom client certificate, set this to the file path of the + :param cert_file: If using a custom client certificate, set this to the file path of the certificate. - :param bool disable_ssl_verification: If true, completely disables SSL verification and certificate + :param disable_ssl_verification: If true, completely disables SSL verification and certificate verification for secure requests. This is unsafe and should not be used in a production environment; instead, use a self-signed certificate and set `ca_certs`. """ @@ -54,27 +56,27 @@ def __init__(self, self.__disable_ssl_verification = disable_ssl_verification @property - def connect_timeout(self): + def connect_timeout(self) -> float: return self.__connect_timeout @property - def read_timeout(self): + def read_timeout(self) -> float: return self.__read_timeout @property - def http_proxy(self): + def http_proxy(self) -> Optional[str]: return self.__http_proxy @property - def ca_certs(self): + def ca_certs(self) -> Optional[str]: return self.__ca_certs @property - def cert_file(self): + def cert_file(self) -> Optional[str]: return self.__cert_file @property - def disable_ssl_verification(self): + def disable_ssl_verification(self) -> bool: return self.__disable_ssl_verification class Config: @@ -84,128 +86,119 @@ class Config: if you are using the singleton client, or the :class:`ldclient.client.LDClient` constructor otherwise. """ def __init__(self, - sdk_key=None, - base_uri='https://app.launchdarkly.com', - events_uri='https://events.launchdarkly.com', - connect_timeout=10, - read_timeout=15, - events_max_pending=10000, - flush_interval=5, - stream_uri='https://stream.launchdarkly.com', - stream=True, - initial_reconnect_delay=1, - verify_ssl=True, - defaults=None, - send_events=None, - events_enabled=True, - update_processor_class=None, - poll_interval=30, - use_ldd=False, - feature_store=None, + sdk_key: Optional[str]=None, + base_uri: str='https://app.launchdarkly.com', + events_uri: str='https://events.launchdarkly.com', + connect_timeout=10, # deprecated + read_timeout=15, # deprecated + events_max_pending: int=10000, + flush_interval: float=5, + stream_uri: str='https://stream.launchdarkly.com', + stream: bool=True, + initial_reconnect_delay: float=1, + verify_ssl=True, # deprecated + defaults: dict={}, + send_events: Optional[bool]=None, + events_enabled: bool=True, + update_processor_class: Callable[[str, 'Config', FeatureStore], UpdateProcessor]=None, + poll_interval: float=30, + use_ldd: bool=False, + feature_store: Optional[FeatureStore]=None, feature_requester_class=None, - event_processor_class=None, - private_attribute_names=(), - all_attributes_private=False, - offline=False, - user_keys_capacity=1000, - user_keys_flush_interval=300, - inline_users_in_events=False, - http_proxy=None, - diagnostic_opt_out=False, - diagnostic_recording_interval=900, - wrapper_name=None, - wrapper_version=None, - http=None): + event_processor_class: Callable[['Config'], EventProcessor]=None, + private_attribute_names: Set[str]=set(), + all_attributes_private: bool=False, + offline: bool=False, + user_keys_capacity: int=1000, + user_keys_flush_interval: float=300, + inline_users_in_events: bool=False, + http_proxy=None, # deprecated + diagnostic_opt_out: bool=False, + 
diagnostic_recording_interval: int=900, + wrapper_name: Optional[str]=None, + wrapper_version: Optional[str]=None, + http: Optional[HTTPConfig]=None): """ - :param string sdk_key: The SDK key for your LaunchDarkly account. - :param string base_uri: The base URL for the LaunchDarkly server. Most users should use the default + :param sdk_key: The SDK key for your LaunchDarkly account. + :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default value. - :param string events_uri: The URL for the LaunchDarkly events server. Most users should use the + :param events_uri: The URL for the LaunchDarkly events server. Most users should use the default value. - :param float connect_timeout: Deprecated; use `http` instead and specify the `connect_timeout` as + :param connect_timeout: Deprecated; use `http` instead and specify the `connect_timeout` as part of :class:`HTTPConfig`. - :param float read_timeout: Deprecated; use `http` instead and specify the `read_timeout` as + :param read_timeout: Deprecated; use `http` instead and specify the `read_timeout` as part of :class:`HTTPConfig`. - :param int events_upload_max_batch_size: The maximum number of analytics events that the client will - send at once. - :param int events_max_pending: The capacity of the events buffer. The client buffers up to this many + :param events_max_pending: The capacity of the events buffer. The client buffers up to this many events in memory before flushing. If the capacity is exceeded before the buffer is flushed, events will be discarded. - :param float flush_interval: The number of seconds in between flushes of the events buffer. Decreasing + :param flush_interval: The number of seconds in between flushes of the events buffer. Decreasing the flush interval means that the event buffer is less likely to reach capacity. - :param string stream_uri: The URL for the LaunchDarkly streaming events server. Most users should + :param stream_uri: The URL for the LaunchDarkly streaming events server. Most users should use the default value. - :param bool stream: Whether or not the streaming API should be used to receive flag updates. By + :param stream: Whether or not the streaming API should be used to receive flag updates. By default, it is enabled. Streaming should only be disabled on the advice of LaunchDarkly support. - :param float initial_reconnect_delay: The initial reconnect delay (in seconds) for the streaming + :param initial_reconnect_delay: The initial reconnect delay (in seconds) for the streaming connection. The streaming service uses a backoff algorithm (with jitter) every time the connection needs to be reestablished. The delay for the first reconnection will start near this value, and then increase exponentially for any subsequent connection failures. - :param bool verify_ssl: Deprecated; use `http` instead and specify `disable_ssl_verification` as + :param verify_ssl: Deprecated; use `http` instead and specify `disable_ssl_verification` as part of :class:`HTTPConfig` if you want to turn off SSL verification (not recommended). - :param bool send_events: Whether or not to send events back to LaunchDarkly. This differs from + :param send_events: Whether or not to send events back to LaunchDarkly. This differs from `offline` in that it affects only the sending of client-side events, not streaming or polling for events from the server. By default, events will be sent. - :param bool events_enabled: Obsolete name for `send_events`. 
- :param bool offline: Whether the client should be initialized in offline mode. In offline mode, + :param events_enabled: Obsolete name for `send_events`. + :param offline: Whether the client should be initialized in offline mode. In offline mode, default values are returned for all flags and no remote network requests are made. By default, this is false. - :type update_processor_class: (str, ldclient.config.Config, FeatureStore) -> UpdateProcessor - :param float poll_interval: The number of seconds between polls for flag updates if streaming is off. - :param bool use_ldd: Whether you are using the LaunchDarkly relay proxy in daemon mode. In this + :param poll_interval: The number of seconds between polls for flag updates if streaming is off. + :param use_ldd: Whether you are using the LaunchDarkly relay proxy in daemon mode. In this configuration, the client will not use a streaming connection to listen for updates, but instead will get feature state from a Redis instance. The `stream` and `poll_interval` options will be ignored if this option is set to true. By default, this is false. :param array private_attribute_names: Marks a set of attribute names private. Any users sent to LaunchDarkly with this configuration active will have attributes with these names removed. - :param bool all_attributes_private: If true, all user attributes (other than the key) will be + :param all_attributes_private: If true, all user attributes (other than the key) will be private, not just the attributes specified in `private_attribute_names`. :param feature_store: A FeatureStore implementation - :type feature_store: FeatureStore - :param int user_keys_capacity: The number of user keys that the event processor can remember at any + :param user_keys_capacity: The number of user keys that the event processor can remember at any one time, so that duplicate user details will not be sent in analytics events. - :param float user_keys_flush_interval: The interval in seconds at which the event processor will + :param user_keys_flush_interval: The interval in seconds at which the event processor will reset its set of known user keys. - :param bool inline_users_in_events: Whether to include full user details in every analytics event. + :param inline_users_in_events: Whether to include full user details in every analytics event. By default, events will only include the user key, except for one "index" event that provides the full details for the user. :param feature_requester_class: A factory for a FeatureRequester implementation taking the sdk key and config - :type feature_requester_class: (str, ldclient.config.Config, FeatureStore) -> FeatureRequester :param event_processor_class: A factory for an EventProcessor implementation taking the config - :type event_processor_class: (ldclient.config.Config) -> EventProcessor :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, config, and FeatureStore implementation :param http_proxy: Deprecated; use `http` instead and specify the `http_proxy` as part of :class:`HTTPConfig`. - :param bool diagnostic_opt_out: Unless this field is set to True, the client will send + :param diagnostic_opt_out: Unless this field is set to True, the client will send some diagnostics data to the LaunchDarkly servers in order to assist in the development of future SDK improvements. 
These diagnostics consist of an initial payload containing some details of SDK in use, the SDK's configuration, and the platform the SDK is being run on, as well as periodic information on irregular occurrences such as dropped events. - :param int diagnostic_recording_interval: The interval in seconds at which periodic diagnostic data is + :param diagnostic_recording_interval: The interval in seconds at which periodic diagnostic data is sent. The default is 900 seconds (every 15 minutes) and the minimum value is 60 seconds. - :param string wrapper_name: For use by wrapper libraries to set an identifying name for the wrapper + :param wrapper_name: For use by wrapper libraries to set an identifying name for the wrapper being used. This will be sent in HTTP headers during requests to the LaunchDarkly servers to allow recording metrics on the usage of these wrapper libraries. - :param string wrapper_version: For use by wrapper libraries to report the version of the library in + :param wrapper_version: For use by wrapper libraries to report the version of the library in use. If `wrapper_name` is not set, this field will be ignored. Otherwise the version string will be included in the HTTP headers along with the `wrapper_name` during requests to the LaunchDarkly servers. - :param HTTPConfig http: Optional properties for customizing the client's HTTP/HTTPS behavior. See + :param http: Optional properties for customizing the client's HTTP/HTTPS behavior. See :class:`HTTPConfig`. """ self.__sdk_key = sdk_key - if defaults is None: - defaults = {} - self.__base_uri = base_uri.rstrip('\\') self.__events_uri = events_uri.rstrip('\\') self.__stream_uri = stream_uri.rstrip('\\') self.__update_processor_class = update_processor_class self.__stream = stream self.__initial_reconnect_delay = initial_reconnect_delay - self.__poll_interval = max(poll_interval, 30) + self.__poll_interval = max(poll_interval, 30.0) self.__use_ldd = use_ldd self.__feature_store = InMemoryFeatureStore() if not feature_store else feature_store self.__event_processor_class = event_processor_class @@ -233,18 +226,15 @@ def __init__(self, self.__http = http @classmethod - def default(cls): + def default(cls) -> 'Config': """Returns a ``Config`` instance with default values for all properties. - - :rtype: ldclient.config.Config """ return cls() - def copy_with_new_sdk_key(self, new_sdk_key): + def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. 
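For instance, a one-line sketch (assumes an existing ``config``; the key is a placeholder):

    staging_config = config.copy_with_new_sdk_key("STAGING_SDK_KEY")  # same settings, different environment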
- :param string new_sdk_key: the new SDK key - :rtype: ldclient.config.Config + :param new_sdk_key: the new SDK key """ return Config(sdk_key=new_sdk_key, base_uri=self.__base_uri, @@ -282,11 +272,11 @@ def get_default(self, key, default): return default if key not in self.__defaults else self.__defaults[key] @property - def sdk_key(self): + def sdk_key(self) -> Optional[str]: return self.__sdk_key @property - def base_uri(self): + def base_uri(self) -> str: return self.__base_uri # for internal use only - also no longer used, will remove @@ -315,86 +305,86 @@ def stream_uri(self): return self.__stream_uri + STREAM_FLAGS_PATH @property - def update_processor_class(self): + def update_processor_class(self) -> Optional[Callable[[str, 'Config', FeatureStore], UpdateProcessor]]: return self.__update_processor_class @property - def stream(self): + def stream(self) -> bool: return self.__stream @property - def initial_reconnect_delay(self): + def initial_reconnect_delay(self) -> float: return self.__initial_reconnect_delay @property - def poll_interval(self): + def poll_interval(self) -> float: return self.__poll_interval @property - def use_ldd(self): + def use_ldd(self) -> bool: return self.__use_ldd @property - def feature_store(self): + def feature_store(self) -> FeatureStore: return self.__feature_store @property - def event_processor_class(self): + def event_processor_class(self) -> Optional[Callable[['Config'], EventProcessor]]: return self.__event_processor_class @property - def feature_requester_class(self): + def feature_requester_class(self) -> Callable: return self.__feature_requester_class @property - def connect_timeout(self): + def connect_timeout(self) -> float: return self.__connect_timeout @property - def read_timeout(self): + def read_timeout(self) -> float: return self.__read_timeout @property - def events_enabled(self): + def events_enabled(self) -> bool: return self.__send_events @property - def send_events(self): + def send_events(self) -> bool: return self.__send_events @property - def events_max_pending(self): + def events_max_pending(self) -> int: return self.__events_max_pending @property - def flush_interval(self): + def flush_interval(self) -> float: return self.__flush_interval @property - def verify_ssl(self): + def verify_ssl(self) -> bool: return self.__verify_ssl @property - def private_attribute_names(self): + def private_attribute_names(self) -> list: return list(self.__private_attribute_names) @property - def all_attributes_private(self): + def all_attributes_private(self) -> bool: return self.__all_attributes_private @property - def offline(self): + def offline(self) -> bool: return self.__offline @property - def user_keys_capacity(self): + def user_keys_capacity(self) -> int: return self.__user_keys_capacity @property - def user_keys_flush_interval(self): + def user_keys_flush_interval(self) -> float: return self.__user_keys_flush_interval @property - def inline_users_in_events(self): + def inline_users_in_events(self) -> bool: return self.__inline_users_in_events @property @@ -402,23 +392,23 @@ def http_proxy(self): return self.__http_proxy @property - def diagnostic_opt_out(self): + def diagnostic_opt_out(self) -> bool: return self.__diagnostic_opt_out @property - def diagnostic_recording_interval(self): + def diagnostic_recording_interval(self) -> int: return self.__diagnostic_recording_interval @property - def wrapper_name(self): + def wrapper_name(self) -> Optional[str]: return self.__wrapper_name @property - def wrapper_version(self): + def 
wrapper_version(self) -> Optional[str]: return self.__wrapper_version @property - def http(self): + def http(self) -> HTTPConfig: if self.__http is None: return HTTPConfig( connect_timeout=self.__connect_timeout, diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index df443510..dcac232d 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -6,70 +6,63 @@ storage systems; those are in :class:`ldclient.integrations`. """ +from typing import Callable, Any + from collections import OrderedDict, defaultdict from ldclient.util import log from ldclient.interfaces import DiagnosticDescription, FeatureStore from ldclient.rwlock import ReadWriteLock +from ldclient.versioned_data_kind import VersionedDataKind class CacheConfig: """Encapsulates caching parameters for feature store implementations that support local caching. """ - DEFAULT_EXPIRATION = 15 + DEFAULT_EXPIRATION = 15.0 DEFAULT_CAPACITY = 1000 def __init__(self, - expiration = DEFAULT_EXPIRATION, - capacity = DEFAULT_CAPACITY): + expiration: float = DEFAULT_EXPIRATION, + capacity: int = DEFAULT_CAPACITY): """Constructs an instance of CacheConfig. - :param float expiration: the cache TTL, in seconds. Items will be evicted from the cache after + :param expiration: the cache TTL, in seconds. Items will be evicted from the cache after this amount of time from the time when they were originally cached. If the time is less than or equal to zero, caching is disabled. - :param int capacity: the maximum number of items that can be in the cache at a time + :param capacity: the maximum number of items that can be in the cache at a time """ self._expiration = expiration self._capacity = capacity @staticmethod - def default(): + def default() -> 'CacheConfig': """Returns an instance of CacheConfig with default properties. By default, caching is enabled. This is the same as calling the constructor with no parameters. - - :rtype: ldclient.feature_store.CacheConfig """ return CacheConfig() @staticmethod - def disabled(): + def disabled() -> 'CacheConfig': """Returns an instance of CacheConfig specifying that caching should be disabled. - - :rtype: ldclient.feature_store.CacheConfig """ return CacheConfig(expiration = 0) @property - def enabled(self): + def enabled(self) -> bool: """Returns True if caching is enabled in this configuration. - - :rtype: bool """ return self._expiration > 0 @property - def expiration(self): + def expiration(self) -> float: """Returns the configured cache TTL, in seconds. - - :rtype: float """ return self._expiration @property - def capacity(self): + def capacity(self) -> int: """Returns the configured maximum number of cacheable items. 
- - :rtype: int """ return self._capacity @@ -85,7 +78,7 @@ def __init__(self): self._initialized = False self._items = defaultdict(dict) - def get(self, kind, key, callback): + def get(self, kind: VersionedDataKind, key: str, callback: Callable[[Any], Any]=lambda x: x) -> Any: """ """ try: @@ -126,7 +119,7 @@ def init(self, all_data): self._lock.runlock() # noinspection PyShadowingNames - def delete(self, kind, key, version): + def delete(self, kind, key: str, version: int): """ """ try: @@ -154,7 +147,7 @@ def upsert(self, kind, item): self._lock.runlock() @property - def initialized(self): + def initialized(self) -> bool: """ """ try: diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index 0f371f7b..1904f59d 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -2,10 +2,12 @@ This submodule contains support code for writing feature store implementations. """ +from typing import Dict, Mapping, Any from expiringdict import ExpiringDict -from ldclient.interfaces import DiagnosticDescription, FeatureStore - +from ldclient.interfaces import DiagnosticDescription, FeatureStore, FeatureStoreCore +from ldclient.versioned_data_kind import VersionedDataKind +from ldclient.feature_store import CacheConfig class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """A partial implementation of :class:`ldclient.interfaces.FeatureStore`. @@ -17,11 +19,11 @@ class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """ __INITED_CACHE_KEY__ = "$inited" - def __init__(self, core, cache_config): + def __init__(self, core: FeatureStoreCore, cache_config: CacheConfig): """Constructs an instance by wrapping a core implementation object. - :param FeatureStoreCore core: the implementation object - :param ldclient.feature_store.CacheConfig cache_config: the caching parameters + :param core: the implementation object + :param cache_config: the caching parameters """ self._core = core if cache_config.enabled: @@ -30,7 +32,7 @@ def __init__(self, core, cache_config): self._cache = None self._inited = False - def init(self, all_data): + def init(self, all_data: Mapping[VersionedDataKind, Mapping[str, Dict[Any, Any]]]): """ """ self._core.init_internal(all_data) @@ -84,7 +86,7 @@ def upsert(self, kind, item): self._cache.pop(self._all_cache_key(kind), None) @property - def initialized(self): + def initialized(self) -> bool: """ """ if self._inited: diff --git a/ldclient/flag.py b/ldclient/flag.py index dbf63b45..4c279f93 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -6,6 +6,7 @@ import hashlib import logging +from typing import Optional, List, Any import sys from ldclient import operators @@ -29,7 +30,7 @@ class EvaluationDetail: The return type of :func:`ldclient.client.LDClient.variation_detail()`, combining the result of a flag evaluation with information about how it was calculated. """ - def __init__(self, value, variation_index, reason): + def __init__(self, value: object, variation_index: Optional[int], reason: dict): """Constructs an instance. """ self.__value = value @@ -37,7 +38,7 @@ def __init__(self, value, variation_index, reason): self.__reason = reason @property - def value(self): + def value(self) -> object: """The result of the flag evaluation. This will be either one of the flag's variations or the default value that was passed to the :func:`ldclient.client.LDClient.variation_detail()` method. 
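A short sketch of consuming these properties (the flag key, user, and default are illustrative; assumes ``client`` is an LDClient):

    detail = client.variation_detail("my-flag", {"key": "user-123"}, False)
    if detail.is_default_value():
        print("used default:", detail.reason)  # e.g. {'kind': 'ERROR', 'errorKind': 'FLAG_NOT_FOUND'}
    else:
        print("served variation", detail.variation_index, "=", detail.value)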
@@ -45,16 +46,14 @@ def value(self): return self.__value @property - def variation_index(self): + def variation_index(self) -> Optional[int]: """The index of the returned value within the flag's list of variations, e.g. 0 for the first variation -- or None if the default value was returned. - - :rtype: int or None """ return self.__variation_index @property - def reason(self): + def reason(self) -> dict: """A dictionary describing the main factor that influenced the flag evaluation value. It contains the following properties: @@ -76,42 +75,39 @@ def reason(self): * ``errorKind``: further describes the nature of the error if the kind was ``ERROR``, e.g. ``"FLAG_NOT_FOUND"`` - - :rtype: dict """ return self.__reason - def is_default_value(self): + def is_default_value(self) -> bool: + """Returns True if the flag evaluated to the default value rather than one of its variations. - - :rtype: bool """ return self.__variation_index is None - def __eq__(self, other): + def __eq__(self, other) -> bool: return self.value == other.value and self.variation_index == other.variation_index and self.reason == other.reason - def __ne__(self, other): + def __ne__(self, other) -> bool: return not self.__eq__(other) - def __str__(self): + def __str__(self) -> str: return "(value=%s, variation_index=%s, reason=%s)" % (self.value, self.variation_index, self.reason) - def __repr__(self): + def __repr__(self) -> str: return self.__str__() EvalResult = namedtuple('EvalResult', ['detail', 'events']) -def error_reason(error_kind): +def error_reason(error_kind: str) -> dict: return {'kind': 'ERROR', 'errorKind': error_kind} -def evaluate(flag, user, store, event_factory): +def evaluate(flag, user, store, event_factory) -> EvalResult: sanitized_user = stringify_attrs(user, __USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__) - prereq_events = [] + prereq_events = [] # type: List[Any] detail = _evaluate(flag, sanitized_user, store, prereq_events, event_factory) return EvalResult(detail = detail, events = prereq_events) diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index 4ea41aaa..547a5d16 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -1,6 +1,7 @@ """ This submodule contains a helper class for feature flag evaluation. """ +from typing import Optional, Dict, Any import json import time @@ -13,9 +14,9 @@ class FeatureFlagsState: appropriate data structure for bootstrapping the LaunchDarkly JavaScript client. See the JavaScript SDK Reference Guide on `Bootstrapping `_. """ - def __init__(self, valid): - self.__flag_values = {} - self.__flag_metadata = {} + def __init__(self, valid: bool): + self.__flag_values = {} # type: Dict[str, Any] + self.__flag_metadata = {} # type: Dict[str, Any] self.__valid = valid # Used internally to build the state map @@ -41,64 +42,56 @@ def add_flag(self, flag, value, variation, reason, details_only_if_tracked): self.__flag_metadata[key] = meta @property - def valid(self): + def valid(self) -> bool: """True if this object contains a valid snapshot of feature flag state, or False if the state could not be computed (for instance, because the client was offline or there was no user). - - :rtype: bool """ return self.__valid - def get_flag_value(self, key): + + def get_flag_value(self, key: str) -> object: """Returns the value of an individual feature flag at the time the state was recorded. 
- :param string key: the feature flag key + :param key: the feature flag key :return: the flag's value; None if the flag returned the default value, or if there was no such flag """ return self.__flag_values.get(key) - def get_flag_reason(self, key): + def get_flag_reason(self, key: str) -> Optional[dict]: """Returns the evaluation reason for an individual feature flag at the time the state was recorded. - :param string key: the feature flag key + :param key: the feature flag key :return: a dictionary describing the reason; None if reasons were not recorded, or if there was no such flag - :rtype: dict or None """ meta = self.__flag_metadata.get(key) return None if meta is None else meta.get('reason') - def to_values_map(self): + def to_values_map(self) -> dict: """Returns a dictionary of flag keys to flag values. If the flag would have evaluated to the default value, its value will be None. Do not use this method if you are passing data to the front end to "bootstrap" the JavaScript client. Instead, use :func:`to_json_dict()`. - - :rtype: dict """ return self.__flag_values - def to_json_dict(self): + def to_json_dict(self) -> dict: """Returns a dictionary suitable for passing as JSON, in the format used by the LaunchDarkly JavaScript SDK. Use this method if you are passing data to the front end in order to "bootstrap" the JavaScript client. - - :rtype: dict """ ret = self.__flag_values.copy() ret['$flagsState'] = self.__flag_metadata ret['$valid'] = self.__valid return ret - def to_json_string(self): + def to_json_string(self) -> str: """Same as to_json_dict, but serializes the JSON structure into a string. - - :rtype: string """ return json.dumps(self.to_json_dict()) - def __getstate__(self): + def __getstate__(self) -> dict: """Equivalent to to_json_dict() - used if you are serializing the object with jsonpickle. """ return self.to_json_dict() diff --git a/ldclient/impl/__init__.py b/ldclient/impl/__init__.py index e69de29b..d70df6c0 100644 --- a/ldclient/impl/__init__.py +++ b/ldclient/impl/__init__.py @@ -0,0 +1,3 @@ +from typing import TypeVar + +AnyNum = TypeVar('AnyNum', int, float, complex) diff --git a/ldclient/integrations.py b/ldclient/integrations.py index 15816f72..e0f0050c 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -10,6 +10,7 @@ from ldclient.impl.integrations.files.file_data_source import _FileDataSource from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore +from typing import List, Callable, Mapping, Any class Consul: """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. @@ -19,11 +20,11 @@ class Consul: DEFAULT_PREFIX = "launchdarkly" @staticmethod - def new_feature_store(host=None, - port=None, - prefix=None, - consul_opts=None, - caching=CacheConfig.default()): + def new_feature_store(host: str=None, + port: int=None, + prefix: str=None, + consul_opts: dict=None, + caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: """Creates a Consul-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -40,13 +41,13 @@ def new_feature_store(host=None, Note that ``python-consul`` is not available for Python 3.3 or 3.4, so this feature cannot be used in those Python versions. 
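A hedged wiring sketch (the host, port, and prefix shown are the documented defaults; the SDK key is a placeholder):

    from ldclient.config import Config
    from ldclient.integrations import Consul

    store = Consul.new_feature_store(host="localhost", port=8500, prefix="launchdarkly")
    config = Config(sdk_key="YOUR_SDK_KEY", feature_store=store)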
- :param string host: hostname of the Consul server (uses ``localhost`` if omitted) - :param int port: port of the Consul server (uses 8500 if omitted) - :param string prefix: a namespace prefix to be prepended to all Consul keys - :param dict consul_opts: optional parameters for configuring the Consul client, if you need + :param host: hostname of the Consul server (uses ``localhost`` if omitted) + :param port: port of the Consul server (uses 8500 if omitted) + :param prefix: a namespace prefix to be prepended to all Consul keys + :param consul_opts: optional parameters for configuring the Consul client, if you need to set any of them besides host and port, as defined in the `python-consul API `_ - :param CacheConfig caching: specifies whether local caching should be enabled and if so, + :param caching: specifies whether local caching should be enabled and if so, sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` """ core = _ConsulFeatureStoreCore(host, port, prefix, consul_opts) @@ -58,10 +59,10 @@ class DynamoDB: """ @staticmethod - def new_feature_store(table_name, - prefix=None, - dynamodb_opts={}, - caching=CacheConfig.default()): + def new_feature_store(table_name: str, + prefix: str=None, + dynamodb_opts: Mapping[str, Any]={}, + caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: """Creates a DynamoDB-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -84,11 +85,11 @@ def new_feature_store(table_name, environment variables and/or local configuration files, as described in the AWS SDK documentation. You may also pass configuration settings in ``dynamodb_opts``. - :param string table_name: the name of an existing DynamoDB table - :param string prefix: an optional namespace prefix to be prepended to all DynamoDB keys - :param dict dynamodb_opts: optional parameters for configuring the DynamoDB client, as defined in + :param table_name: the name of an existing DynamoDB table + :param prefix: an optional namespace prefix to be prepended to all DynamoDB keys + :param dynamodb_opts: optional parameters for configuring the DynamoDB client, as defined in the `boto3 API `_ - :param CacheConfig caching: specifies whether local caching should be enabled and if so, + :param caching: specifies whether local caching should be enabled and if so, sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` """ core = _DynamoDBFeatureStoreCore(table_name, prefix, dynamodb_opts) @@ -103,10 +104,10 @@ class Redis: DEFAULT_MAX_CONNECTIONS = 16 @staticmethod - def new_feature_store(url='redis://localhost:6379/0', - prefix='launchdarkly', - max_connections=16, - caching=CacheConfig.default()): + def new_feature_store(url: str='redis://localhost:6379/0', + prefix: str='launchdarkly', + max_connections: int=16, + caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: """Creates a Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. 
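A hedged sketch with non-default options (the URL and cache TTL are illustrative; the SDK key is a placeholder):

    from ldclient.config import Config
    from ldclient.feature_store import CacheConfig
    from ldclient.integrations import Redis

    store = Redis.new_feature_store(
        url="redis://redis.example.com:6379/0",
        max_connections=32,
        caching=CacheConfig(expiration=30),  # cache items locally for 30 seconds
    )
    config = Config(sdk_key="YOUR_SDK_KEY", feature_store=store)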
@@ -120,17 +121,17 @@ def new_feature_store(url='redis://localhost:6379/0', store = Redis.new_feature_store() config = Config(feature_store=store) - :param string url: the URL of the Redis host; defaults to ``DEFAULT_URL`` - :param string prefix: a namespace prefix to be prepended to all Redis keys; defaults to + :param url: the URL of the Redis host; defaults to ``DEFAULT_URL`` + :param prefix: a namespace prefix to be prepended to all Redis keys; defaults to ``DEFAULT_PREFIX`` - :param int max_connections: the maximum number of Redis connections to keep in the + :param max_connections: the maximum number of Redis connections to keep in the connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS`` - :param CacheConfig caching: specifies whether local caching should be enabled and if so, + :param caching: specifies whether local caching should be enabled and if so, sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` """ core = _RedisFeatureStoreCore(url, prefix, max_connections) wrapper = CachingStoreWrapper(core, caching) - wrapper.core = core # exposed for testing + wrapper._core = core # exposed for testing return wrapper @@ -139,7 +140,10 @@ class Files: """ @staticmethod - def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=False): + def new_data_source(paths: List[str], + auto_update: bool=False, + poll_interval: float=1, + force_polling: bool=False) -> object: """Provides a way to use local files as a source of feature flag state. This would typically be used in a test environment, to operate using a predetermined feature flag state without an actual LaunchDarkly connection. @@ -164,18 +168,18 @@ def new_data_source(paths, auto_update=False, poll_interval=1, force_polling=Fal If the data source encounters any error in any file-- malformed content, a missing file, or a duplicate key-- it will not load flags from any of the files. - :param array paths: the paths of the source files for loading flag data. These may be absolute paths + :param paths: the paths of the source files for loading flag data. These may be absolute paths or relative to the current working directory. Files will be parsed as JSON unless the ``pyyaml`` package is installed, in which case YAML is also allowed. - :param bool auto_update: (default: false) True if the data source should watch for changes to the source file(s) + :param auto_update: (default: false) True if the data source should watch for changes to the source file(s) and reload flags whenever there is a change. The default implementation of this feature is based on polling the filesystem, which may not perform well; if you install the ``watchdog`` package, its native file watching mechanism will be used instead. Note that auto-updating will only work if all of the files you specified have valid directory paths at startup time. - :param float poll_interval: (default: 1) the minimum interval, in seconds, between checks for file + :param poll_interval: (default: 1) the minimum interval, in seconds, between checks for file modifications-- used only if ``auto_update`` is true, and if the native file-watching mechanism from ``watchdog`` is not being used. - :param bool force_polling: (default: false) True if the data source should implement auto-update via + :param force_polling: (default: false) True if the data source should implement auto-update via polling the filesystem even if a native mechanism is available. This is mainly for SDK testing. 
:return: an object (actually a lambda) to be stored in the ``update_processor_class`` configuration property diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index ea3c9fbd..08919ed2 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -5,7 +5,8 @@ """ from abc import ABCMeta, abstractmethod, abstractproperty - +from .versioned_data_kind import VersionedDataKind +from typing import Mapping, Callable, Any class FeatureStore: """ @@ -25,79 +26,65 @@ class FeatureStore: __metaclass__ = ABCMeta @abstractmethod - def get(self, kind, key, callback=lambda x: x): + def get(self, kind: VersionedDataKind, key: str, callback: Callable[[Any], Any]=lambda x: x) -> Any: """ Retrieves the object to which the specified key is mapped, or None if the key is not found or the associated object has a ``deleted`` property of True. The retrieved object, if any (a dict) can be transformed by the specified callback. :param kind: The kind of object to get - :type kind: VersionedDataKind :param key: The key whose associated object is to be returned - :type key: str :param callback: A function that accepts the retrieved data and returns a transformed value - :type callback: function :return: The result of executing callback """ @abstractmethod - def all(self, kind, callback=lambda x: x): + def all(self, kind: VersionedDataKind, callback: Callable[[Any], Any]=lambda x: x) -> Any: """ Retrieves a dictionary of all associated objects of a given kind. The retrieved dict of keys to objects can be transformed by the specified callback. :param kind: The kind of objects to get - :type kind: VersionedDataKind :param callback: A function that accepts the retrieved data and returns a transformed value - :type callback: function - :rtype: The result of executing callback """ @abstractmethod - def init(self, all_data): + def init(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]]): """ Initializes (or re-initializes) the store with the specified set of objects. Any existing entries will be removed. Implementations can assume that this set of objects is up to date-- there is no need to perform individual version comparisons between the existing objects and the supplied data. :param all_data: All objects to be stored - :type all_data: dict[VersionedDataKind, dict[str, dict]] """ @abstractmethod - def delete(self, kind, key, version): + def delete(self, kind: VersionedDataKind, key: str, version: int): """ Deletes the object associated with the specified key, if it exists and its version is less than the specified version. The object should be replaced in the data store by a placeholder with the specified version and a "deleted" property of True. :param kind: The kind of object to delete - :type kind: VersionedDataKind :param key: The key of the object to be deleted - :type key: str :param version: The version for the delete operation - :type version: int """ @abstractmethod - def upsert(self, kind, item): + def upsert(self, kind: VersionedDataKind, item: dict): """ Updates or inserts the object associated with the specified key. If an item with the same key already exists, it should update it only if the new item's version property is greater than the old one.
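A minimal sketch of that versioning contract (the item shape is illustrative; assumes ``store`` is a FeatureStore implementation):

    from ldclient.versioned_data_kind import FEATURES

    # the store should apply this only if its current version of 'my-flag' is below 2
    store.upsert(FEATURES, {"key": "my-flag", "version": 2})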
:param kind: The kind of object to update - :type kind: VersionedDataKind :param item: The object to update or insert - :type feature: dict """ @abstractproperty - def initialized(self): + def initialized(self) -> bool: """ Returns whether the store has been initialized yet or not - - :rtype: bool """ @@ -112,34 +99,29 @@ class FeatureStoreCore: __metaclass__ = ABCMeta @abstractmethod - def get_internal(self, kind, key): + def get_internal(self, kind: VersionedDataKind, key: str) -> dict: """ Returns the object to which the specified key is mapped, or None if no such item exists. The method should not attempt to filter out any items based on their deleted property, nor to cache any items. :param kind: The kind of object to get - :type kind: VersionedDataKind :param key: The key of the object - :type key: str :return: The object to which the specified key is mapped, or None - :rtype: dict """ @abstractmethod - def get_all_internal(self, callback): + def get_all_internal(self, callback) -> Mapping[str, dict]: """ Returns a dictionary of all associated objects of a given kind. The method should not attempt to filter out any items based on their deleted property, nor to cache any items. :param kind: The kind of objects to get - :type kind: VersionedDataKind :return: A dictionary of keys to items - :rtype: dict[str, dict] """ @abstractmethod - def init_internal(self, all_data): + def init_internal(self, all_data: Mapping[VersionedDataKind, Mapping[str, dict]]): """ Initializes (or re-initializes) the store with the specified set of objects. Any existing entries will be removed. Implementations can assume that this set of objects is up to date-- there is no @@ -147,11 +129,10 @@ def init_internal(self, all_data): data. :param all_data: A dictionary of data kinds to item collections - :type all_data: dict[VersionedDataKind, dict[str, dict]] """ @abstractmethod - def upsert_internal(self, kind, item): + def upsert_internal(self, kind: VersionedDataKind, item: dict) -> dict: """ Updates or inserts the object associated with the specified key. If an item with the same key already exists, it should update it only if the new item's version property is greater than @@ -161,22 +142,17 @@ def upsert_internal(self, kind, item): `CachingStoreWrapper` will update the cache correctly). :param kind: The kind of object to update - :type kind: VersionedDataKind :param item: The object to update or insert - :type item: dict :return: The state of the object after the update - :rtype: dict """ @abstractmethod - def initialized_internal(self): + def initialized_internal(self) -> bool: """ Returns true if this store has been initialized. In a shared data store, it should be able to detect this even if initInternal was called in a different process, i.e. the test should be based on looking at what is in the data store. The method does not need to worry about caching this value; `CachingStoreWrapper` will only call it when necessary. - - :rtype: bool """ @@ -197,10 +173,9 @@ def stop(self): pass # noinspection PyMethodMayBeStatic - def is_alive(self): + def is_alive(self) -> bool: """ Returns whether the operation is alive or not - :rtype: bool """ return True @@ -213,11 +188,9 @@ class UpdateProcessor(BackgroundOperation): """ __metaclass__ = ABCMeta - def initialized(self): + def initialized(self) -> bool: """ Returns whether the update processor has received feature flags and has initialized its feature store. 
- - :rtype: bool """ @@ -270,11 +243,10 @@ class DiagnosticDescription: """ @abstractmethod - def describe_configuration(self, config): + def describe_configuration(self, config) -> str: """ Used internally by the SDK to inspect the configuration. - :param ldclient.config.Config config: the full configuration, in case this component depends on properties outside itself + :param config: the full configuration, in case this component depends on properties outside itself :return: a string describing the type of the component, or None - :rtype: string """ pass diff --git a/ldclient/memoized_value.py b/ldclient/memoized_value.py index 3cf2dd22..d92b69ad 100644 --- a/ldclient/memoized_value.py +++ b/ldclient/memoized_value.py @@ -5,7 +5,7 @@ from threading import RLock -class MemoizedValue(object): +class MemoizedValue: """Simple implementation of a thread-safe memoized value whose generator function will never be run more than once, and whose value can be overridden by explicit assignment. diff --git a/ldclient/operators.py b/ldclient/operators.py index bf083a06..39e8860c 100644 --- a/ldclient/operators.py +++ b/ldclient/operators.py @@ -67,17 +67,17 @@ def _time_operator(u, c, fn): def _parse_semver(input): try: - semver.parse(input) + semver.VersionInfo.parse(input) return input except ValueError as e: try: input = _add_zero_version_component(input) - semver.parse(input) + semver.VersionInfo.parse(input) return input except ValueError as e: try: input = _add_zero_version_component(input) - semver.parse(input) + semver.VersionInfo.parse(input) return input except ValueError as e: return None @@ -190,4 +190,4 @@ def dst(self, dt): "semVerGreaterThan": _semver_greater_than } -ops = defaultdict(lambda: False, ops) +ops = defaultdict(lambda: lambda l, r: False, ops) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..d345df18 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +ignore_missing_imports = true +python_version = 3.5 diff --git a/requirements.txt b/requirements.txt index 97dddee5..e80910cf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ certifi>=2018.4.16 expiringdict>=1.1.4,<1.2.0 pyRFC3339>=1.0 -semver>=2.7.9 +semver>=2.10.2 urllib3>=1.22.0 diff --git a/runtests.py b/runtests.py index 6fc85fe3..ec32fcc6 100644 --- a/runtests.py +++ b/runtests.py @@ -1,5 +1,5 @@ #! /usr/bin/env python - +# type: ignore # Hi There!
# You may be wondering what this giant blob of binary data here is, you might # even be worried that we're up to something nefarious (good for you for being @@ -3021,7 +3021,6 @@ import base64 import zlib - class DictImporter: def __init__(self, sources): diff --git a/setup.py b/setup.py index e717a5b9..18ccade9 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,4 @@ +# type: ignore from setuptools import find_packages, setup, Command import sys @@ -62,8 +63,6 @@ def run(self): 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.3', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', diff --git a/test-requirements.txt b/test-requirements.txt index bc5b43f2..d73c173e 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,6 +4,6 @@ redis>=2.10.5 boto3>=1.9.71 coverage>=4.4 jsonpickle==0.9.3 -pytest-capturelog>=0.7 pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 +pytest-mypy==0.7 \ No newline at end of file diff --git a/testing/http_util.py b/testing/http_util.py index 25ca7f52..de89aa30 100644 --- a/testing/http_util.py +++ b/testing/http_util.py @@ -76,7 +76,7 @@ def require_request(self): def wait_until_request_received(self): req = self.requests.get() self.requests.put(req) - + def should_have_requests(self, count): if self.requests.qsize() != count: rs = [] diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index a5c0a4c9..bb54bb50 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -5,6 +5,8 @@ import redis import time +from typing import List + # Consul is only supported in some Python versions have_consul = False try: @@ -21,8 +23,10 @@ skip_db_tests = os.environ.get('LD_SKIP_DATABASE_TESTS') == '1' +class Tester: + pass -class InMemoryTester: +class InMemoryTester(Tester): def init_store(self): return InMemoryFeatureStore() @@ -31,7 +35,7 @@ def supports_prefix(self): return False -class RedisTester: +class RedisTester(Tester): redis_host = 'localhost' redis_port = 6379 @@ -61,7 +65,7 @@ def supports_prefix(self): return True -class ConsulTester: +class ConsulTester(Tester): def __init__(self, cache_config): self._cache_config = cache_config @@ -80,7 +84,7 @@ def _clear_data(self, prefix): client.kv.delete(key) -class DynamoDBTester: +class DynamoDBTester(Tester): table_name = 'LD_DYNAMODB_TEST_TABLE' table_created = False options = { @@ -168,12 +172,13 @@ def _clear_data(self): class TestFeatureStore: + params = [] # type: List[Tester] if skip_db_tests: - params = [ + params += [ InMemoryTester() ] else: - params = [ + params += [ InMemoryTester(), RedisTester(CacheConfig.default()), RedisTester(CacheConfig.disabled()), diff --git a/testing/test_user_filter.py b/testing/test_user_filter.py index e1711ffb..ee61fce1 100644 --- a/testing/test_user_filter.py +++ b/testing/test_user_filter.py @@ -5,7 +5,7 @@ base_config = Config() config_with_all_attrs_private = Config(all_attributes_private = True) -config_with_some_attrs_private = Config(private_attribute_names=[u'firstName', u'bizzle']) +config_with_some_attrs_private = Config(private_attribute_names=set([u'firstName', u'bizzle'])) # users to serialize From 58b5bc302fe871b31bf11b28126f8b4133ad6d25 Mon Sep 17 00:00:00 2001 From: Elliot <35050275+Apache-HB@users.noreply.github.com> Date: Tue, 27 Oct 2020 12:14:26 -0700 Subject: [PATCH 
205/356] remove all current deprecations (#139) * remove all currently deprecated classes, methods, arguments, and tests * also update semver usage to remove calls to deprecated functions and classes --- ldclient/client.py | 38 ++------------------- ldclient/config.py | 52 ++--------------------------- ldclient/diagnostics.py | 6 ++-- ldclient/event_processor.py | 2 +- ldclient/feature_requester.py | 2 +- ldclient/file_data_source.py | 25 -------------- ldclient/memoized_value.py | 31 ----------------- ldclient/operators.py | 14 ++++---- ldclient/redis_feature_store.py | 46 ------------------------- requirements.txt | 2 +- testing/proxy_test_util.py | 2 +- testing/test_diagnostics.py | 13 ++++---- testing/test_event_processor.py | 8 ++--- testing/test_feature_store.py | 22 +++--------- testing/test_ldclient_end_to_end.py | 2 +- testing/test_ldclient_evaluation.py | 18 +++++----- 16 files changed, 43 insertions(+), 240 deletions(-) delete mode 100644 ldclient/file_data_source.py delete mode 100644 ldclient/memoized_value.py delete mode 100644 ldclient/redis_feature_store.py diff --git a/ldclient/client.py b/ldclient/client.py index 4d8b9600..66d925f7 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -10,7 +10,7 @@ import threading import traceback -from ldclient.config import Config as Config +from ldclient.config import Config, HTTPConfig from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_requester import FeatureRequesterImpl @@ -69,24 +69,15 @@ class LDClient: Client instances are thread-safe. """ - def __init__(self, sdk_key: str=None, config: Config=None, start_wait: float=5): + def __init__(self, config: Config, start_wait: float=5): """Constructs a new LDClient instance. - :param sdk_key: the SDK key for your LaunchDarkly environment :param config: optional custom configuration :param start_wait: the number of seconds to wait for a successful connection to LaunchDarkly """ check_uwsgi() - if config is not None and config.sdk_key is not None and sdk_key is not None: - raise Exception("LaunchDarkly client init received both sdk_key and config with sdk_key. " - "Only one of either is expected") - - if sdk_key is not None: - log.warning("Deprecated sdk_key argument was passed to init. Use config object instead.") - self._config = Config(sdk_key=sdk_key) - else: - self._config = config or Config.default() + self._config = config self._config._validate() self._event_processor = None @@ -237,14 +228,6 @@ def flush(self): return return self._event_processor.flush() - def toggle(self, key, user, default): - """Deprecated synonym for :func:`variation()`. - - .. deprecated:: 2.0.0 - """ - log.warning("Deprecated method: toggle() called. Use variation() instead.") - return self.variation(key, user, default) - def variation(self, key: str, user: dict, default: Any) -> Any: """Determines the variation of a feature flag for a user. @@ -325,21 +308,6 @@ def _evaluate_internal(self, key, user, default, event_factory): self._send_event(event_factory.new_default_event(flag, user, default, reason)) return EvaluationDetail(default, None, reason) - def all_flags(self, user: dict) -> Optional[dict]: - """Returns all feature flag values for the given user. - - This method is deprecated - please use :func:`all_flags_state()` instead. Current versions of the - client-side SDK will not generate analytics events correctly if you pass the result of ``all_flags``. 
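For code migrating off the removed entry points, the replacements are the ones the old deprecation warnings already named, and the updated tests later in this patch use them directly. A sketch of the equivalent calls (the flag key and user dict are placeholders):

    from ldclient import Config, LDClient

    client = LDClient(config=Config(sdk_key='SDK_KEY'))   # the SDK key now lives only on Config
    user = {'key': 'user-key'}

    # toggle(key, user, default)  ->  variation(key, user, default)
    value = client.variation('my-flag', user, False)

    # all_flags(user)  ->  all_flags_state(user), which also carries front-end metadata
    state = client.all_flags_state(user)
    values = state.to_values_map() if state.valid else None
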
- - :param user: the end user requesting the feature flags - :return: a dictionary of feature flag keys to values; returns None if the client is offline, - has not been initialized, or the user is None or has no key - """ - state = self.all_flags_state(user) - if not state.valid: - return None - return state.to_values_map() - def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: """Returns an object that encapsulates the state of all feature flags for a given user, including the flag values and also metadata that can be used on the front end. See the diff --git a/ldclient/config.py b/ldclient/config.py index 5a0d25e2..c421610f 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -20,10 +20,6 @@ class HTTPConfig: This class groups together HTTP/HTTPS-related configuration properties that rarely need to be changed. If you need to set these, construct an `HTTPConfig` instance and pass it as the `http` parameter when you construct the main :class:`Config` for the SDK client. - - For some of these properties, :class:`Config` also has properties with the same names; the latter are - deprecated and will be removed in the future, and if you specify an `HTTPConfig` instance then the - corresponding `Config` properties will be ignored. """ def __init__(self, connect_timeout: float=10, @@ -89,18 +85,15 @@ def __init__(self, sdk_key: Optional[str]=None, base_uri: str='https://app.launchdarkly.com', events_uri: str='https://events.launchdarkly.com', - connect_timeout=10, # deprecated - read_timeout=15, # deprecated events_max_pending: int=10000, flush_interval: float=5, stream_uri: str='https://stream.launchdarkly.com', stream: bool=True, initial_reconnect_delay: float=1, - verify_ssl=True, # deprecated defaults: dict={}, send_events: Optional[bool]=None, events_enabled: bool=True, - update_processor_class: Callable[[str, 'Config', FeatureStore], UpdateProcessor]=None, + update_processor_class: Optional[Callable[[str, 'Config', FeatureStore], UpdateProcessor]]=None, poll_interval: float=30, use_ldd: bool=False, feature_store: Optional[FeatureStore]=None, @@ -112,22 +105,17 @@ def __init__(self, user_keys_capacity: int=1000, user_keys_flush_interval: float=300, inline_users_in_events: bool=False, - http_proxy=None, # deprecated diagnostic_opt_out: bool=False, diagnostic_recording_interval: int=900, wrapper_name: Optional[str]=None, wrapper_version: Optional[str]=None, - http: Optional[HTTPConfig]=None): + http: HTTPConfig=HTTPConfig()): """ :param sdk_key: The SDK key for your LaunchDarkly account. :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default value. :param events_uri: The URL for the LaunchDarkly events server. Most users should use the default value. - :param connect_timeout: Deprecated; use `http` instead and specify the `connect_timeout` as - part of :class:`HTTPConfig`. - :param read_timeout: Deprecated; use `http` instead and specify the `read_timeout` as - part of :class:`HTTPConfig`. :param events_max_pending: The capacity of the events buffer. The client buffers up to this many events in memory before flushing. If the capacity is exceeded before the buffer is flushed, events will be discarded. @@ -141,8 +129,6 @@ def __init__(self, connection. The streaming service uses a backoff algorithm (with jitter) every time the connection needs to be reestablished. The delay for the first reconnection will start near this value, and then increase exponentially for any subsequent connection failures. 
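Every removed keyword argument above has a direct equivalent on HTTPConfig, exactly as the deprecation notices said. A sketch of the mapping (the values are placeholders; note that verify_ssl=False becomes disable_ssl_verification=True):

    from ldclient.config import Config, HTTPConfig

    # Old (removed):
    #   Config(connect_timeout=1, read_timeout=1, verify_ssl=False,
    #          http_proxy='http://proxy:8080')
    # New:
    config = Config(
        sdk_key='SDK_KEY',
        http=HTTPConfig(
            connect_timeout=1,
            read_timeout=1,
            disable_ssl_verification=True,
            http_proxy='http://proxy:8080',
        ),
    )
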
- :param verify_ssl: Deprecated; use `http` instead and specify `disable_ssl_verification` as - part of :class:`HTTPConfig` if you want to turn off SSL verification (not recommended). :param send_events: Whether or not to send events back to LaunchDarkly. This differs from `offline` in that it affects only the sending of client-side events, not streaming or polling for events from the server. By default, events will be sent. @@ -171,8 +157,6 @@ def __init__(self, :param event_processor_class: A factory for an EventProcessor implementation taking the config :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, config, and FeatureStore implementation - :param http_proxy: Deprecated; use `http` instead and specify the `http_proxy` as part of - :class:`HTTPConfig`. :param diagnostic_opt_out: Unless this field is set to True, the client will send some diagnostics data to the LaunchDarkly servers in order to assist in the development of future SDK improvements. These diagnostics consist of an initial payload containing some details of SDK in use, @@ -203,11 +187,8 @@ def __init__(self, self.__feature_store = InMemoryFeatureStore() if not feature_store else feature_store self.__event_processor_class = event_processor_class self.__feature_requester_class = feature_requester_class - self.__connect_timeout = connect_timeout - self.__read_timeout = read_timeout self.__events_max_pending = events_max_pending self.__flush_interval = flush_interval - self.__verify_ssl = verify_ssl self.__defaults = defaults if offline is True: send_events = False @@ -218,7 +199,6 @@ def __init__(self, self.__user_keys_capacity = user_keys_capacity self.__user_keys_flush_interval = user_keys_flush_interval self.__inline_users_in_events = inline_users_in_events - self.__http_proxy = http_proxy self.__diagnostic_opt_out = diagnostic_opt_out self.__diagnostic_recording_interval = max(diagnostic_recording_interval, 60) self.__wrapper_name = wrapper_name @@ -239,14 +219,11 @@ def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': return Config(sdk_key=new_sdk_key, base_uri=self.__base_uri, events_uri=self.__events_uri, - connect_timeout=self.__connect_timeout, - read_timeout=self.__read_timeout, events_max_pending=self.__events_max_pending, flush_interval=self.__flush_interval, stream_uri=self.__stream_uri, stream=self.__stream, initial_reconnect_delay=self.__initial_reconnect_delay, - verify_ssl=self.__verify_ssl, defaults=self.__defaults, send_events=self.__send_events, update_processor_class=self.__update_processor_class, @@ -335,14 +312,6 @@ def event_processor_class(self) -> Optional[Callable[['Config'], EventProcessor] def feature_requester_class(self) -> Callable: return self.__feature_requester_class - @property - def connect_timeout(self) -> float: - return self.__connect_timeout - - @property - def read_timeout(self) -> float: - return self.__read_timeout - @property def events_enabled(self) -> bool: return self.__send_events @@ -359,10 +328,6 @@ def events_max_pending(self) -> int: def flush_interval(self) -> float: return self.__flush_interval - @property - def verify_ssl(self) -> bool: - return self.__verify_ssl - @property def private_attribute_names(self) -> list: return list(self.__private_attribute_names) @@ -387,10 +352,6 @@ def user_keys_flush_interval(self) -> float: def inline_users_in_events(self) -> bool: return self.__inline_users_in_events - @property - def http_proxy(self): - return self.__http_proxy - @property def diagnostic_opt_out(self) -> 
bool: return self.__diagnostic_opt_out @@ -409,15 +370,6 @@ def wrapper_version(self) -> Optional[str]: @property def http(self) -> HTTPConfig: - if self.__http is None: - return HTTPConfig( - connect_timeout=self.__connect_timeout, - read_timeout=self.__read_timeout, - http_proxy=self.__http_proxy, - ca_certs=None, - cert_file=None, - disable_ssl_verification=not self.__verify_ssl - ) return self.__http def _validate(self): diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index fc3486b5..055dfe06 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -67,10 +67,10 @@ def _create_diagnostic_config_object(config): 'customEventsURI': config.events_uri != default_config.events_uri, 'customStreamURI': config.stream_base_uri != default_config.stream_base_uri, 'eventsCapacity': config.events_max_pending, - 'connectTimeoutMillis': config.connect_timeout * 1000, - 'socketTimeoutMillis': config.read_timeout * 1000, + 'connectTimeoutMillis': config.http.connect_timeout * 1000, + 'socketTimeoutMillis': config.http.read_timeout * 1000, 'eventsFlushIntervalMillis': config.flush_interval * 1000, - 'usingProxy': config.http_proxy is not None, + 'usingProxy': config.http.http_proxy is not None, 'streamingDisabled': not config.stream, 'usingRelayDaemon': config.use_ldd, 'allAttributesPrivate': config.all_attributes_private, diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index de5f8107..6bdb7da9 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -477,7 +477,7 @@ def _post_events_with_retry( uri, headers=hdrs, body=body, - timeout=urllib3.Timeout(connect=config.connect_timeout, read=config.read_timeout), + timeout=urllib3.Timeout(connect=config.http.connect_timeout, read=config.http.read_timeout), retries=0 ) if r.status < 300: diff --git a/ldclient/feature_requester.py b/ldclient/feature_requester.py index 4557104f..b526a332 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/feature_requester.py @@ -37,7 +37,7 @@ def get_all_data(self): hdrs['If-None-Match'] = cache_entry.etag r = self._http.request('GET', uri, headers=hdrs, - timeout=urllib3.Timeout(connect=self._config.connect_timeout, read=self._config.read_timeout), + timeout=urllib3.Timeout(connect=self._config.http.connect_timeout, read=self._config.http.read_timeout), retries=1) throw_if_unsuccessful_response(r) if r.status == 304 and cache_entry is not None: diff --git a/ldclient/file_data_source.py b/ldclient/file_data_source.py deleted file mode 100644 index 56da8de8..00000000 --- a/ldclient/file_data_source.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Deprecated entry point for a component that has been moved. -""" -# currently excluded from documentation - see docs/README.md - -from ldclient.impl.integrations.files.file_data_source import _FileDataSource -from ldclient.interfaces import UpdateProcessor - -class FileDataSource(UpdateProcessor): - @classmethod - def factory(cls, **kwargs): - """Provides a way to use local files as a source of feature flag state. - - .. deprecated:: 6.8.0 - This module and this implementation class are deprecated and may be changed or removed in the future. - Please use :func:`ldclient.integrations.Files.new_data_source()`. - - The keyword arguments are the same as the arguments to :func:`ldclient.integrations.Files.new_data_source()`. 
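Since the keyword arguments carry over unchanged, migrating off the deleted facade is mechanical; the file-source tests later in this patch show the same pattern. A sketch (the path and option values are placeholders):

    from ldclient import Config, LDClient
    from ldclient.integrations import Files

    # Old (removed):  factory = FileDataSource.factory(paths=['flags.json'], auto_update=True)
    factory = Files.new_data_source(paths=['flags.json'], auto_update=True)
    client = LDClient(config=Config('SDK_KEY', update_processor_class=factory,
                                    send_events=False))
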
- """ - - return lambda config, store, ready : _FileDataSource(store, ready, - paths=kwargs.get("paths"), - auto_update=kwargs.get("auto_update", False), - poll_interval=kwargs.get("poll_interval", 1), - force_polling=kwargs.get("force_polling", False)) diff --git a/ldclient/memoized_value.py b/ldclient/memoized_value.py deleted file mode 100644 index d92b69ad..00000000 --- a/ldclient/memoized_value.py +++ /dev/null @@ -1,31 +0,0 @@ -""" -Internal helper class for caching. No longer used. -""" -# currently excluded from documentation - see docs/README.md - -from threading import RLock - -class MemoizedValue: - """Simple implementation of a thread-safe memoized value whose generator function will never be - run more than once, and whose value can be overridden by explicit assignment. - - .. deprecated:: 6.7.0 - No longer used. Retained here only in case third parties were using it for another purpose. - """ - def __init__(self, generator): - self.generator = generator - self.inited = False - self.value = None - self.lock = RLock() - - def get(self): - with self.lock: - if not self.inited: - self.value = self.generator() - self.inited = True - return self.value - - def set(self, value): - with self.lock: - self.value = value - self.inited = True diff --git a/ldclient/operators.py b/ldclient/operators.py index 39e8860c..8bf95f86 100644 --- a/ldclient/operators.py +++ b/ldclient/operators.py @@ -5,7 +5,7 @@ import logging import re -import semver +from semver import VersionInfo import sys from datetime import tzinfo, timedelta, datetime from collections import defaultdict @@ -67,17 +67,17 @@ def _time_operator(u, c, fn): def _parse_semver(input): try: - semver.VersionInfo.parse(input) + VersionInfo.parse(input) return input except ValueError as e: try: input = _add_zero_version_component(input) - semver.VersionInfo.parse(input) + VersionInfo.parse(input) return input except ValueError as e: try: input = _add_zero_version_component(input) - semver.VersionInfo.parse(input) + VersionInfo.parse(input) return input except ValueError as e: return None @@ -143,15 +143,15 @@ def _after(u, c): def _semver_equal(u, c): - return _semver_operator(u, c, lambda u, c: semver.compare(u, c) == 0) + return _semver_operator(u, c, lambda u, c: VersionInfo.parse(u).compare(c) == 0) def _semver_less_than(u, c): - return _semver_operator(u, c, lambda u, c: semver.compare(u, c) < 0) + return _semver_operator(u, c, lambda u, c: VersionInfo.parse(u).compare(c) < 0) def _semver_greater_than(u, c): - return _semver_operator(u, c, lambda u, c: semver.compare(u, c) > 0) + return _semver_operator(u, c, lambda u, c: VersionInfo.parse(u).compare(c) > 0) _ZERO = timedelta(0) diff --git a/ldclient/redis_feature_store.py b/ldclient/redis_feature_store.py deleted file mode 100644 index 1e49d9ee..00000000 --- a/ldclient/redis_feature_store.py +++ /dev/null @@ -1,46 +0,0 @@ -from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore - -from ldclient.feature_store import CacheConfig -from ldclient.feature_store_helpers import CachingStoreWrapper -from ldclient.interfaces import FeatureStore - - -# Note that this class is now just a facade around CachingStoreWrapper, which is in turn delegating -# to _RedisFeatureStoreCore where the actual database logic is. This class was retained for historical -# reasons, to support existing code that calls the RedisFeatureStore constructor. In the future, we -# will migrate away from exposing these concrete classes and use only the factory methods. 
- -class RedisFeatureStore(FeatureStore): - """A Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. - - .. deprecated:: 6.7.0 - This module and this implementation class are deprecated and may be changed or removed in the future. - Please use :func:`ldclient.integrations.Redis.new_feature_store()`. - """ - def __init__(self, - url='redis://localhost:6379/0', - prefix='launchdarkly', - max_connections=16, - expiration=15, - capacity=1000): - self.core = _RedisFeatureStoreCore(url, prefix, max_connections) # exposed for testing - self._wrapper = CachingStoreWrapper(self.core, CacheConfig(expiration=expiration, capacity=capacity)) - - def get(self, kind, key, callback = lambda x: x): - return self._wrapper.get(kind, key, callback) - - def all(self, kind, callback): - return self._wrapper.all(kind, callback) - - def init(self, all_data): - return self._wrapper.init(all_data) - - def upsert(self, kind, item): - return self._wrapper.upsert(kind, item) - - def delete(self, kind, key, version): - return self._wrapper.delete(kind, key, version) - - @property - def initialized(self): - return self._wrapper.initialized diff --git a/requirements.txt b/requirements.txt index e80910cf..449e3467 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ certifi>=2018.4.16 expiringdict>=1.1.4,<1.2.0 pyRFC3339>=1.0 -semver>=2.10.2 +semver>=2.10.2,<3.0.0 urllib3>=1.22.0 diff --git a/testing/proxy_test_util.py b/testing/proxy_test_util.py index b9483f7b..2d6532d4 100644 --- a/testing/proxy_test_util.py +++ b/testing/proxy_test_util.py @@ -31,7 +31,7 @@ def do_proxy_tests(action, action_method, monkeypatch): base_uri = target_uri, events_uri = target_uri, stream_uri = target_uri, - http = None if use_env_vars else HTTPConfig(http_proxy=proxy_uri), + http = HTTPConfig(http_proxy=proxy_uri), diagnostic_opt_out = True) try: action(server, config, secure) diff --git a/testing/test_diagnostics.py b/testing/test_diagnostics.py index 6fd8e90d..7d10e1ba 100644 --- a/testing/test_diagnostics.py +++ b/testing/test_diagnostics.py @@ -1,14 +1,14 @@ import json import uuid -from ldclient.config import Config +from ldclient.config import Config, HTTPConfig from ldclient.diagnostics import create_diagnostic_id, create_diagnostic_init, _DiagnosticAccumulator, _create_diagnostic_config_object from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper def test_create_diagnostic_id(): - test_config = Config(sdk_key = "SDK_KEY") - diag_id = create_diagnostic_id(test_config); + test_config = Config(sdk_key = "SDK_KEY", http=HTTPConfig()) + diag_id = create_diagnostic_id(test_config) assert len(diag_id) == 2 uid = diag_id['diagnosticId'] # Will throw if invalid UUID4 @@ -17,7 +17,7 @@ def test_create_diagnostic_id(): def test_create_diagnostic_init(): test_config = Config(sdk_key = "SDK_KEY", wrapper_name='django', wrapper_version = '5.1.1') - diag_id = create_diagnostic_id(test_config); + diag_id = create_diagnostic_id(test_config) diag_init = create_diagnostic_init(100, diag_id, test_config) assert len(diag_init) == 6 assert diag_init['kind'] == 'diagnostic-init' @@ -64,11 +64,10 @@ def test_create_diagnostic_config_defaults(): def test_create_diagnostic_config_custom(): test_store = CachingStoreWrapper(_TestStoreForDiagnostics(), CacheConfig.default()) test_config = Config(base_uri='https://test.com', events_uri='https://test.com', - connect_timeout=1, read_timeout=1, events_max_pending=10, - flush_interval=1, stream_uri='https://test.com', + 
events_max_pending=10, flush_interval=1, stream_uri='https://test.com', stream=False, poll_interval=60, use_ldd=True, feature_store=test_store, all_attributes_private=True, user_keys_capacity=10, user_keys_flush_interval=60, - inline_users_in_events=True, http_proxy='', diagnostic_recording_interval=60) + inline_users_in_events=True, http=HTTPConfig(http_proxy = 'proxy', read_timeout=1, connect_timeout=1), diagnostic_recording_interval=60) diag_config = _create_diagnostic_config_object(test_config) assert len(diag_config) == 17 diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 76208784..900d04eb 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -4,7 +4,7 @@ import time import uuid -from ldclient.config import Config +from ldclient.config import Config, HTTPConfig from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.event_processor import DefaultEventProcessor from ldclient.util import log @@ -215,7 +215,7 @@ def test_two_events_for_same_user_only_produce_one_index_event(): 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True } - e1 = e0.copy(); + e1 = e0.copy() ep.send_event(e0) ep.send_event(e1) @@ -232,8 +232,8 @@ def test_new_index_event_is_added_if_user_cache_has_been_cleared(): 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True } - e1 = e0.copy(); - ep.send_event(e0); + e1 = e0.copy() + ep.send_event(e0) time.sleep(0.2) ep.send_event(e1) diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py index bb54bb50..1df87694 100644 --- a/testing/test_feature_store.py +++ b/testing/test_feature_store.py @@ -18,7 +18,6 @@ from ldclient.feature_store import CacheConfig, InMemoryFeatureStore from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers from ldclient.integrations import Consul, DynamoDB, Redis -from ldclient.redis_feature_store import RedisFeatureStore from ldclient.versioned_data_kind import FEATURES skip_db_tests = os.environ.get('LD_SKIP_DATABASE_TESTS') == '1' @@ -54,17 +53,6 @@ def _clear_data(self): r = redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0) r.flushdb() - -class RedisWithDeprecatedConstructorTester(RedisTester): - def init_store(self, prefix=None): - self._clear_data() - return RedisFeatureStore(expiration=(30 if self._cache_config.enabled else 0), prefix=prefix) - - @property - def supports_prefix(self): - return True - - class ConsulTester(Tester): def __init__(self, cache_config): self._cache_config = cache_config @@ -182,8 +170,6 @@ class TestFeatureStore: InMemoryTester(), RedisTester(CacheConfig.default()), RedisTester(CacheConfig.disabled()), - RedisWithDeprecatedConstructorTester(CacheConfig.default()), - RedisWithDeprecatedConstructorTester(CacheConfig.disabled()), DynamoDBTester(CacheConfig.default()), DynamoDBTester(CacheConfig.disabled()) ] @@ -332,7 +318,7 @@ def test_stores_with_different_prefixes_are_independent(self, tester): class TestRedisFeatureStoreExtraTests: def test_upsert_race_condition_against_external_client_with_higher_version(self): other_client = redis.StrictRedis(host='localhost', port=6379, db=0) - store = RedisFeatureStore() + store = Redis.new_feature_store() store.init({ FEATURES: {} }) other_version = {u'key': u'flagkey', u'version': 2} @@ -340,7 +326,7 @@ 
def hook(base_key, key): if other_version['version'] <= 4: other_client.hset(base_key, key, json.dumps(other_version)) other_version['version'] = other_version['version'] + 1 - store.core.test_update_hook = hook + store._core.test_update_hook = hook feature = { u'key': 'flagkey', u'version': 1 } @@ -350,7 +336,7 @@ def hook(base_key, key): def test_upsert_race_condition_against_external_client_with_lower_version(self): other_client = redis.StrictRedis(host='localhost', port=6379, db=0) - store = RedisFeatureStore() + store = Redis.new_feature_store() store.init({ FEATURES: {} }) other_version = {u'key': u'flagkey', u'version': 2} @@ -358,7 +344,7 @@ def hook(base_key, key): if other_version['version'] <= 4: other_client.hset(base_key, key, json.dumps(other_version)) other_version['version'] = other_version['version'] + 1 - store.core.test_update_hook = hook + store._core.test_update_hook = hook feature = { u'key': 'flagkey', u'version': 5 } diff --git a/testing/test_ldclient_end_to_end.py b/testing/test_ldclient_end_to_end.py index 48968b9f..7003805a 100644 --- a/testing/test_ldclient_end_to_end.py +++ b/testing/test_ldclient_end_to_end.py @@ -129,7 +129,7 @@ def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false(): base_uri = server.uri, stream = False, send_events = False, - verify_ssl = False + http = HTTPConfig(disable_ssl_verification=True) ) with LDClient(config = config) as client: assert client.is_initialized() diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index f716c5de..06ec99f7 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -166,27 +166,27 @@ def test_all_flags_returns_values(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) - result = client.all_flags(user) + result = client.all_flags_state(user).to_values_map() assert result == { 'key1': 'value1', 'key2': 'value2' } def test_all_flags_returns_none_if_user_is_none(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) - result = client.all_flags(None) - assert result is None + result = client.all_flags_state(None) + assert not result.valid def test_all_flags_returns_none_if_user_has_no_key(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) - result = client.all_flags({ }) - assert result is None + result = client.all_flags_state({ }) + assert not result.valid def test_all_flags_returns_none_if_feature_store_throws_error(caplog): store = ErroringFeatureStore() client = make_client(store) - assert client.all_flags({ "key": "user" }) is None + assert not client.all_flags_state({ "key": "user" }).valid errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unable to read flags for all_flag_state: NotImplementedError()' ] @@ -195,7 +195,7 @@ def test_all_flags_state_returns_state(): store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(user) - assert state.valid == True + assert state.valid result = state.to_json_dict() assert result == { 'key1': 'value1', @@ -220,7 +220,7 @@ def test_all_flags_state_returns_state_with_reasons(): store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) client = make_client(store) state = client.all_flags_state(user, with_reasons=True) - assert state.valid == True + assert state.valid result = state.to_json_dict() assert result == 
{ 'key1': 'value1', @@ -277,7 +277,7 @@ def test_all_flags_state_can_be_filtered_for_client_side_flags(): client = make_client(store) state = client.all_flags_state(user, client_side_only=True) - assert state.valid == True + assert state.valid values = state.to_values_map() assert values == { 'client-side-1': 'value1', 'client-side-2': 'value2' } From b35ec6eea64f3f3b1dd0d34625fc4adc10cab33c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 27 Oct 2020 15:51:42 -0700 Subject: [PATCH 206/356] remove global set_sdk_key, make SDK key required in Config (#140) --- demo/demo.py | 22 ------------- docs/api-main.rst | 2 +- ldclient/__init__.py | 50 ++++-------------------------- ldclient/client.py | 4 +-- ldclient/config.py | 10 ++---- ldclient/diagnostics.py | 3 +- testing/test_diagnostics.py | 6 ++-- testing/test_event_processor.py | 6 ++-- testing/test_file_data_source.py | 8 ++--- testing/test_init.py | 20 ------------ testing/test_ldclient.py | 14 +++------ testing/test_ldclient_singleton.py | 15 ++++----- testing/test_polling_processor.py | 8 ++--- testing/test_user_filter.py | 6 ++-- 14 files changed, 44 insertions(+), 130 deletions(-) delete mode 100644 demo/demo.py diff --git a/demo/demo.py b/demo/demo.py deleted file mode 100644 index 8ac745f4..00000000 --- a/demo/demo.py +++ /dev/null @@ -1,22 +0,0 @@ -import logging -import sys - -import ldclient - -root = logging.getLogger() -root.setLevel(logging.DEBUG) - -ch = logging.StreamHandler(sys.stdout) -ch.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s') -ch.setFormatter(formatter) -root.addHandler(ch) - -if __name__ == '__main__': - ldclient.start_wait = 10 - ldclient.set_sdk_key('YOUR_SDK_KEY') - - user = {u'key': 'userKey'} - print(ldclient.get().variation("update-app", user, False)) - - ldclient.get().close() diff --git a/docs/api-main.rst b/docs/api-main.rst index 1a5af4a1..003737f8 100644 --- a/docs/api-main.rst +++ b/docs/api-main.rst @@ -5,7 +5,7 @@ ldclient module --------------- .. automodule:: ldclient - :members: get,set_config,set_sdk_key + :members: get,set_config ldclient.client module ---------------------- diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 8eb9ac36..13f31802 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -18,7 +18,7 @@ start_wait = 5 __client = None -__config = Config() +__config = None __lock = ReadWriteLock() @@ -48,52 +48,12 @@ def set_config(config: Config): __lock.unlock() -def set_sdk_key(sdk_key: str): - """Sets the SDK key for the shared SDK client instance. - - If this is called prior to :func:`ldclient.get()`, it stores the SDK key that will be used when the client is - initialized. If it is called after the client has already been initialized, the client will be - re-initialized with the new SDK key (this will result in the next call to :func:`ldclient.get()` returning a - new client instance). - - If you need to set any configuration options other than the SDK key, use :func:`ldclient.set_config()` instead. - - :param sdk_key: the new SDK key - """ - global __config - global __client - global __lock - sdk_key_changed = False - try: - __lock.rlock() - if sdk_key == __config.sdk_key: - log.info("New sdk_key is the same as the existing one. 
doing nothing.") - else: - sdk_key_changed = True - finally: - __lock.runlock() - - if sdk_key_changed: - try: - __lock.lock() - __config = __config.copy_with_new_sdk_key(new_sdk_key=sdk_key) - if __client: - log.info("Reinitializing LaunchDarkly Client " + VERSION + " with new sdk key") - new_client = LDClient(config=__config, start_wait=start_wait) - old_client = __client - __client = new_client - old_client.close() - finally: - __lock.unlock() - - def get() -> LDClient: """Returns the shared SDK client instance, using the current global configuration. - To use the SDK as a singleton, first make sure you have called :func:`ldclient.set_sdk_key()` or - :func:`ldclient.set_config()` at startup time. Then ``get()`` will return the same shared - :class:`ldclient.client.LDClient` instance each time. The client will be initialized if it has - not been already. + To use the SDK as a singleton, first make sure you have called :func:`ldclient.set_config()` + at startup time. Then ``get()`` will return the same shared :class:`ldclient.client.LDClient` + instance each time. The client will be initialized if it has not been already. If you need to create multiple client instances with different configurations, instead of this singleton approach you can call the :class:`ldclient.client.LDClient` constructor directly instead. @@ -105,6 +65,8 @@ def get() -> LDClient: __lock.rlock() if __client: return __client + if __config is None: + raise Exception("set_config was not called") finally: __lock.runlock() diff --git a/ldclient/client.py b/ldclient/client.py index 66d925f7..c97bbb42 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -64,8 +64,8 @@ class LDClient: Applications should configure the client at startup time and continue to use it throughout the lifetime of the application, rather than creating instances on the fly. The best way to do this is with the - singleton methods :func:`ldclient.set_sdk_key()`, :func:`ldclient.set_config()`, and :func:`ldclient.get()`. - However, you may also call the constructor directly if you need to maintain multiple instances. + singleton methods :func:`ldclient.set_config()` and :func:`ldclient.get()`. However, you may also call + the constructor directly if you need to maintain multiple instances. Client instances are thread-safe. """ diff --git a/ldclient/config.py b/ldclient/config.py index c421610f..cccb7f27 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -82,7 +82,7 @@ class Config: if you are using the singleton client, or the :class:`ldclient.client.LDClient` constructor otherwise. """ def __init__(self, - sdk_key: Optional[str]=None, + sdk_key: str, base_uri: str='https://app.launchdarkly.com', events_uri: str='https://events.launchdarkly.com', events_max_pending: int=10000, @@ -111,7 +111,7 @@ def __init__(self, wrapper_version: Optional[str]=None, http: HTTPConfig=HTTPConfig()): """ - :param sdk_key: The SDK key for your LaunchDarkly account. + :param sdk_key: The SDK key for your LaunchDarkly account. This is always required. :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default value. :param events_uri: The URL for the LaunchDarkly events server. Most users should use the @@ -205,12 +205,6 @@ def __init__(self, self.__wrapper_version = wrapper_version self.__http = http - @classmethod - def default(cls) -> 'Config': - """Returns a ``Config`` instance with default values for all properties. 
- """ - return cls() - def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index 055dfe06..e40b4ff0 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -8,6 +8,7 @@ import uuid import platform +from ldclient.config import Config from ldclient.version import VERSION class _DiagnosticAccumulator: @@ -62,7 +63,7 @@ def _diagnostic_base_fields(kind, creation_date, diagnostic_id): 'id': diagnostic_id} def _create_diagnostic_config_object(config): - default_config = config.default() + default_config = Config("SDK_KEY") return {'customBaseURI': config.base_uri != default_config.base_uri, 'customEventsURI': config.events_uri != default_config.events_uri, 'customStreamURI': config.stream_base_uri != default_config.stream_base_uri, diff --git a/testing/test_diagnostics.py b/testing/test_diagnostics.py index 7d10e1ba..c725e8d9 100644 --- a/testing/test_diagnostics.py +++ b/testing/test_diagnostics.py @@ -39,7 +39,7 @@ def test_create_diagnostic_init(): json.dumps(diag_init) def test_create_diagnostic_config_defaults(): - test_config = Config() + test_config = Config("SDK_KEY") diag_config = _create_diagnostic_config_object(test_config) assert len(diag_config) == 17 @@ -63,7 +63,7 @@ def test_create_diagnostic_config_defaults(): def test_create_diagnostic_config_custom(): test_store = CachingStoreWrapper(_TestStoreForDiagnostics(), CacheConfig.default()) - test_config = Config(base_uri='https://test.com', events_uri='https://test.com', + test_config = Config("SDK_KEY", base_uri='https://test.com', events_uri='https://test.com', events_max_pending=10, flush_interval=1, stream_uri='https://test.com', stream=False, poll_interval=60, use_ldd=True, feature_store=test_store, all_attributes_private=True, user_keys_capacity=10, user_keys_flush_interval=60, @@ -95,7 +95,7 @@ def describe_configuration(self, config): def test_diagnostic_accumulator(): test_config = Config(sdk_key = "SDK_KEY") - diag_id = create_diagnostic_id(test_config); + diag_id = create_diagnostic_id(test_config) diag_accum = _DiagnosticAccumulator(diag_id) # Test default periodic event diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 900d04eb..77ec05e8 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -13,7 +13,7 @@ from testing.stub_util import MockResponse, MockHttp -default_config = Config() +default_config = Config("fake_sdk_key") user = { 'key': 'userkey', 'name': 'Red' @@ -69,6 +69,8 @@ class DefaultTestProcessor(DefaultEventProcessor): def __init__(self, **kwargs): if not 'diagnostic_opt_out' in kwargs: kwargs['diagnostic_opt_out'] = True + if not 'sdk_key' in kwargs: + kwargs['sdk_key'] = 'SDK_KEY' config = Config(**kwargs) diagnostic_accumulator = _DiagnosticAccumulator(create_diagnostic_id(config)) DefaultEventProcessor.__init__(self, config, mock_http, diagnostic_accumulator = diagnostic_accumulator) @@ -531,7 +533,7 @@ def test_will_still_send_after_500_error(): verify_recoverable_http_error(500) def test_does_not_block_on_full_inbox(): - config = Config(events_max_pending=1) # this sets the size of both the inbox and the outbox to 1 + config = Config("fake_sdk_key", events_max_pending=1) # this sets the size of both the inbox and the outbox to 1 ep_inbox_holder = [ None ] ep_inbox = None diff --git a/testing/test_file_data_source.py 
b/testing/test_file_data_source.py index 191309d7..17efe8a0 100644 --- a/testing/test_file_data_source.py +++ b/testing/test_file_data_source.py @@ -100,7 +100,7 @@ def teardown_function(): def make_data_source(**kwargs): global data_source - data_source = Files.new_data_source(**kwargs)(Config(), store, ready) + data_source = Files.new_data_source(**kwargs)(Config("SDK_KEY"), store, ready) return data_source def make_temp_file(content): @@ -226,7 +226,7 @@ def test_evaluates_full_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: factory = Files.new_data_source(paths = path) - client = LDClient(config=Config(update_processor_class = factory, send_events = False)) + client = LDClient(config=Config('SDK_KEY', update_processor_class = factory, send_events = False)) value = client.variation('flag1', { 'key': 'user' }, '') assert value == 'on' finally: @@ -238,7 +238,7 @@ def test_evaluates_simplified_flag_with_client_as_expected(): path = make_temp_file(all_properties_json) try: factory = Files.new_data_source(paths = path) - client = LDClient(config=Config(update_processor_class = factory, send_events = False)) + client = LDClient(config=Config('SDK_KEY', update_processor_class = factory, send_events = False)) value = client.variation('flag2', { 'key': 'user' }, '') assert value == 'value2' finally: @@ -264,7 +264,7 @@ def test_does_not_allow_unsafe_yaml(): path = make_temp_file(unsafe_yaml) try: factory = Files.new_data_source(paths = path) - client = LDClient(config=Config(update_processor_class = factory, send_events = False)) + client = LDClient(config=Config('SDK_KEY', update_processor_class = factory, send_events = False)) finally: os.remove(path) if client is not None: diff --git a/testing/test_init.py b/testing/test_init.py index ca13c130..6817b873 100644 --- a/testing/test_init.py +++ b/testing/test_init.py @@ -6,26 +6,6 @@ mylogger = logging.getLogger() -def test_set_sdk_key(): - old_sdk_key = "OLD_SDK_KEY" - new_sdk_key = "NEW_SDK_KEY" - - old_config = Config(sdk_key=old_sdk_key, stream=False, offline=True) - ldclient.set_config(old_config) - - old_client = ldclient.get() - assert old_client.get_sdk_key() == old_sdk_key - - ldclient.set_sdk_key(new_sdk_key) - new_client = ldclient.get() - - assert new_client.get_sdk_key() == new_sdk_key - - # illustrates bad behavior- assigning value of ldclient.get() means - # the old_client didn't get updated when we called set_sdk_key() - assert old_client.get_sdk_key() == old_sdk_key - - def test_set_config(): old_sdk_key = "OLD_SDK_KEY" new_sdk_key = "NEW_SDK_KEY" diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index e16af123..7615bb16 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -74,12 +74,6 @@ def count_events(c): return n -def test_ctor_both_sdk_keys_set(): - with pytest.raises(Exception): - config = Config(sdk_key="sdk key a", offline=True) - LDClient(sdk_key="sdk key b", config=config) - - def test_client_has_null_event_processor_if_offline(): with make_offline_client() as client: assert isinstance(client._event_processor, NullEventProcessor) @@ -179,14 +173,15 @@ def test_track_no_user_key(): def test_defaults(): - config=Config(base_uri="http://localhost:3000", defaults={"foo": "bar"}, offline=True) + config=Config("SDK_KEY", base_uri="http://localhost:3000", defaults={"foo": "bar"}, offline=True) with LDClient(config=config) as client: assert "bar" == client.variation('foo', user, default=None) def test_defaults_and_online(): expected = "bar" - my_client = 
LDClient(config=Config(base_uri="http://localhost:3000", + my_client = LDClient(config=Config("SDK_KEY", + base_uri="http://localhost:3000", defaults={"foo": expected}, event_processor_class=MockEventProcessor, update_processor_class=MockUpdateProcessor, @@ -198,7 +193,8 @@ def test_defaults_and_online(): def test_defaults_and_online_no_default(): - my_client = LDClient(config=Config(base_uri="http://localhost:3000", + my_client = LDClient(config=Config("SDK_KEY", + base_uri="http://localhost:3000", defaults={"foo": "bar"}, event_processor_class=MockEventProcessor, update_processor_class=MockUpdateProcessor)) diff --git a/testing/test_ldclient_singleton.py b/testing/test_ldclient_singleton.py index 6dba8262..f685ea1b 100644 --- a/testing/test_ldclient_singleton.py +++ b/testing/test_ldclient_singleton.py @@ -18,8 +18,7 @@ def test_set_sdk_key_before_init(): try: stream_server.for_path('/all', stream_handler) - ldclient.set_config(Config(stream_uri = stream_server.uri, send_events = False)) - ldclient.set_sdk_key(sdk_key) + ldclient.set_config(Config(sdk_key, stream_uri = stream_server.uri, send_events = False)) wait_until(ldclient.get().is_initialized, timeout=10) r = stream_server.await_request() @@ -29,20 +28,22 @@ def test_set_sdk_key_before_init(): def test_set_sdk_key_after_init(): _reset_client() + other_key = 'other-key' with start_server() as stream_server: with stream_content(make_put_event()) as stream_handler: try: stream_server.for_path('/all', BasicResponse(401)) - ldclient.set_config(Config(stream_uri = stream_server.uri, send_events = False)) + config = Config(other_key, stream_uri = stream_server.uri, send_events = False) + ldclient.set_config(config) assert ldclient.get().is_initialized() is False r = stream_server.await_request() - assert r.headers['Authorization'] == '' + assert r.headers['Authorization'] == other_key stream_server.for_path('/all', stream_handler) - ldclient.set_sdk_key(sdk_key) + ldclient.set_config(config.copy_with_new_sdk_key(sdk_key)) wait_until(ldclient.get().is_initialized, timeout=30) r = stream_server.await_request() @@ -57,10 +58,10 @@ def test_set_config(): try: stream_server.for_path('/all', stream_handler) - ldclient.set_config(Config(offline=True)) + ldclient.set_config(Config(sdk_key, offline=True)) assert ldclient.get().is_offline() is True - ldclient.set_config(Config(sdk_key = sdk_key, stream_uri = stream_server.uri, send_events = False)) + ldclient.set_config(Config(sdk_key, stream_uri = stream_server.uri, send_events = False)) assert ldclient.get().is_offline() is False wait_until(ldclient.get().is_initialized, timeout=10) diff --git a/testing/test_polling_processor.py b/testing/test_polling_processor.py index 113672f3..e4a4dcd0 100644 --- a/testing/test_polling_processor.py +++ b/testing/test_polling_processor.py @@ -47,7 +47,7 @@ def test_successful_request_puts_feature_data_in_store(): "segkey": segment } } - setup_processor(Config()) + setup_processor(Config("SDK_KEY")) ready.wait() assert store.get(FEATURES, "flagkey", lambda x: x) == flag assert store.get(SEGMENTS, "segkey", lambda x: x) == segment @@ -60,7 +60,7 @@ def test_successful_request_puts_feature_data_in_store(): def test_general_connection_error_does_not_cause_immediate_failure(ignore_mock): mock_requester.exception = Exception("bad") start_time = time.time() - setup_processor(Config()) + setup_processor(Config("SDK_KEY")) ready.wait(0.3) assert not pp.initialized() assert mock_requester.request_count >= 2 @@ -86,7 +86,7 @@ def 
test_http_503_error_does_not_cause_immediate_failure(): @mock.patch('ldclient.config.Config.poll_interval', new_callable=mock.PropertyMock, return_value=0.1) def verify_unrecoverable_http_error(status, ignore_mock): mock_requester.exception = UnsuccessfulResponseException(status) - setup_processor(Config()) + setup_processor(Config("SDK_KEY")) finished = ready.wait(0.5) assert finished assert not pp.initialized() @@ -95,7 +95,7 @@ def verify_unrecoverable_http_error(status, ignore_mock): @mock.patch('ldclient.config.Config.poll_interval', new_callable=mock.PropertyMock, return_value=0.1) def verify_recoverable_http_error(status, ignore_mock): mock_requester.exception = UnsuccessfulResponseException(status) - setup_processor(Config()) + setup_processor(Config("SDK_KEY")) finished = ready.wait(0.5) assert not finished assert not pp.initialized() diff --git a/testing/test_user_filter.py b/testing/test_user_filter.py index ee61fce1..aa53bbad 100644 --- a/testing/test_user_filter.py +++ b/testing/test_user_filter.py @@ -3,9 +3,9 @@ from ldclient.user_filter import UserFilter -base_config = Config() -config_with_all_attrs_private = Config(all_attributes_private = True) -config_with_some_attrs_private = Config(private_attribute_names=set([u'firstName', u'bizzle'])) +base_config = Config("fake_sdk_key") +config_with_all_attrs_private = Config("fake_sdk_key", all_attributes_private = True) +config_with_some_attrs_private = Config("fake_sdk_key", private_attribute_names=set([u'firstName', u'bizzle'])) # users to serialize From e5f645032dc17738c05ee64bd894cd7c177cca87 Mon Sep 17 00:00:00 2001 From: Ben Woskow <48036130+bwoskow-ld@users.noreply.github.com> Date: Wed, 3 Feb 2021 15:11:52 -0800 Subject: [PATCH 207/356] Removed the guides link --- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 80bd937b..5782eff1 100644 --- a/README.md +++ b/README.md @@ -46,4 +46,3 @@ We encourage pull requests and other contributions from the community. 
Check out * [docs.launchdarkly.com](https://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDK reference guides * [apidocs.launchdarkly.com](https://apidocs.launchdarkly.com/ "LaunchDarkly API Documentation") for our API documentation * [blog.launchdarkly.com](https://blog.launchdarkly.com/ "LaunchDarkly Blog Documentation") for the latest product updates - * [Feature Flagging Guide](https://github.com/launchdarkly/featureflags/ "Feature Flagging Guide") for best practices and strategies From 4d2e99979ca0aededa854ab9583c5e28986a184e Mon Sep 17 00:00:00 2001 From: Ben Woskow <48036130+bwoskow-ld@users.noreply.github.com> Date: Thu, 4 Feb 2021 16:39:52 -0800 Subject: [PATCH 208/356] Pinning mypy and running it against different python versions (#141) --- .circleci/config.yml | 3 +-- mypy.ini | 3 +-- test-requirements.txt | 3 ++- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 87453577..f33121db 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -86,9 +86,8 @@ jobs: - run: name: verify typehints command: | - pip install mypy export PATH="/home/circleci/.local/bin:$PATH" - mypy --config-file mypy.ini --python-version 3.5 ldclient/*.py testing/*.py + mypy --config-file mypy.ini ldclient testing - store_test_results: path: test-reports - store_artifacts: diff --git a/mypy.ini b/mypy.ini index d345df18..e886c085 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,3 +1,2 @@ [mypy] -ignore_missing_imports = true -python_version = 3.5 +ignore_missing_imports = true \ No newline at end of file diff --git a/test-requirements.txt b/test-requirements.txt index d73c173e..1f80fcc7 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -6,4 +6,5 @@ coverage>=4.4 jsonpickle==0.9.3 pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 -pytest-mypy==0.7 \ No newline at end of file +pytest-mypy==0.7 +mypy==0.800 \ No newline at end of file From 1cc83f56582357e07209ff96eb2e5b41a525d55b Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 18 Feb 2021 18:04:10 -0800 Subject: [PATCH 209/356] fix time zone mishandling that could make event debugging not work (#142) --- ldclient/event_processor.py | 3 ++- testing/test_event_processor.py | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 6bdb7da9..6174f7f2 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -3,6 +3,7 @@ """ # currently excluded from documentation - see docs/README.md +from calendar import timegm from collections import namedtuple from email.utils import parsedate import errno @@ -361,7 +362,7 @@ def _handle_response(self, r): if server_date_str is not None: server_date = parsedate(server_date_str) if server_date is not None: - timestamp = int(time.mktime(server_date) * 1000) + timestamp = int(timegm(server_date) * 1000) self._last_known_past_time = timestamp if r.status > 299 and not is_http_error_recoverable(r.status): self._disabled = True diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 77ec05e8..0946b583 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -280,6 +280,33 @@ def test_event_can_be_both_tracked_and_debugged(): check_feature_event(output[2], e, True, user) check_summary_event(output[3]) +def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_before_expiration_time(): + with DefaultTestProcessor() as ep: + # 
Pick a server time that is slightly different from client time
+        server_time = now() + 1000
+
+        # Send and flush an event we don't care about, just to set the last server time
+        mock_http.set_server_time(server_time)
+        ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }})
+        flush_and_get_events(ep)
+
+        # Now send an event with debug mode on, with a "debug until" time that is further in
+        # the future than both the client time and the server time
+        debug_until = server_time + 10000
+        e = {
+            'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
+            'variation': 1, 'value': 'value', 'default': 'default',
+            'trackEvents': False, 'debugEventsUntilDate': debug_until
+        }
+        ep.send_event(e)
+
+        # Should get an index event, a debug event, and a summary event
+        output = flush_and_get_events(ep)
+        assert len(output) == 3
+        check_index_event(output[0], e, user)
+        check_feature_event(output[1], e, True, user) # debug event
+        check_summary_event(output[2])
+
 def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time():
     with DefaultTestProcessor() as ep:
         # Pick a server time that is somewhat behind the client time

From 5fa59665ad59dd9b073986be53d7d674b27b1882 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 23 Feb 2021 10:22:25 -0800
Subject: [PATCH 210/356] fix 6.x build (#143)

---
 .circleci/config.yml | 3 ++-
 requirements.txt | 2 +-
 test-filesource-optional-requirements.txt | 2 +-
 test-requirements.txt | 6 +++---
 4 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6b0e096a..21b12842 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -66,7 +66,8 @@ jobs:
       - run:
           name: install requirements
           command: |
-            sudo pip install --upgrade pip virtualenv;
+            sudo pip install --upgrade pip;
+            sudo pip install 'virtualenv~=16.0';
             sudo pip install -r test-requirements.txt;
             if [[ "<>" == "true" ]]; then
               sudo pip install -r test-filesource-optional-requirements.txt;

diff --git a/requirements.txt b/requirements.txt
index 76cd9de6..b164adf0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,5 +2,5 @@ certifi>=2018.4.16
 expiringdict>=1.1.4,<1.2.0
 six>=1.10.0
 pyRFC3339>=1.0
-semver>=2.7.9
+semver>=2.7.9,<3.0.0
 urllib3>=1.22.0

diff --git a/test-filesource-optional-requirements.txt b/test-filesource-optional-requirements.txt
index 40e04279..3cfa747b 100644
--- a/test-filesource-optional-requirements.txt
+++ b/test-filesource-optional-requirements.txt
@@ -1,2 +1,2 @@
 pyyaml>=3.0,<5.2
-watchdog>=0.9
+watchdog>=0.9,<1.0

diff --git a/test-requirements.txt b/test-requirements.txt
index bc5b43f2..8e582a06 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,9 +1,9 @@
 mock>=2.0.0
 pytest>=2.8
-redis>=2.10.5
-boto3>=1.9.71
+redis>=2.10.5,<3.0.0
+boto3>=1.9.71,<1.11.0
 coverage>=4.4
 jsonpickle==0.9.3
-pytest-capturelog>=0.7
+pytest-catchlog
 pytest-cov>=2.4.0
 codeclimate-test-reporter>=0.2.1

From c9d6ec6c94e4e8c2ca0f4e669e429ed8dadf62ff Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 23 Feb 2021 10:26:11 -0800
Subject: [PATCH 211/356] fix time zone mishandling that could make event
 debugging not work (6.x) (#144)

---
 ldclient/event_processor.py | 3 ++-
 testing/test_event_processor.py | 27 +++++++++++++++++++++++++++
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py
index b94d800a..1d67123f 100644
--- a/ldclient/event_processor.py
+++ b/ldclient/event_processor.py
@@ -3,6 +3,7 @@
 """
 # currently excluded from documentation - see docs/README.md
+from calendar import timegm
 from collections import namedtuple
 from email.utils import parsedate
 import errno
@@ -368,7 +369,7 @@ def _handle_response(self, r):
         if server_date_str is not None:
             server_date = parsedate(server_date_str)
             if server_date is not None:
-                timestamp = int(time.mktime(server_date) * 1000)
+                timestamp = int(timegm(server_date) * 1000)
                 self._last_known_past_time = timestamp
         if r.status > 299 and not is_http_error_recoverable(r.status):
             self._disabled = True

diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py
index 76208784..74c9d0d7 100644
--- a/testing/test_event_processor.py
+++ b/testing/test_event_processor.py
@@ -278,6 +278,33 @@ def test_event_can_be_both_tracked_and_debugged():
     check_feature_event(output[2], e, True, user)
     check_summary_event(output[3])

+def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_before_expiration_time():
+    with DefaultTestProcessor() as ep:
+        # Pick a server time that is slightly different from client time
+        server_time = now() + 1000
+
+        # Send and flush an event we don't care about, just to set the last server time
+        mock_http.set_server_time(server_time)
+        ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }})
+        flush_and_get_events(ep)
+
+        # Now send an event with debug mode on, with a "debug until" time that is further in
+        # the future than both the client time and the server time
+        debug_until = server_time + 10000
+        e = {
+            'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
+            'variation': 1, 'value': 'value', 'default': 'default',
+            'trackEvents': False, 'debugEventsUntilDate': debug_until
+        }
+        ep.send_event(e)
+
+        # Should get an index event, a debug event, and a summary event
+        output = flush_and_get_events(ep)
+        assert len(output) == 3
+        check_index_event(output[0], e, user)
+        check_feature_event(output[1], e, True, user) # debug event
+        check_summary_event(output[2])
+
 def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time():
     with DefaultTestProcessor() as ep:
         # Pick a server time that is somewhat behind the client time

From f9ce3b9285aad925aed8a30afd53463db67afdf0 Mon Sep 17 00:00:00 2001
From: LaunchDarklyCI
Date: Tue, 23 Feb 2021 23:48:20 +0000
Subject: [PATCH 212/356] prepare 6.13.3 release (#154)

---
 .circleci/config.yml | 3 ++-
 ldclient/event_processor.py | 3 ++-
 requirements.txt | 2 +-
 test-filesource-optional-requirements.txt | 2 +-
 test-requirements.txt | 6 ++---
 testing/test_event_processor.py | 27 +++++++++++++++++++++++
 6 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 6b0e096a..21b12842 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -66,7 +66,8 @@ jobs:
       - run:
           name: install requirements
           command: |
-            sudo pip install --upgrade pip virtualenv;
+            sudo pip install --upgrade pip;
+            sudo pip install 'virtualenv~=16.0';
             sudo pip install -r test-requirements.txt;
             if [[ "<>" == "true" ]]; then
               sudo pip install -r test-filesource-optional-requirements.txt;

diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py
index b94d800a..1d67123f 100644
--- a/ldclient/event_processor.py
+++ b/ldclient/event_processor.py
@@ -3,6 +3,7 @@
 """
 # currently excluded from documentation - see docs/README.md
+from calendar import timegm
 from collections import namedtuple
 from email.utils import parsedate
 import errno
@@ -368,7 +369,7 @@ def _handle_response(self, r):
         if server_date_str is not None:
             server_date = parsedate(server_date_str)
             if server_date is not None:
-                timestamp = int(time.mktime(server_date) * 1000)
+                timestamp = int(timegm(server_date) * 1000)
                 self._last_known_past_time = timestamp
         if r.status > 299 and not is_http_error_recoverable(r.status):
             self._disabled = True

diff --git a/requirements.txt b/requirements.txt
index 76cd9de6..b164adf0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,5 +2,5 @@ certifi>=2018.4.16
 expiringdict>=1.1.4,<1.2.0
 six>=1.10.0
 pyRFC3339>=1.0
-semver>=2.7.9
+semver>=2.7.9,<3.0.0
 urllib3>=1.22.0

diff --git a/test-filesource-optional-requirements.txt b/test-filesource-optional-requirements.txt
index 40e04279..3cfa747b 100644
--- a/test-filesource-optional-requirements.txt
+++ b/test-filesource-optional-requirements.txt
@@ -1,2 +1,2 @@
 pyyaml>=3.0,<5.2
-watchdog>=0.9
+watchdog>=0.9,<1.0

diff --git a/test-requirements.txt b/test-requirements.txt
index bc5b43f2..8e582a06 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,9 +1,9 @@
 mock>=2.0.0
 pytest>=2.8
-redis>=2.10.5
-boto3>=1.9.71
+redis>=2.10.5,<3.0.0
+boto3>=1.9.71,<1.11.0
 coverage>=4.4
 jsonpickle==0.9.3
-pytest-capturelog>=0.7
+pytest-catchlog
 pytest-cov>=2.4.0
 codeclimate-test-reporter>=0.2.1

diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py
index 76208784..74c9d0d7 100644
--- a/testing/test_event_processor.py
+++ b/testing/test_event_processor.py
@@ -278,6 +278,33 @@ def test_event_can_be_both_tracked_and_debugged():
     check_feature_event(output[2], e, True, user)
     check_summary_event(output[3])

+def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_before_expiration_time():
+    with DefaultTestProcessor() as ep:
+        # Pick a server time that is slightly different from client time
+        server_time = now() + 1000
+
+        # Send and flush an event we don't care about, just to set the last server time
+        mock_http.set_server_time(server_time)
+        ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }})
+        flush_and_get_events(ep)
+
+        # Now send an event with debug mode on, with a "debug until" time that is further in
+        # the future than both the client time and the server time
+        debug_until = server_time + 10000
+        e = {
+            'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user,
+            'variation': 1, 'value': 'value', 'default': 'default',
+            'trackEvents': False, 'debugEventsUntilDate': debug_until
+        }
+        ep.send_event(e)
+
+        # Should get an index event, a debug event, and a summary event
+        output = flush_and_get_events(ep)
+        assert len(output) == 3
+        check_index_event(output[0], e, user)
+        check_feature_event(output[1], e, True, user) # debug event
+        check_summary_event(output[2])
+
 def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time():
     with DefaultTestProcessor() as ep:
         # Pick a server time that is somewhat behind the client time

From 953c126bb9997a2574d2af375176a91e5c7ff849 Mon Sep 17 00:00:00 2001
From: LaunchDarklyCI
Date: Tue, 23 Feb 2021 23:48:52 +0000
Subject: [PATCH 213/356] Releasing version 6.13.3

---
 CHANGELOG.md | 4 ++++
 ldclient/version.py | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 181927f0..4f5686b6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@ All notable changes to the LaunchDarkly Python SDK will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org).
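The time zone fix recorded in the patches above comes down to which inverse function is paired with email.utils.parsedate: parsedate returns a time tuple expressed in UTC (server Date headers are always GMT), calendar.timegm converts such a tuple to epoch seconds as UTC, while time.mktime applies the server's local time zone. A minimal sketch of the mismatch, using an example Date header value:

from calendar import timegm
from email.utils import parsedate
import time

parsed = parsedate('Tue, 23 Feb 2021 23:48:20 GMT')   # UTC time tuple

utc_ms = int(timegm(parsed) * 1000)          # interprets the tuple as UTC (correct)
local_ms = int(time.mktime(parsed) * 1000)   # interprets the tuple as local time

# The two values agree only when the local zone is GMT; anywhere else local_ms
# is shifted by the UTC offset, which is the debug-event bug described below.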
+## [6.13.3] - 2021-02-23
+### Fixed:
+- The SDK could fail to send debug events when event debugging was enabled on the LaunchDarkly dashboard, if the application server's time zone was not GMT.
+
 ## [6.13.2] - 2020-09-21
 ### Fixed:
 - The SDK was not recognizing proxy authorization parameters included in a proxy URL (example: `http://username:password@proxyhost:port`). It will now use these parameters if present, regardless of whether you set the proxy URL programmatically or in an environment variable. (Thanks, [gangeli](https://github.com/launchdarkly/python-server-sdk/pull/145)!)

diff --git a/ldclient/version.py b/ldclient/version.py
index a9336f71..7bfde60e 100644
--- a/ldclient/version.py
+++ b/ldclient/version.py
@@ -1 +1 @@
-VERSION = "6.13.2"
+VERSION = "6.13.3"

From 532a01b27f918de322f55dc0715fbe9b0b64503c Mon Sep 17 00:00:00 2001
From: hroederld
Date: Thu, 11 Mar 2021 13:52:43 -0800
Subject: [PATCH 214/356] [ch99756] Add alias events (#145)

---
 ldclient/client.py | 19 +++++++++++
 ldclient/event_processor.py | 4 +++
 ldclient/impl/event_factory.py | 23 ++++++++++++++
 testing/test_ldclient.py | 49 +++++++++++++++++++++++++++++++++-
 4 files changed, 94 insertions(+), 1 deletion(-)

diff --git a/ldclient/client.py b/ldclient/client.py
index c97bbb42..d401df39 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -187,6 +187,25 @@ def track(self, event_name: str, user: dict, data: Optional[Any]=None, metric_va
         else:
             self._send_event(self._event_factory_default.new_custom_event(event_name, user, data, metric_value))

+    def alias(self, current_user: dict, previous_user: dict):
+        """Associates two users for analytics purposes.
+
+        This can be helpful in the situation where a person is represented by multiple
+        LaunchDarkly users. This may happen, for example, when a person initially logs into
+        an application: prior to logging in, the person might be represented by an anonymous
+        user, and after logging in by a different user, as denoted by a different user key.
+
+        :param current_user: The new version of a user.
+        :param previous_user: The old version of a user.
+        """
+        if current_user is None or current_user.get('key') is None:
+            log.warning("Missing current_user or current_user key when calling alias().")
+            return None
+        if previous_user is None or previous_user.get('key') is None:
+            log.warning("Missing previous_user or previous_user key when calling alias().")
+            return None
+        self._send_event(self._event_factory_default.new_alias_event(current_user, previous_user))
+
     def identify(self, user: dict):
         """Registers the user.
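To make the new method concrete, a minimal usage sketch of alias(); the client instance and the user keys here are assumed for illustration:

# A person is anonymous before logging in and a known user afterward (keys assumed)
anonymous_user = {'key': 'session-abc123', 'anonymous': True}
logged_in_user = {'key': 'user-xyz789'}

# The current (new) user is passed first, the previous user second
ld_client.alias(logged_in_user, anonymous_user)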
diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 6174f7f2..1afb3221 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -65,6 +65,8 @@ def make_output_event(self, e): out['userKey'] = self._get_userkey(e) if e.get('reason'): out['reason'] = e.get('reason') + if e.get('contextKind'): + out['contextKind'] = e.get('contextKind') return out elif kind == 'identify': return { @@ -87,6 +89,8 @@ def make_output_event(self, e): out['data'] = e['data'] if e.get('metricValue') is not None: out['metricValue'] = e['metricValue'] + if e.get('contextKind'): + out['contextKind'] = e.get('contextKind') return out elif kind == 'index': return { diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index c35d3bbe..16f81ac7 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -30,6 +30,8 @@ def new_eval_event(self, flag, user, detail, default_value, prereq_of_flag = Non e['prereqOf'] = prereq_of_flag.get('key') if add_experiment_data or self._with_reasons: e['reason'] = detail.reason + if user is not None and user.get('anonymous'): + e['contextKind'] = self._user_to_context_kind(user) return e def new_default_event(self, flag, user, default_value, reason): @@ -48,6 +50,8 @@ def new_default_event(self, flag, user, default_value, reason): e['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') if self._with_reasons: e['reason'] = reason + if user is not None and user.get('anonymous'): + e['contextKind'] = self._user_to_context_kind(user) return e def new_unknown_flag_event(self, key, user, default_value, reason): @@ -60,6 +64,8 @@ def new_unknown_flag_event(self, key, user, default_value, reason): } if self._with_reasons: e['reason'] = reason + if user is not None and user.get('anonymous'): + e['contextKind'] = self._user_to_context_kind(user) return e def new_identify_event(self, user): @@ -79,8 +85,25 @@ def new_custom_event(self, event_name, user, data, metric_value): e['data'] = data if metric_value is not None: e['metricValue'] = metric_value + if user.get('anonymous'): + e['contextKind'] = self._user_to_context_kind(user) return e + def new_alias_event(self, current_user, previous_user): + return { + 'kind': 'alias', + 'key': current_user.get('key'), + 'contextKind': self._user_to_context_kind(current_user), + 'previousKey': previous_user.get('key'), + 'previousContextKind': self._user_to_context_kind(previous_user) + } + + def _user_to_context_kind(self, user): + if user.get('anonymous'): + return "anonymousUser" + else: + return "user" + def _is_experiment(self, flag, reason): if reason is not None: kind = reason['kind'] diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 7615bb16..86cc319e 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -24,6 +24,10 @@ } } +anonymous_user = { + u'key': u'abc', + u'anonymous': True +} def make_client(store = InMemoryFeatureStore()): return LDClient(config=Config(sdk_key = 'SDK_KEY', @@ -172,6 +176,26 @@ def test_track_no_user_key(): assert count_events(client) == 0 +def test_track_anonymous_user(): + with make_client() as client: + client.track('my_event', anonymous_user) + e = get_first_event(client) + assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == anonymous_user and e.get('data') is None and e.get('metricValue') is None and e.get('contextKind') == 'anonymousUser' + + +def test_alias(): + with make_client() as client: + client.alias(user, anonymous_user) + e = get_first_event(client) + 
assert e['kind'] == 'alias' and e['key'] == 'xyz' and e['contextKind'] == 'user' and e['previousKey'] == 'abc' and e['previousContextKind'] == 'anonymousUser' + + +def test_alias_no_user(): + with make_client() as client: + client.alias(None, None) + assert count_events(client) == 0 + + def test_defaults(): config=Config("SDK_KEY", base_uri="http://localhost:3000", defaults={"foo": "bar"}, offline=True) with LDClient(config=config) as client: @@ -226,7 +250,30 @@ def test_event_for_existing_feature(): e.get('reason') is None and e['default'] == 'default' and e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000) + e['debugEventsUntilDate'] == 1000 and + e.get('contextKind') is None) + + +def test_event_for_existing_feature_anonymous_user(): + feature = make_off_flag_with_value('feature.key', 'value') + feature['trackEvents'] = True + feature['debugEventsUntilDate'] = 1000 + store = InMemoryFeatureStore() + store.init({FEATURES: {'feature.key': feature}}) + with make_client(store) as client: + assert 'value' == client.variation('feature.key', anonymous_user, default='default') + e = get_first_event(client) + assert (e['kind'] == 'feature' and + e['key'] == 'feature.key' and + e['user'] == anonymous_user and + e['version'] == feature['version'] and + e['value'] == 'value' and + e['variation'] == 0 and + e.get('reason') is None and + e['default'] == 'default' and + e['trackEvents'] == True and + e['debugEventsUntilDate'] == 1000 and + e['contextKind'] == 'anonymousUser') def test_event_for_existing_feature_with_reason(): From 8c327d04be56cde64693474a1f11879ba2388c22 Mon Sep 17 00:00:00 2001 From: "Robert J. Neal" Date: Thu, 29 Apr 2021 14:04:07 -0700 Subject: [PATCH 215/356] add support for experiment rollouts --- ldclient/flag.py | 29 +++++++++----- ldclient/impl/event_factory.py | 3 ++ testing/test_event_factory.py | 72 ++++++++++++++++++++++++++++++++++ testing/test_flag.py | 53 ++++++++++++++++++++----- 4 files changed, 138 insertions(+), 19 deletions(-) create mode 100644 testing/test_event_factory.py diff --git a/ldclient/flag.py b/ldclient/flag.py index 4c279f93..7bb23fcc 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -172,9 +172,11 @@ def _get_off_value(flag, reason): def _get_value_for_variation_or_rollout(flag, vr, user, reason): - index = _variation_index_for_user(flag, vr, user) + index, inExperiment = _variation_index_for_user(flag, vr, user) if index is None: return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) + if inExperiment: + reason['inExperiment'] = inExperiment return _get_variation(flag, index, reason) @@ -191,34 +193,38 @@ def _get_user_attribute(user, attr): def _variation_index_for_user(feature, rule, user): if rule.get('variation') is not None: - return rule['variation'] + return (rule['variation'], False) rollout = rule.get('rollout') if rollout is None: - return None + return (None, False) variations = rollout.get('variations') + seed = rollout.get('seed') if variations is not None and len(variations) > 0: bucket_by = 'key' if rollout.get('bucketBy') is not None: bucket_by = rollout['bucketBy'] - bucket = _bucket_user(user, feature['key'], feature['salt'], bucket_by) + bucket = _bucket_user(seed, user, feature['key'], feature['salt'], bucket_by) + is_experiment = rollout.get('kind') is not None and rollout['kind'] == 'experiment' sum = 0.0 for wv in variations: sum += wv.get('weight', 0.0) / 100000.0 if bucket < sum: - return wv.get('variation') + is_experiment_partition = is_experiment and wv.get('untracked') is not None and not 
wv['untracked'] + return (wv.get('variation'), is_experiment_partition) # The user's bucket value was greater than or equal to the end of the last bucket. This could happen due # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag # data could contain buckets that don't actually add up to 100000. Rather than returning an error in # this case (or changing the scaling, which would potentially change the results for *all* users), we # will simply put the user in the last bucket. - return variations[-1].get('variation') + is_experiment_partition = is_experiment and variations[-1].get('untracked') is not None and not variations[-1]['untracked'] + return (variations[-1].get('variation'), is_experiment_partition) - return None + return (None, False) -def _bucket_user(user, key, salt, bucket_by): +def _bucket_user(seed, user, key, salt, bucket_by): u_value, should_pass = _get_user_attribute(user, bucket_by) bucket_by_value = _bucketable_string_value(u_value) @@ -228,7 +234,12 @@ def _bucket_user(user, key, salt, bucket_by): id_hash = u_value if user.get('secondary') is not None: id_hash = id_hash + '.' + user['secondary'] - hash_key = '%s.%s.%s' % (key, salt, id_hash) + + prefix = '%s.%s' % (key, salt) + if (seed is not None): + prefix = str(seed) + + hash_key = '%s.%s' % (prefix, id_hash) hash_val = int(hashlib.sha1(hash_key.encode('utf-8')).hexdigest()[:15], 16) result = hash_val / __LONG_SCALE__ return result diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index 16f81ac7..81d99ab9 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -106,6 +106,9 @@ def _user_to_context_kind(self, user): def _is_experiment(self, flag, reason): if reason is not None: + inExperiment = reason.get('inExperiment') + if inExperiment is not None and inExperiment: + return True kind = reason['kind'] if kind == 'RULE_MATCH': index = reason['ruleIndex'] diff --git a/testing/test_event_factory.py b/testing/test_event_factory.py new file mode 100644 index 00000000..6b763e84 --- /dev/null +++ b/testing/test_event_factory.py @@ -0,0 +1,72 @@ +import pytest +from ldclient.flag import EvaluationDetail +from ldclient.impl.event_factory import _EventFactory + +_event_factory_default = _EventFactory(False) +_user = { 'key': 'x' } + +def make_basic_flag_with_rules(kind, should_track_events): + rule = { + 'rollout': { + 'variations': [ + { 'variation': 0, 'weight': 50000 }, + { 'variation': 1, 'weight': 50000 } + ] + } + } + if kind == 'rulematch': + rule.update({'trackEvents': should_track_events}) + + flag = { + 'key': 'feature', + 'on': True, + 'rules': [rule], + 'fallthrough': { 'variation': 0 }, + 'variations': [ False, True ], + 'salt': '' + } + if kind == 'fallthrough': + flag.update({'trackEventsFallthrough': should_track_events}) + return flag + +def test_fallthrough_track_event_false(): + flag = make_basic_flag_with_rules('fallthrough', False) + detail = EvaluationDetail('b', 1, {'kind': 'FALLTHROUGH'}) + + eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) + assert eval.get('trackEvents') is None + +def test_fallthrough_track_event_true(): + flag = make_basic_flag_with_rules('fallthrough', True) + detail = EvaluationDetail('b', 1, {'kind': 'FALLTHROUGH'}) + + eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) + assert eval['trackEvents'] == True + +def test_fallthrough_track_event_false_with_experiment(): + flag = make_basic_flag_with_rules('fallthrough', False) 
+ detail = EvaluationDetail('b', 1, {'kind': 'FALLTHROUGH', 'inExperiment': True}) + + eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) + assert eval['trackEvents'] == True + +def test_rulematch_track_event_false(): + flag = make_basic_flag_with_rules('rulematch', False) + detail = EvaluationDetail('b', 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0}) + + eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) + assert eval.get('trackEvents') is None + +def test_rulematch_track_event_true(): + flag = make_basic_flag_with_rules('rulematch', True) + detail = EvaluationDetail('b', 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0}) + + eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) + assert eval['trackEvents'] == True + +def test_rulematch_track_event_false_with_experiment(): + flag = make_basic_flag_with_rules('rulematch', False) + detail = EvaluationDetail('b', 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0, 'inExperiment': True}) + + eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) + assert eval['trackEvents'] == True diff --git a/testing/test_flag.py b/testing/test_flag.py index 6b50b55a..c0d61707 100644 --- a/testing/test_flag.py +++ b/testing/test_flag.py @@ -391,7 +391,7 @@ def test_variation_index_is_returned_for_bucket(): # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, # so we can construct a rollout whose second bucket just barely contains that value - bucket_value = math.trunc(_bucket_user(user, flag['key'], flag['salt'], 'key') * 100000) + bucket_value = math.trunc(_bucket_user(None, user, flag['key'], flag['salt'], 'key') * 100000) assert bucket_value > 0 and bucket_value < 100000 bad_variation_a = 0 @@ -407,14 +407,14 @@ def test_variation_index_is_returned_for_bucket(): } } result_variation = _variation_index_for_user(flag, rule, user) - assert result_variation == matched_variation + assert result_variation == (matched_variation, False) def test_last_bucket_is_used_if_bucket_value_equals_total_weight(): user = { 'key': 'userkey' } flag = { 'key': 'flagkey', 'salt': 'salt' } # We'll construct a list of variations that stops right at the target bucket value - bucket_value = math.trunc(_bucket_user(user, flag['key'], flag['salt'], 'key') * 100000) + bucket_value = math.trunc(_bucket_user(None, user, flag['key'], flag['salt'], 'key') * 100000) rule = { 'rollout': { @@ -424,21 +424,35 @@ def test_last_bucket_is_used_if_bucket_value_equals_total_weight(): } } result_variation = _variation_index_for_user(flag, rule, user) - assert result_variation == 0 + assert result_variation == (0, False) def test_bucket_by_user_key(): user = { u'key': u'userKeyA' } - bucket = _bucket_user(user, 'hashKey', 'saltyA', 'key') + bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.42157587) user = { u'key': u'userKeyB' } - bucket = _bucket_user(user, 'hashKey', 'saltyA', 'key') + bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.6708485) user = { u'key': u'userKeyC' } - bucket = _bucket_user(user, 'hashKey', 'saltyA', 'key') + bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.10343106) +def test_bucket_by_user_key_with_seed(): + seed = 61 + user = { u'key': u'userKeyA' } + point = _bucket_user(seed, user, 'hashKey', 'saltyA', 'key') + assert point == pytest.approx(0.09801207) + + user = { u'key': u'userKeyB' } + point = 
_bucket_user(seed, user, 'hashKey', 'saltyA', 'key') + assert point == pytest.approx(0.14483777) + + user = { u'key': u'userKeyC' } + point = _bucket_user(seed, user, 'hashKey', 'saltyA', 'key') + assert point == pytest.approx(0.9242641) + def test_bucket_by_int_attr(): user = { u'key': u'userKey', @@ -447,9 +461,9 @@ def test_bucket_by_int_attr(): u'stringAttr': u'33333' } } - bucket = _bucket_user(user, 'hashKey', 'saltyA', 'intAttr') + bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'intAttr') assert bucket == pytest.approx(0.54771423) - bucket2 = _bucket_user(user, 'hashKey', 'saltyA', 'stringAttr') + bucket2 = _bucket_user(None, user, 'hashKey', 'saltyA', 'stringAttr') assert bucket2 == bucket def test_bucket_by_float_attr_not_allowed(): @@ -459,5 +473,24 @@ def test_bucket_by_float_attr_not_allowed(): u'floatAttr': 33.5 } } - bucket = _bucket_user(user, 'hashKey', 'saltyA', 'floatAttr') + bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'floatAttr') assert bucket == 0.0 + +def test_seed_independent_of_salt_and_hashKey(): + seed = 61 + user = { u'key': u'userKeyA' } + point1 = _bucket_user(seed, user, 'hashKey', 'saltyA', 'key') + point2 = _bucket_user(seed, user, 'hashKey', 'saltyB', 'key') + point3 = _bucket_user(seed, user, 'hashKey2', 'saltyA', 'key') + + assert point1 == point2 + assert point2 == point3 + +def test_seed_changes_hash_evaluation(): + seed1 = 61 + user = { u'key': u'userKeyA' } + point1 = _bucket_user(seed1, user, 'hashKey', 'saltyA', 'key') + seed2 = 62 + point2 = _bucket_user(seed2, user, 'hashKey', 'saltyB', 'key') + + assert point1 != point2 \ No newline at end of file From 31c1c217ee8ee1f9c4ad0cb3f48b2360961c3666 Mon Sep 17 00:00:00 2001 From: "Robert J. Neal" Date: Thu, 29 Apr 2021 14:37:09 -0700 Subject: [PATCH 216/356] fix unit test --- ldclient/flag.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/flag.py b/ldclient/flag.py index 7bb23fcc..a8491b2e 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -305,7 +305,7 @@ def _segment_rule_matches_user(rule, user, segment_key, salt): # All of the clauses are met. See if the user buckets in bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy'] - bucket = _bucket_user(user, segment_key, salt, bucket_by) + bucket = _bucket_user(None, user, segment_key, salt, bucket_by) weight = rule['weight'] / 100000.0 return bucket < weight From f52ab393bad63fe4b8c85ea4188292e33556d0e6 Mon Sep 17 00:00:00 2001 From: "Robert J. 
Neal" Date: Thu, 6 May 2021 16:30:50 -0700 Subject: [PATCH 217/356] address PR comments --- ldclient/flag.py | 11 ++++++----- ldclient/impl/event_factory.py | 3 +-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ldclient/flag.py b/ldclient/flag.py index a8491b2e..ed2583ce 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -205,12 +205,12 @@ def _variation_index_for_user(feature, rule, user): if rollout.get('bucketBy') is not None: bucket_by = rollout['bucketBy'] bucket = _bucket_user(seed, user, feature['key'], feature['salt'], bucket_by) - is_experiment = rollout.get('kind') is not None and rollout['kind'] == 'experiment' + is_experiment = rollout.get('kind') == 'experiment' sum = 0.0 for wv in variations: sum += wv.get('weight', 0.0) / 100000.0 if bucket < sum: - is_experiment_partition = is_experiment and wv.get('untracked') is not None and not wv['untracked'] + is_experiment_partition = is_experiment and not wv.get('untracked') return (wv.get('variation'), is_experiment_partition) # The user's bucket value was greater than or equal to the end of the last bucket. This could happen due @@ -218,7 +218,7 @@ def _variation_index_for_user(feature, rule, user): # data could contain buckets that don't actually add up to 100000. Rather than returning an error in # this case (or changing the scaling, which would potentially change the results for *all* users), we # will simply put the user in the last bucket. - is_experiment_partition = is_experiment and variations[-1].get('untracked') is not None and not variations[-1]['untracked'] + is_experiment_partition = is_experiment and not variations[-1].get('untracked') return (variations[-1].get('variation'), is_experiment_partition) return (None, False) @@ -235,9 +235,10 @@ def _bucket_user(seed, user, key, salt, bucket_by): if user.get('secondary') is not None: id_hash = id_hash + '.' 
+ user['secondary'] - prefix = '%s.%s' % (key, salt) - if (seed is not None): + if seed is not None: prefix = str(seed) + else: + prefix = '%s.%s' % (key, salt) hash_key = '%s.%s' % (prefix, id_hash) hash_val = int(hashlib.sha1(hash_key.encode('utf-8')).hexdigest()[:15], 16) diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index 81d99ab9..062c9d02 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -106,8 +106,7 @@ def _user_to_context_kind(self, user): def _is_experiment(self, flag, reason): if reason is not None: - inExperiment = reason.get('inExperiment') - if inExperiment is not None and inExperiment: + if reason.get('inExperiment'): return True kind = reason['kind'] if kind == 'RULE_MATCH': From 527f366e915e891bf7f9ee7a1584f26bd33f5a35 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 20 Sep 2021 18:43:01 -0700 Subject: [PATCH 218/356] use Releaser v2 config --- .ldrelease/config.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.ldrelease/config.yml b/.ldrelease/config.yml index 5615e7d2..b7db59ad 100644 --- a/.ldrelease/config.yml +++ b/.ldrelease/config.yml @@ -1,3 +1,5 @@ +version: 2 + repo: public: python-server-sdk private: python-server-sdk-private @@ -8,15 +10,17 @@ publications: - url: https://launchdarkly-python-sdk.readthedocs.io/en/latest/ description: documentation (readthedocs.io) -releasableBranches: +branches: - name: master description: 7.x - name: 6.x -template: - name: python - env: - LD_SKIP_DATABASE_TESTS: 1 +jobs: + - docker: {} + template: + name: python + env: + LD_SKIP_DATABASE_TESTS: 1 sdk: displayName: "Python" From d2528ed55ea9cb9b2a0cf914da6161eda8bf26f3 Mon Sep 17 00:00:00 2001 From: Ben Woskow <48036130+bwoskow-ld@users.noreply.github.com> Date: Wed, 22 Sep 2021 11:45:52 -0700 Subject: [PATCH 219/356] Use newer docker images (#147) --- .circleci/config.yml | 20 +++++++++----------- test-filesource-optional-requirements.txt | 2 +- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 4153459f..dd40ce24 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -8,19 +8,19 @@ workflows: jobs: - test-linux: name: Python 3.5 - docker-image: circleci/python:3.5-jessie + docker-image: cimg/python:3.5 - test-linux: name: Python 3.6 - docker-image: circleci/python:3.6-jessie + docker-image: cimg/python:3.6 - test-linux: name: Python 3.7 - docker-image: circleci/python:3.7-stretch + docker-image: cimg/python:3.7 - test-linux: name: Python 3.8 - docker-image: circleci/python:3.8-buster + docker-image: cimg/python:3.8 - test-linux: name: Python 3.9 - docker-image: circleci/python:3.9-rc-buster + docker-image: cimg/python:3.9 - test-windows: name: Windows Python 3 py3: true @@ -49,12 +49,10 @@ jobs: - run: name: install requirements command: | - sudo pip install --upgrade pip; - sudo pip install 'virtualenv~=16.0'; - sudo pip install -r test-requirements.txt; - sudo pip install -r test-filesource-optional-requirements.txt; - sudo pip install -r consul-requirements.txt; - sudo python setup.py install; + pip install -r test-requirements.txt; + pip install -r test-filesource-optional-requirements.txt; + pip install -r consul-requirements.txt; + python setup.py install; pip freeze - when: condition: <> diff --git a/test-filesource-optional-requirements.txt b/test-filesource-optional-requirements.txt index 3cfa747b..38bdc65b 100644 --- a/test-filesource-optional-requirements.txt +++ 
b/test-filesource-optional-requirements.txt @@ -1,2 +1,2 @@ pyyaml>=3.0,<5.2 -watchdog>=0.9,<1.0 +watchdog>=0.9,<1.0,!=0.10.5 From 666e5f0b5a3885ff40aab568c90008bb947a2e43 Mon Sep 17 00:00:00 2001 From: Ember Stevens Date: Fri, 24 Sep 2021 14:51:34 -0700 Subject: [PATCH 220/356] Updates docs URLs --- CHANGELOG.md | 6 +++--- CONTRIBUTING.md | 2 +- README.md | 6 +++--- docs/index.rst | 2 +- ldclient/client.py | 2 +- ldclient/flags_state.py | 2 +- ldclient/integrations.py | 10 +++++----- ldclient/util.py | 2 +- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b43cb59..f24c6b9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -168,11 +168,11 @@ Note that starting with this release, generated API documentation is available o ## [6.8.0] - 2019-01-31 ### Added: -- It is now possible to use Consul as a persistent feature store, similar to the existing Redis and DynamoDB integrations. See `Consul` in `ldclient.integrations`, and the reference guide for ["Using a persistent feature store"](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store). +- It is now possible to use Consul as a persistent feature store, similar to the existing Redis and DynamoDB integrations. See `Consul` in `ldclient.integrations`, and the reference guide for ["Storing data"](https://docs.launchdarkly.com/sdk/features/storing-data#python). ## [6.7.0] - 2019-01-15 ### Added: -- It is now possible to use DynamoDB as a persistent feature store, similar to the existing Redis integration. See `DynamoDB` in `ldclient.integrations`, and the reference guide to ["Using a persistent feature store"](https://docs.launchdarkly.com/v2.0/docs/using-a-persistent-feature-store). +- It is now possible to use DynamoDB as a persistent feature store, similar to the existing Redis integration. See `DynamoDB` in `ldclient.integrations`, and the reference guide to ["Storing data"](https://docs.launchdarkly.com/sdk/features/storing-data#python). - The new class `CacheConfig` (in `ldclient.feature_store`) encapsulates all the parameters that control local caching in database feature stores. This takes the place of the `expiration` and `capacity` parameters that are in the deprecated `RedisFeatureStore` constructor; it can be used with DynamoDB and any other database integrations in the future, and if more caching options are added to `CacheConfig` they will be automatically supported in all of the feature stores. ### Deprecated: @@ -261,7 +261,7 @@ _This release was broken and has been removed._ ## [6.0.0] - 2018-05-10 ### Changed: -- To reduce the network bandwidth used for analytics events, feature request events are now sent as counters rather than individual events, and user details are now sent only at intervals rather than in each event. These behaviors can be modified through the LaunchDarkly UI and with the new configuration option `inline_users_in_events`. For more details, see [Analytics Data Stream Reference](https://docs.launchdarkly.com/v2.0/docs/analytics-data-stream-reference). +- To reduce the network bandwidth used for analytics events, feature request events are now sent as counters rather than individual events, and user details are now sent only at intervals rather than in each event. These behaviors can be modified through the LaunchDarkly UI and with the new configuration option `inline_users_in_events`. 
- The analytics event processor now flushes events at a configurable interval defaulting to 5 seconds, like the other SDKs (previously it flushed if no events had been posted for 5 seconds, or if events exceeded a configurable number). This interval is set by the new `Config` property `flush_interval`. ### Removed: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7d2a9b8a..32425905 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing to the LaunchDarkly Server-side SDK for Python -LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkly.com/docs/sdk-contributors-guide) that provides a detailed explanation of how our SDKs work. See below for additional information on how to contribute to this SDK. +LaunchDarkly has published an [SDK contributor's guide](https://docs.launchdarkly.com/sdk/concepts/contributors-guide) that provides a detailed explanation of how our SDKs work. See below for additional information on how to contribute to this SDK. ## Submitting bug reports and feature requests diff --git a/README.md b/README.md index 5782eff1..d016113d 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ ## LaunchDarkly overview -[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/docs/getting-started) using LaunchDarkly today! +[LaunchDarkly](https://www.launchdarkly.com) is a feature management platform that serves over 100 billion feature flags daily to help teams build better software, faster. [Get started](https://docs.launchdarkly.com/home/getting-started) using LaunchDarkly today! [![Twitter Follow](https://img.shields.io/twitter/follow/launchdarkly.svg?style=social&label=Follow&maxAge=2592000)](https://twitter.com/intent/follow?screen_name=launchdarkly) @@ -17,7 +17,7 @@ This version of the LaunchDarkly SDK is compatible with Python 3.5 through 3.9. ## Getting started -Refer to the [SDK reference guide](https://docs.launchdarkly.com/docs/python-sdk-reference) for instructions on getting started with using the SDK. +Refer to the [SDK reference guide](https://docs.launchdarkly.com/sdk/server-side/python) for instructions on getting started with using the SDK. ## Learn more @@ -40,7 +40,7 @@ We encourage pull requests and other contributions from the community. Check out * Gradually roll out a feature to an increasing percentage of users, and track the effect that the feature has on key metrics (for instance, how likely is a user to complete a purchase if they have feature A versus feature B?). * Turn off a feature that you realize is causing performance problems in production, without needing to re-deploy, or even restart the application with a changed configuration file. * Grant access to certain features based on user attributes, like payment plan (eg: users on the ‘gold’ plan get access to more features than users in the ‘silver’ plan). Disable parts of your application to facilitate maintenance, without taking everything offline. -* LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Check out [our documentation](https://docs.launchdarkly.com/docs) for a complete list. +* LaunchDarkly provides feature flag SDKs for a wide variety of languages and technologies. Read [our documentation](https://docs.launchdarkly.com/sdk) for a complete list. 
* Explore LaunchDarkly * [launchdarkly.com](https://www.launchdarkly.com/ "LaunchDarkly Main Website") for more information * [docs.launchdarkly.com](https://docs.launchdarkly.com/ "LaunchDarkly Documentation") for our documentation and SDK reference guides diff --git a/docs/index.rst b/docs/index.rst index 1be4daca..12e66506 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -10,7 +10,7 @@ This is the API reference for the `LaunchDarkly `_ SD The latest version of the SDK can be found on `PyPI `_, and the source code is on `GitHub `_. -For more information, see LaunchDarkly's `Quickstart `_ and `SDK Reference Guide `_. +For more information, see LaunchDarkly's `Quickstart `_ and `SDK Reference Guide `_. .. toctree:: :maxdepth: 2 diff --git a/ldclient/client.py b/ldclient/client.py index d401df39..330e0f29 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -331,7 +331,7 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: """Returns an object that encapsulates the state of all feature flags for a given user, including the flag values and also metadata that can be used on the front end. See the JavaScript SDK Reference Guide on - `Bootstrapping `_. + `Bootstrapping `_. This method does not send analytics events back to LaunchDarkly. diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index 547a5d16..0bb0dbd0 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -12,7 +12,7 @@ class FeatureFlagsState: calling the :func:`ldclient.client.LDClient.all_flags_state()` method. Serializing this object to JSON, using the :func:`to_json_dict` method or ``jsonpickle``, will produce the appropriate data structure for bootstrapping the LaunchDarkly JavaScript client. See the - JavaScript SDK Reference Guide on `Bootstrapping `_. + JavaScript SDK Reference Guide on `Bootstrapping `_. """ def __init__(self, valid: bool): self.__flag_values = {} # type: Dict[str, Any] diff --git a/ldclient/integrations.py b/ldclient/integrations.py index e0f0050c..550f0177 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -27,7 +27,7 @@ def new_feature_store(host: str=None, caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: """Creates a Consul-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the - `SDK reference guide `_. + `SDK reference guide `_. To use this method, you must first install the ``python-consul`` package. Then, put the object returned by this method into the ``feature_store`` property of your client configuration @@ -65,7 +65,7 @@ def new_feature_store(table_name: str, caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: """Creates a DynamoDB-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the - `SDK reference guide `_. + `SDK reference guide `_. To use this method, you must first install the ``boto3`` package containing the AWS SDK gems. Then, put the object returned by this method into the ``feature_store`` property of your @@ -110,7 +110,7 @@ def new_feature_store(url: str='redis://localhost:6379/0', caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: """Creates a Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the - `SDK reference guide `_. + `SDK reference guide `_. 
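As a minimal wiring sketch for the store this docstring describes (the SDK key and Redis URL are placeholders, and the ``redis`` package mentioned below must be installed):

from ldclient.client import LDClient
from ldclient.config import Config
from ldclient.integrations import Redis

# Flag data is read from Redis and cached locally per the default CacheConfig
store = Redis.new_feature_store(url='redis://localhost:6379/0')
client = LDClient(config=Config('YOUR_SDK_KEY', feature_store=store))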
To use this method, you must first install the ``redis`` package. Then, put the object returned by this method into the ``feature_store`` property of your client configuration @@ -161,8 +161,8 @@ def new_data_source(paths: List[str], client may still make network connections to send analytics events, unless you have disabled this in your configuration with ``send_events`` or ``offline``. - The format of the data files is described in the SDK Reference Guide on - `Reading flags from a file `_. + The format of the data files is described in the SDK Reference Guide on + `Reading flags from a file `_. Note that in order to use YAML, you will need to install the ``pyyaml`` package. If the data source encounters any error in any file-- malformed content, a missing file, or a diff --git a/ldclient/util.py b/ldclient/util.py index 2479fe67..66c0c70b 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -43,7 +43,7 @@ def check_uwsgi(): if uwsgi.opt.get('threads') is not None and int(uwsgi.opt.get('threads')) > 1: return log.error("The LaunchDarkly client requires the 'enable-threads' or 'threads' option be passed to uWSGI. " - 'To learn more, see https://docs.launchdarkly.com/sdk/server-side/python#configuring-uwsgi') + 'To learn more, read https://docs.launchdarkly.com/sdk/server-side/python#configuring-uwsgi') class Event: From 3286623416fb7315695c9a09727ba1468f86789c Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Wed, 6 Oct 2021 17:12:12 -0400 Subject: [PATCH 221/356] Add support for 3.10 (#150) --- .circleci/config.yml | 3 +++ README.md | 2 +- setup.py | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index dd40ce24..8e7b5e85 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -21,6 +21,9 @@ workflows: - test-linux: name: Python 3.9 docker-image: cimg/python:3.9 + - test-linux: + name: Python 3.10 + docker-image: cimg/python:3.10 - test-windows: name: Windows Python 3 py3: true diff --git a/README.md b/README.md index d016113d..8ea3a283 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ ## Supported Python versions -This version of the LaunchDarkly SDK is compatible with Python 3.5 through 3.9. It is tested with the most recent patch releases of those versions. Python versions 2.7 to 3.4 are no longer supported. +This version of the LaunchDarkly SDK is compatible with Python 3.5 through 3.10. It is tested with the most recent patch releases of those versions. Python versions 2.7 to 3.4 are no longer supported. 
 ## Getting started
diff --git a/setup.py b/setup.py
index 18ccade9..cf3312f8 100644
--- a/setup.py
+++ b/setup.py
@@ -66,6 +66,9 @@ def run(self):
         'Programming Language :: Python :: 3.5',
         'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: 3.9',
+        'Programming Language :: Python :: 3.10',
         'Topic :: Software Development',
         'Topic :: Software Development :: Libraries',
     ],

From 3a64bf6cda426ee3e526ccbef1552675d6dc6e6b Mon Sep 17 00:00:00 2001
From: charukiewicz
Date: Mon, 22 Nov 2021 23:29:10 +0000
Subject: [PATCH 222/356] started work on FlagBuilder as part of test data
 source implementation

---
 .../impl/integrations/test_data/__init__.py | 0
 .../test_data/test_data_source.py | 60 +++++++++++++++++++
 testing/test_test_data_source.py | 59 ++++++++++++++++++
 3 files changed, 119 insertions(+)
 create mode 100644 ldclient/impl/integrations/test_data/__init__.py
 create mode 100644 ldclient/impl/integrations/test_data/test_data_source.py
 create mode 100644 testing/test_test_data_source.py

diff --git a/ldclient/impl/integrations/test_data/__init__.py b/ldclient/impl/integrations/test_data/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py
new file mode 100644
index 00000000..89aa915b
--- /dev/null
+++ b/ldclient/impl/integrations/test_data/test_data_source.py
@@ -0,0 +1,60 @@
+
+TRUE_VARIATION_INDEX = 0
+FALSE_VARIATION_INDEX = 1
+
+def variation_for_boolean(variation):
+    if variation:
+        return TRUE_VARIATION_INDEX
+    else:
+        return FALSE_VARIATION_INDEX
+
+class _FlagBuilder():
+    def __init__(self, key):
+        self._key = key
+        self._on = True
+        # TODO set up deep copy
+        self._variations = []
+
+    def on(self, aBool):
+        self._on = aBool
+        return self
+
+    def fallthrough_variation(self, variation):
+        if isinstance(variation, bool):
+            self._boolean_flag(self)._fallthrough_variation = variation
+            return self
+        else:
+            self._fallthrough_variation = variation
+            return self
+
+    def off_variation(self, variation) :
+        if isinstance(variation, bool):
+            self._boolean_flag(self)._off_variation = variation
+            return self
+        else:
+            self._off_variation = variation
+            return self
+
+    def boolean_flag(self):
+        if self._is_boolean_flag():
+            return self
+        else:
+            return (self.variations(True, False)
+                .fallthrough_variation(TRUE_VARIATION_INDEX)
+                .off_variation(FALSE_VARIATION_INDEX))
+
+    def _is_boolean_flag(self):
+        return (len(self._variations) == 2
+            and self._variations[TRUE_VARIATION_INDEX] == True
+            and self._variations[FALSE_VARIATION_INDEX] == False)
+
+    def variations(self, *variations):
+        self._variations = variations
+        return self
+
+
+    def variation_for_all_users(self, variation):
+        if isinstance(variation, bool):
+            return self.boolean_flag().variation_for_all_users(variation_for_boolean(variation))
+        else:
+            return self.on(True).fallthrough_variation(variation)
diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py
new file mode 100644
index 00000000..b42de1f8
--- /dev/null
+++ b/testing/test_test_data_source.py
@@ -0,0 +1,59 @@
+import json
+import os
+import pytest
+import threading
+import time
+
+from ldclient.client import LDClient
+from ldclient.config import Config
+from ldclient.feature_store import InMemoryFeatureStore
+from ldclient.versioned_data_kind import FEATURES, SEGMENTS
+
+#from ldclient.integrations import TestData
+from
ldclient.impl.integrations.test_data.test_data_source import _FlagBuilder + + + +data_source = None +store = None +ready = None + + +def setup_function(): + print("Setup") + +def teardown_function(): + print("Teardown") + +def test_makes_flag_builder(): + flagBuilder = _FlagBuilder('test-flag') + assert flagBuilder is not None + assert flagBuilder._key is 'test-flag' + assert flagBuilder._on is True + assert flagBuilder._variations == [] + +def test_can_turn_flag_off(): + flagBuilder = _FlagBuilder('test-flag') + flagBuilder.on(False) + assert flagBuilder._on is False + +def test_can_set_fallthrough_variation(): + flagBuilder = _FlagBuilder('test-flag') + flagBuilder.fallthrough_variation(2) + assert flagBuilder._fallthrough_variation == 2 + +def test_can_set_off_variation(): + flagBuilder = _FlagBuilder('test-flag') + flagBuilder.off_variation(2) + assert flagBuilder._off_variation == 2 + +def test_can_make_boolean_flag(): + flagBuilder = _FlagBuilder('boolean-flag').boolean_flag() + assert flagBuilder._is_boolean_flag() == True + assert flagBuilder._fallthrough_variation == 0 + assert flagBuilder._off_variation == 1 + +def test_can_set_variation_for_all_users(): + flagBuilder = _FlagBuilder('test-flag') + flagBuilder.variation_for_all_users(True) + assert flagBuilder._fallthrough_variation == 0 From dd7561e5dd14df970fb65d1aa9a86412a093b4e9 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Tue, 23 Nov 2021 22:46:20 +0000 Subject: [PATCH 223/356] finished FlagBuilder implementation and added FlagRuleBuilder implementation --- .../test_data/test_data_source.py | 165 +++++++++++++++++- testing/test_test_data_source.py | 68 +++++++- 2 files changed, 226 insertions(+), 7 deletions(-) diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index 89aa915b..d0f500fa 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -1,3 +1,4 @@ +import copy TRUE_VARIATION_INDEX = 0 FALSE_VARIATION_INDEX = 1 @@ -12,9 +13,37 @@ class _FlagBuilder(): def __init__(self, key): self._key = key self._on = True - # TODO set up deep copy self._variations = [] + def copy(self): + to = _FlagBuilder(self._key) + + to._on = self._on + to._variations = copy.copy(self._variations) + + try: + to._off_variation = self._off_variation + except: + pass + + try: + to._fallthrough_variation = self._fallthrough_variation + except: + pass + + try: + to._targets = copy.copy(self._targets) + except: + pass + + try: + to._rules = copy.copy(self._rules) + except: + pass + + return to + + def on(self, aBool): self._on = aBool return self @@ -49,7 +78,8 @@ def _is_boolean_flag(self): and self._variations[FALSE_VARIATION_INDEX] == False) def variations(self, *variations): - self._variations = variations + self._variations = list(variations) + return self @@ -58,3 +88,134 @@ def variation_for_all_users(self, variation): return self.boolean_flag().variation_for_all_users(variation_for_boolean(variation)) else: return self.on(True).fallthrough_variation(variation) + + def variation_for_user(self, user_key, variation): + if isinstance(variation, bool): + return self.boolean_flag().variation_for_user(user_key, variation_for_boolean(variation)) + else: + # `variation` specifies the index of the variation to set + targets = {} + try: + targets = self._targets + except: + self._targets = {} + + for idx, var in enumerate(self._variations): + if (idx == variation): + # If there is no set at the 
current variation, set it to be empty + target_for_variation = [] + if idx in targets: + target_for_variation = targets[idx] + + # If user is not in the current variation set, add them + if user_key not in target_for_variation: + target_for_variation.append(user_key) + + self._targets[idx] = target_for_variation + + else: + # Remove user from the other variation set if necessary + if idx in targets: + target_for_variation = targets[idx] + if user_key in target_for_variation: + user_key_idx = target_for_variation.index(user_key) + del target_for_variation[user_key_idx] + + self._targets[idx] = target_for_variation + + return self + + def add_rule(self, flag_rule_builder): + try: + len(self._rules) >= 0 + except: + self._rules = [] + + self._rules.append(flag_rule_builder) + + def if_match(self, attribute, *values): + flag_rule_builder = _FlagRuleBuilder(self) + return flag_rule_builder.and_match(attribute, values) + + def if_not_match(self, attribute, *values): + flag_rule_builder = _FlagRuleBuilder(self) + return flag_rule_builder.and_not_match(attribute, values) + + def build(self, version): + base_flag_object = { + 'key': self._key, + 'version': version, + 'on': self._on, + 'variations': self._variations + } + + try: + base_flag_object['off_variation'] = self._off_variation + except: + pass + + try: + base_flag_object['fallthrough_variation'] = self._fallthrough_variation + except: + pass + + try: + targets = [] + for var_index, user_keys in self._targets.items(): + targets.append({ + 'variation': var_index, + 'values': user_keys + }) + base_flag_object['targets'] = targets + except: + pass + + try: + base_flag_object['rules'] = [] + for idx, rule in enumerate(self._rules): + base_flag_object['rules'].append(rule.build(idx)) + except: + pass + + return base_flag_object + + +class _FlagRuleBuilder(): + def __init__(self, flag_builder): + self._flag_builder = flag_builder + self._clauses = [] + self._variation = None + + def and_match(self, attribute, *values): + self._clauses.append({ + 'attribute': attribute, + 'operator': 'in', + 'values': list(values), + 'negate': False + }) + return self + + def and_not_match(self, attribute, *values): + self._clauses.append({ + 'attribute': attribute, + 'operator': 'in', + 'values': list(values), + 'negate': True + }) + return self + + def then_return(self, variation): + if isinstance(variation, bool): + self._flag_builder.boolean_flag() + return self.then_return(variation_for_boolean(variation)) + else: + self._variation = variation + self._flag_builder.add_rule(self) + return self._flag_builder + + def build(self, id): + return { + 'id': 'rule' + str(id), + 'variation': self._variation, + 'clauses': self._clauses + } diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index b42de1f8..30607b5b 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -11,6 +11,7 @@ #from ldclient.integrations import TestData from ldclient.impl.integrations.test_data.test_data_source import _FlagBuilder +from ldclient.impl.integrations.test_data.test_data_source import _FlagRuleBuilder @@ -32,28 +33,85 @@ def test_makes_flag_builder(): assert flagBuilder._on is True assert flagBuilder._variations == [] -def test_can_turn_flag_off(): +def test_flagbuilder_can_turn_flag_off(): flagBuilder = _FlagBuilder('test-flag') flagBuilder.on(False) assert flagBuilder._on is False -def test_can_set_fallthrough_variation(): +def test_flagbuilder_can_set_fallthrough_variation(): flagBuilder = _FlagBuilder('test-flag') 
flagBuilder.fallthrough_variation(2) assert flagBuilder._fallthrough_variation == 2 -def test_can_set_off_variation(): +def test_flagbuilder_can_set_off_variation(): flagBuilder = _FlagBuilder('test-flag') flagBuilder.off_variation(2) assert flagBuilder._off_variation == 2 -def test_can_make_boolean_flag(): +def test_flagbuilder_can_make_boolean_flag(): flagBuilder = _FlagBuilder('boolean-flag').boolean_flag() assert flagBuilder._is_boolean_flag() == True assert flagBuilder._fallthrough_variation == 0 assert flagBuilder._off_variation == 1 -def test_can_set_variation_for_all_users(): +def test_flagbuilder_can_set_variation_for_all_users(): flagBuilder = _FlagBuilder('test-flag') flagBuilder.variation_for_all_users(True) assert flagBuilder._fallthrough_variation == 0 + +def test_flagbuilder_can_set_variations(): + flagBuilder = _FlagBuilder('test-flag') + flagBuilder.variations(2,3,4,5) + assert flagBuilder._variations == [2,3,4,5] + +def test_flagbuilder_can_copy(): + flagBuilder = _FlagBuilder('test-flag') + flagBuilder.variations(1,2) + flagBuilderCopy = flagBuilder.copy() + flagBuilder.variations(3,4) + assert flagBuilderCopy._variations == [1,2] + +def test_flagbuilder_can_set_boolean_variation_for_user(): + flagBuilder = _FlagBuilder('user-variation-flag') + flagBuilder.variation_for_user('christian', False) + assert flagBuilder._targets == {1: ['christian']} + +def test_flagbuilder_can_set_numerical_variation_for_user(): + flagBuilder = _FlagBuilder('user-variation-flag') + flagBuilder.variations('a','b','c') + flagBuilder.variation_for_user('christian', 2) + expected_targets = [ + { + 'variation': 2, + 'values': ['christian'] + } + ] + assert flagBuilder.build(1)['targets'] == expected_targets + +def test_flagbuilder_can_build(): + flagBuilder = _FlagBuilder('some-flag') + flagRuleBuilder = _FlagRuleBuilder(flagBuilder) + flagRuleBuilder.and_match('country', 'fr').then_return(True) + expected_result = { + 'fallthrough_variation': 0, + 'key': 'some-flag', + 'off_variation': 1, + 'on': True, + 'variations': [True, False], + 'rules': [ + { + 'clauses': [ + {'attribute': 'country', + 'negate': False, + 'operator': 'in', + 'values': ['fr'] + } + ], + 'id': 'rule0', + 'variation': 0 + } + ], + 'version': 1, + } + + assert flagBuilder.build(1) == expected_result From b3bd9b4f74ad5150622a84d6812308c9e775cc5e Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Wed, 24 Nov 2021 18:40:36 +0000 Subject: [PATCH 224/356] added initial TestData interface and updated tests to not rely on test data internals --- .../test_data/test_data_source.py | 16 ++- testing/test_test_data_source.py | 112 +++++++++--------- 2 files changed, 72 insertions(+), 56 deletions(-) diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index d0f500fa..f49a3fbd 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -9,6 +9,11 @@ def variation_for_boolean(variation): else: return FALSE_VARIATION_INDEX +class TestData(): + + def flag(key): + return _FlagBuilder(key) + class _FlagBuilder(): def __init__(self, key): self._key = key @@ -65,14 +70,14 @@ def off_variation(self, variation) : return self def boolean_flag(self): - if self._is_boolean_flag(): + if self.is_boolean_flag(): return self else: return (self.variations(True, False) .fallthrough_variation(TRUE_VARIATION_INDEX) .off_variation(FALSE_VARIATION_INDEX)) - def _is_boolean_flag(self): + def is_boolean_flag(self): 
return (len(self._variations) == 2 and self._variations[TRUE_VARIATION_INDEX] == True and self._variations[FALSE_VARIATION_INDEX] == False) @@ -135,12 +140,17 @@ def add_rule(self, flag_rule_builder): def if_match(self, attribute, *values): flag_rule_builder = _FlagRuleBuilder(self) - return flag_rule_builder.and_match(attribute, values) + return flag_rule_builder.and_match(attribute, *values) def if_not_match(self, attribute, *values): flag_rule_builder = _FlagRuleBuilder(self) return flag_rule_builder.and_not_match(attribute, values) + def clear_rules(self): + del self._rules + return self + + def build(self, version): base_flag_object = { 'key': self._key, diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 30607b5b..8c29e11f 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -1,8 +1,4 @@ -import json -import os import pytest -import threading -import time from ldclient.client import LDClient from ldclient.config import Config @@ -10,88 +6,98 @@ from ldclient.versioned_data_kind import FEATURES, SEGMENTS #from ldclient.integrations import TestData -from ldclient.impl.integrations.test_data.test_data_source import _FlagBuilder -from ldclient.impl.integrations.test_data.test_data_source import _FlagRuleBuilder +from ldclient.impl.integrations.test_data.test_data_source import TestData -data_source = None -store = None -ready = None - - def setup_function(): print("Setup") def teardown_function(): print("Teardown") -def test_makes_flag_builder(): - flagBuilder = _FlagBuilder('test-flag') - assert flagBuilder is not None - assert flagBuilder._key is 'test-flag' - assert flagBuilder._on is True - assert flagBuilder._variations == [] +def test_makes_flag(): + flag = TestData.flag('test-flag') + assert flag is not None + + builtFlag = flag.build(0) + assert builtFlag['key'] is 'test-flag' + assert builtFlag['on'] is True + assert builtFlag['variations'] == [] def test_flagbuilder_can_turn_flag_off(): - flagBuilder = _FlagBuilder('test-flag') - flagBuilder.on(False) - assert flagBuilder._on is False + flag = TestData.flag('test-flag') + flag.on(False) + + assert flag.build(0)['on'] is False def test_flagbuilder_can_set_fallthrough_variation(): - flagBuilder = _FlagBuilder('test-flag') - flagBuilder.fallthrough_variation(2) - assert flagBuilder._fallthrough_variation == 2 + flag = TestData.flag('test-flag') + flag.fallthrough_variation(2) + + assert flag.build(0)['fallthrough_variation'] == 2 def test_flagbuilder_can_set_off_variation(): - flagBuilder = _FlagBuilder('test-flag') - flagBuilder.off_variation(2) - assert flagBuilder._off_variation == 2 + flag = TestData.flag('test-flag') + flag.off_variation(2) + + assert flag.build(0)['off_variation'] == 2 def test_flagbuilder_can_make_boolean_flag(): - flagBuilder = _FlagBuilder('boolean-flag').boolean_flag() - assert flagBuilder._is_boolean_flag() == True - assert flagBuilder._fallthrough_variation == 0 - assert flagBuilder._off_variation == 1 + flag = TestData.flag('boolean-flag').boolean_flag() + + assert flag.is_boolean_flag() == True + + builtFlag = flag.build(0) + assert builtFlag['fallthrough_variation'] == 0 + assert builtFlag['off_variation'] == 1 def test_flagbuilder_can_set_variation_for_all_users(): - flagBuilder = _FlagBuilder('test-flag') - flagBuilder.variation_for_all_users(True) - assert flagBuilder._fallthrough_variation == 0 + flag = TestData.flag('test-flag') + flag.variation_for_all_users(True) + assert flag.build(0)['fallthrough_variation'] == 0 def 
test_flagbuilder_can_set_variations(): - flagBuilder = _FlagBuilder('test-flag') - flagBuilder.variations(2,3,4,5) - assert flagBuilder._variations == [2,3,4,5] + flag = TestData.flag('test-flag') + flag.variations(2,3,4,5) + assert flag.build(0)['variations'] == [2,3,4,5] + +def test_flagbuilder_can_safely_copy(): + flag = TestData.flag('test-flag') + flag.variations(1,2) + copy_of_flag = flag.copy() + flag.variations(3,4) + assert copy_of_flag.build(0)['variations'] == [1,2] -def test_flagbuilder_can_copy(): - flagBuilder = _FlagBuilder('test-flag') - flagBuilder.variations(1,2) - flagBuilderCopy = flagBuilder.copy() - flagBuilder.variations(3,4) - assert flagBuilderCopy._variations == [1,2] + copy_of_flag.variations(5,6) + assert flag.build(0)['variations'] == [3,4] def test_flagbuilder_can_set_boolean_variation_for_user(): - flagBuilder = _FlagBuilder('user-variation-flag') - flagBuilder.variation_for_user('christian', False) - assert flagBuilder._targets == {1: ['christian']} + flag = TestData.flag('user-variation-flag') + flag.variation_for_user('christian', False) + expected_targets = [ + { + 'variation': 1, + 'values': ['christian'] + } + ] + assert flag.build(0)['targets'] == expected_targets def test_flagbuilder_can_set_numerical_variation_for_user(): - flagBuilder = _FlagBuilder('user-variation-flag') - flagBuilder.variations('a','b','c') - flagBuilder.variation_for_user('christian', 2) + flag = TestData.flag('user-variation-flag') + flag.variations('a','b','c') + flag.variation_for_user('christian', 2) expected_targets = [ { 'variation': 2, 'values': ['christian'] } ] - assert flagBuilder.build(1)['targets'] == expected_targets + assert flag.build(1)['targets'] == expected_targets def test_flagbuilder_can_build(): - flagBuilder = _FlagBuilder('some-flag') - flagRuleBuilder = _FlagRuleBuilder(flagBuilder) - flagRuleBuilder.and_match('country', 'fr').then_return(True) + flag = TestData.flag('some-flag') + flag.if_match('country', 'fr').then_return(True) expected_result = { 'fallthrough_variation': 0, 'key': 'some-flag', @@ -114,4 +120,4 @@ def test_flagbuilder_can_build(): 'version': 1, } - assert flagBuilder.build(1) == expected_result + assert flag.build(1) == expected_result From bd87aceff4e1d8c3c122817381e69997f6d16428 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Tue, 30 Nov 2021 19:33:05 +0000 Subject: [PATCH 225/356] started data source implementation --- .../test_data/test_data_source.py | 33 +++++++++++++- testing/test_test_data_source.py | 43 ++++++++++++++----- 2 files changed, 64 insertions(+), 12 deletions(-) diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index f49a3fbd..964c8064 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -1,4 +1,5 @@ import copy +from ldclient.versioned_data_kind import FEATURES, SEGMENTS TRUE_VARIATION_INDEX = 0 FALSE_VARIATION_INDEX = 1 @@ -10,10 +11,40 @@ def variation_for_boolean(variation): return FALSE_VARIATION_INDEX class TestData(): + def __init__(self): + self._current_flags = {} - def flag(key): + def __call__(self, config, store, ready): + return _TestDataSource() + + @staticmethod + def data_source(): + return TestData() + + def flag(self, key): return _FlagBuilder(key) + def make_init_data(self): + return { FEATURES: self._current_flags } + + +class _TestDataSource(): + + def __init__(self): + pass + + def start(self): + pass + + def stop(self): + pass + + def 
initialized(self): + return True + + + + class _FlagBuilder(): def __init__(self, key): self._key = key diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 8c29e11f..b2da47cc 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -1,4 +1,5 @@ import pytest +import warnings from ldclient.client import LDClient from ldclient.config import Config @@ -9,6 +10,9 @@ from ldclient.impl.integrations.test_data.test_data_source import TestData +# Filter warning arising from Pytest treating classes starting +# with the word 'Test' as part of the test suite +warnings.filterwarnings("ignore", message="cannot collect test class 'TestData'") def setup_function(): print("Setup") @@ -17,7 +21,8 @@ def teardown_function(): print("Teardown") def test_makes_flag(): - flag = TestData.flag('test-flag') + td = TestData.data_source() + flag = td.flag(key='test-flag') assert flag is not None builtFlag = flag.build(0) @@ -25,26 +30,36 @@ def test_makes_flag(): assert builtFlag['on'] is True assert builtFlag['variations'] == [] +def test_initializes_flag_with_client(): + td = TestData.data_source() + client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True)) + + client.close() + def test_flagbuilder_can_turn_flag_off(): - flag = TestData.flag('test-flag') + td = TestData.data_source() + flag = td.flag('test-flag') flag.on(False) assert flag.build(0)['on'] is False def test_flagbuilder_can_set_fallthrough_variation(): - flag = TestData.flag('test-flag') + td = TestData.data_source() + flag = td.flag('test-flag') flag.fallthrough_variation(2) assert flag.build(0)['fallthrough_variation'] == 2 def test_flagbuilder_can_set_off_variation(): - flag = TestData.flag('test-flag') + td = TestData.data_source() + flag = td.flag('test-flag') flag.off_variation(2) assert flag.build(0)['off_variation'] == 2 def test_flagbuilder_can_make_boolean_flag(): - flag = TestData.flag('boolean-flag').boolean_flag() + td = TestData.data_source() + flag = td.flag('boolean-flag').boolean_flag() assert flag.is_boolean_flag() == True @@ -53,17 +68,20 @@ def test_flagbuilder_can_make_boolean_flag(): assert builtFlag['off_variation'] == 1 def test_flagbuilder_can_set_variation_for_all_users(): - flag = TestData.flag('test-flag') + td = TestData.data_source() + flag = td.flag('test-flag') flag.variation_for_all_users(True) assert flag.build(0)['fallthrough_variation'] == 0 def test_flagbuilder_can_set_variations(): - flag = TestData.flag('test-flag') + td = TestData.data_source() + flag = td.flag('test-flag') flag.variations(2,3,4,5) assert flag.build(0)['variations'] == [2,3,4,5] def test_flagbuilder_can_safely_copy(): - flag = TestData.flag('test-flag') + td = TestData.data_source() + flag = td.flag('test-flag') flag.variations(1,2) copy_of_flag = flag.copy() flag.variations(3,4) @@ -73,7 +91,8 @@ def test_flagbuilder_can_safely_copy(): assert flag.build(0)['variations'] == [3,4] def test_flagbuilder_can_set_boolean_variation_for_user(): - flag = TestData.flag('user-variation-flag') + td = TestData.data_source() + flag = td.flag('user-variation-flag') flag.variation_for_user('christian', False) expected_targets = [ { @@ -84,7 +103,8 @@ def test_flagbuilder_can_set_boolean_variation_for_user(): assert flag.build(0)['targets'] == expected_targets def test_flagbuilder_can_set_numerical_variation_for_user(): - flag = TestData.flag('user-variation-flag') + td = TestData.data_source() + flag = td.flag('user-variation-flag') 
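+    # An integer passed to variation_for_user is treated as an index into the
+    # variations list, so 2 selects 'c' for this user.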
flag.variations('a','b','c') flag.variation_for_user('christian', 2) expected_targets = [ @@ -96,7 +116,8 @@ def test_flagbuilder_can_set_numerical_variation_for_user(): assert flag.build(1)['targets'] == expected_targets def test_flagbuilder_can_build(): - flag = TestData.flag('some-flag') + td = TestData.data_source() + flag = td.flag('some-flag') flag.if_match('country', 'fr').then_return(True) expected_result = { 'fallthrough_variation': 0, From fb4aeaff07c6ba55f767b6a3452cca03d637819b Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Tue, 30 Nov 2021 19:47:02 +0000 Subject: [PATCH 226/356] changed FlagBuilder to public class; changed FlagBuilder attributes to be initialized in __init__ and eliminated use of try ... except: pass for handling empty attributes --- .../test_data/test_data_source.py | 88 ++++++------------- testing/test_test_data_source.py | 1 + 2 files changed, 29 insertions(+), 60 deletions(-) diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index f49a3fbd..d320af96 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -12,39 +12,28 @@ def variation_for_boolean(variation): class TestData(): def flag(key): - return _FlagBuilder(key) + return FlagBuilder(key) -class _FlagBuilder(): +class FlagBuilder(): def __init__(self, key): self._key = key self._on = True self._variations = [] + self._off_variation = None + self._fallthrough_variation = None + self._targets = {} + self._rules = [] + def copy(self): - to = _FlagBuilder(self._key) + to = FlagBuilder(self._key) to._on = self._on to._variations = copy.copy(self._variations) - - try: - to._off_variation = self._off_variation - except: - pass - - try: - to._fallthrough_variation = self._fallthrough_variation - except: - pass - - try: - to._targets = copy.copy(self._targets) - except: - pass - - try: - to._rules = copy.copy(self._rules) - except: - pass + to._off_variation = self._off_variation + to._fallthrough_variation = self._fallthrough_variation + to._targets = copy.copy(self._targets) + to._rules = copy.copy(self._rules) return to @@ -96,14 +85,11 @@ def variation_for_all_users(self, variation): def variation_for_user(self, user_key, variation): if isinstance(variation, bool): + # `variation` is True/False value return self.boolean_flag().variation_for_user(user_key, variation_for_boolean(variation)) else: # `variation` specifies the index of the variation to set - targets = {} - try: - targets = self._targets - except: - self._targets = {} + targets = self._targets for idx, var in enumerate(self._variations): if (idx == variation): @@ -131,11 +117,6 @@ def variation_for_user(self, user_key, variation): return self def add_rule(self, flag_rule_builder): - try: - len(self._rules) >= 0 - except: - self._rules = [] - self._rules.append(flag_rule_builder) def if_match(self, attribute, *values): @@ -159,33 +140,20 @@ def build(self, version): 'variations': self._variations } - try: - base_flag_object['off_variation'] = self._off_variation - except: - pass - - try: - base_flag_object['fallthrough_variation'] = self._fallthrough_variation - except: - pass - - try: - targets = [] - for var_index, user_keys in self._targets.items(): - targets.append({ - 'variation': var_index, - 'values': user_keys - }) - base_flag_object['targets'] = targets - except: - pass - - try: - base_flag_object['rules'] = [] - for idx, rule in enumerate(self._rules): - 
base_flag_object['rules'].append(rule.build(idx)) - except: - pass + base_flag_object['off_variation'] = self._off_variation + base_flag_object['fallthrough_variation'] = self._fallthrough_variation + + targets = [] + for var_index, user_keys in self._targets.items(): + targets.append({ + 'variation': var_index, + 'values': user_keys + }) + base_flag_object['targets'] = targets + + base_flag_object['rules'] = [] + for idx, rule in enumerate(self._rules): + base_flag_object['rules'].append(rule.build(idx)) return base_flag_object diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 8c29e11f..45cf98e8 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -103,6 +103,7 @@ def test_flagbuilder_can_build(): 'key': 'some-flag', 'off_variation': 1, 'on': True, + 'targets': [], 'variations': [True, False], 'rules': [ { From 633669a2529e786676930e10e35fe0504b6e0150 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 30 Nov 2021 13:41:34 -0800 Subject: [PATCH 227/356] (big segments 1) add public config/interface types --- .gitignore | 1 + ldclient/client.py | 6 +- ldclient/config.py | 77 +++++++++++++++++++- ldclient/flag.py | 44 ++++++++++-- ldclient/interfaces.py | 160 ++++++++++++++++++++++++++++++++++++++++- 5 files changed, 276 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index f0def2a6..291d3e29 100644 --- a/.gitignore +++ b/.gitignore @@ -69,3 +69,4 @@ p2venv test-packaging-venv .vscode/ +.python-version diff --git a/ldclient/client.py b/ldclient/client.py index 330e0f29..19178f2e 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -19,7 +19,7 @@ from ldclient.flags_state import FeatureFlagsState from ldclient.impl.event_factory import _EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor -from ldclient.interfaces import FeatureStore +from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureStore from ldclient.polling import PollingUpdateProcessor from ldclient.streaming import StreamingUpdateProcessor from ldclient.util import check_uwsgi, log @@ -409,5 +409,9 @@ def secure_mode_hash(self, user: dict) -> str: return "" return hmac.new(self._config.sdk_key.encode(), key.encode(), hashlib.sha256).hexdigest() + @property + def big_segment_store_status_provider(self) -> BigSegmentStoreStatusProvider: + return None + __all__ = ['LDClient', 'Config'] diff --git a/ldclient/config.py b/ldclient/config.py index cccb7f27..038a7348 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -8,12 +8,76 @@ from ldclient.feature_store import InMemoryFeatureStore from ldclient.util import log -from ldclient.interfaces import EventProcessor, FeatureStore, UpdateProcessor, FeatureRequester +from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor, FeatureRequester GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' STREAM_FLAGS_PATH = '/flags' +class BigSegmentsConfig: + """Configuration options related to Big Segments. + + Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + documentation: https://docs.launchdarkly.com/home/users/big-segments + + If your application uses Big Segments, you will need to create a `BigSegmentsConfig` that at a + minimum specifies what database integration to use, and then pass the `BigSegmentsConfig` + object as the `big_segments` parameter when creating a :class:`Config`. 
+ + This example shows Big Segments being configured to use Redis: + :: + + from ldclient.config import Config, BigSegmentsConfig + from ldclient.integrations import Redis + store = Redis.new_big_segment_store("my-table-name") + config = Config(big_segments=BigSegmentsConfig(store = store)) + """ + def __init__(self, + store: Optional[BigSegmentStore] = None, + user_cache_size: int=1000, + user_cache_time: float=5, + status_poll_interval: float=5, + stale_after: float=120): + """ + + :param store: the implementation of :class:`ldclient.interfaces.BigSegmentStore` that will + be used to query the Big Segments database + :param user_cache_size: the maximum number of users whose Big Segment state will be cached + by the SDK at any given time + :param user_cache_time: the maximum length of time (in seconds) that the Big Segment state + for a user will be cached by the SDK + :param status_poll_interval: the interval (in seconds) at which the SDK will poll the Big + Segment store to make sure it is available and to determine how long ago it was updated + :param stale_after: the maximum length of time between updates of the Big Segments data + before the data is considered out of date + """ + self.__store = store + self.__user_cache_size = user_cache_size + self.__user_cache_time = user_cache_time + self.__status_poll_interval = status_poll_interval + self.__stale_after = stale_after + pass + + @property + def store(self) -> Optional[BigSegmentStore]: + return self.__store + + @property + def user_cache_size(self) -> int: + return self.__user_cache_size + + @property + def user_cache_time(self) -> float: + return self.__user_cache_time + + @property + def status_poll_interval(self) -> float: + return self.__status_poll_interval + + @property + def stale_after(self) -> float: + return self.__stale_after + class HTTPConfig: """Advanced HTTP configuration options for the SDK client. @@ -109,7 +173,8 @@ def __init__(self, diagnostic_recording_interval: int=900, wrapper_name: Optional[str]=None, wrapper_version: Optional[str]=None, - http: HTTPConfig=HTTPConfig()): + http: HTTPConfig=HTTPConfig(), + big_segments: Optional[BigSegmentsConfig]=None): """ :param sdk_key: The SDK key for your LaunchDarkly account. This is always required. :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default @@ -204,6 +269,7 @@ def __init__(self, self.__wrapper_name = wrapper_name self.__wrapper_version = wrapper_version self.__http = http + self.__big_segments = BigSegmentsConfig() if not big_segments else big_segments def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. 
@@ -236,7 +302,8 @@ def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': diagnostic_recording_interval=self.__diagnostic_recording_interval, wrapper_name=self.__wrapper_name, wrapper_version=self.__wrapper_version, - http=self.__http) + http=self.__http, + big_segments=self.__big_segments) # for internal use only - probably should be part of the client logic def get_default(self, key, default): @@ -366,6 +433,10 @@ def wrapper_version(self) -> Optional[str]: def http(self) -> HTTPConfig: return self.__http + @property + def big_segments(self) -> BigSegmentsConfig: + return self.__big_segments + def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key == '': log.warning("Missing or blank sdk_key.") diff --git a/ldclient/flag.py b/ldclient/flag.py index ed2583ce..629f831b 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -60,12 +60,12 @@ def reason(self) -> dict: * ``kind``: The general category of reason, as follows: * ``"OFF"``: the flag was off - * ``"FALLTHROUGH"`` -- the flag was on but the user did not match any targets or rules - * ``"TARGET_MATCH"`` -- the user was specifically targeted for this flag - * ``"RULE_MATCH"`` -- the user matched one of the flag's rules - * ``"PREREQUISITE_FAILED"`` -- the flag was considered off because it had at least one + * ``"FALLTHROUGH"``: the flag was on but the user did not match any targets or rules + * ``"TARGET_MATCH"``: the user was specifically targeted for this flag + * ``"RULE_MATCH"``: the user matched one of the flag's rules + * ``"PREREQUISITE_FAILED"``: the flag was considered off because it had at least one prerequisite flag that did not return the desired variation - * ``"ERROR"`` - the flag could not be evaluated due to an unexpected error. + * ``"ERROR"``: the flag could not be evaluated due to an unexpected error. * ``ruleIndex``, ``ruleId``: The positional index and unique identifier of the matched rule, if the kind was ``RULE_MATCH`` @@ -75,16 +75,20 @@ def reason(self) -> dict: * ``errorKind``: further describes the nature of the error if the kind was ``ERROR``, e.g. ``"FLAG_NOT_FOUND"`` + + * ``bigSegmentsStatus``: describes the validity of Big Segment information, if and only if + the flag evaluation required querying at least one Big Segment; otherwise it returns None. + Allowable values are defined in `BigSegmentStatus`. For more information, read the + LaunchDarkly documentation: https://docs.launchdarkly.com/home/users/big-segments """ return self.__reason def is_default_value(self) -> bool: - """Returns True if the flag evaluated to the default value rather than one of its variations. """ return self.__variation_index is None - + def __eq__(self, other) -> bool: return self.value == other.value and self.variation_index == other.variation_index and self.reason == other.reason @@ -98,6 +102,32 @@ def __repr__(self) -> str: return self.__str__() +class BigSegmentStatus: + """ + Indicates that the Big Segment query involved in the flag evaluation was successful, and + the segment state is considered up to date. + """ + HEALTHY = "HEALTHY" + + """ + Indicates that the Big Segment query involved in the flag evaluation was successful, but + segment state may not be up to date. + """ + STALE = "STALE" + + """ + Indicates that Big Segments could not be queried for the flag evaluation because the SDK + configuration did not include a Big Segment store. 
+    """
+    NOT_CONFIGURED = "NOT_CONFIGURED"
+
+    """
+    Indicates that the Big Segment query involved in the flag evaluation failed, for
+    instance due to a database error.
+    """
+    STORE_ERROR = "STORE_ERROR"
+
+
 EvalResult = namedtuple('EvalResult', ['detail', 'events'])
diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py
index 08919ed2..a4e960e7 100644
--- a/ldclient/interfaces.py
+++ b/ldclient/interfaces.py
@@ -6,7 +6,7 @@
 from abc import ABCMeta, abstractmethod, abstractproperty
 
 from .versioned_data_kind import VersionedDataKind
-from typing import Mapping, Callable, Any
+from typing import Any, Callable, Mapping, Optional
 
 class FeatureStore:
     """
@@ -250,3 +250,161 @@ def describe_configuration(self, config) -> str:
         :return: a string describing the type of the component, or None
         """
         pass
+
+
+class BigSegmentStoreMetadata:
+    """
+    Values returned by :func:`BigSegmentStore.get_metadata()`.
+    """
+    def __init__(self, last_up_to_date: Optional[int]):
+        self.__last_up_to_date = last_up_to_date
+        pass
+
+    @property
+    def last_up_to_date(self) -> Optional[int]:
+        """
+        The Unix epoch millisecond timestamp of the last update to the `BigSegmentStore`. It is
+        None if the store has never been updated.
+        """
+        return self.__last_up_to_date
+
+
+class BigSegmentStore:
+    """
+    Interface for a read-only data store that allows querying of user membership in Big Segments.
+
+    Big Segments are a specific type of user segments. For more information, read the LaunchDarkly
+    documentation: https://docs.launchdarkly.com/home/users/big-segments
+    """
+
+    @abstractmethod
+    def get_metadata(self) -> BigSegmentStoreMetadata:
+        """
+        Returns information about the overall state of the store. This method will be called only
+        when the SDK needs the latest state, so it should not be cached.
+
+        :return: the store metadata
+        """
+        pass
+
+    @abstractmethod
+    def get_membership(self, user_hash: str) -> dict:
+        """
+        Queries the store for a snapshot of the current segment state for a specific user.
+
+        The user_hash is a base64-encoded string produced by hashing the user key as defined by
+        the Big Segments specification; the store implementation does not need to know the details
+        of how this is done, because it deals only with already-hashed keys, but the string can be
+        assumed to only contain characters that are valid in base64.
+
+        The return value should be either a `dict`, or None if the user is not referenced in any Big
+        Segments. Each key in the dictionary is a "segment reference", which is how segments are
+        identified in Big Segment data. This string is not identical to the segment key-- the SDK
+        will add other information. The store implementation should not be concerned with the
+        format of the string. Each value in the dictionary is True if the user is explicitly included
+        in the segment, False if the user is explicitly excluded from the segment-- and is not also
+        explicitly included (that is, if both an include and an exclude existed in the data, the
+        include would take precedence). If the user's status in a particular segment is undefined,
+        there should be no key or value for that segment.
+
+        This dictionary may be cached by the SDK, so it should not be modified after it is created.
+        It is a snapshot of the segment membership state at one point in time.
+
+        :param user_hash: the hashed user key
+        :return: True/False values for Big Segments that reference this user
+        """
+        pass
+
+
+class BigSegmentStoreStatus:
+    """
+    Information about the state of a Big Segment store, provided by :class:`BigSegmentStoreStatusProvider`.
+
+    Big Segments are a specific type of user segments. For more information, read the LaunchDarkly
+    documentation: https://docs.launchdarkly.com/home/users/big-segments
+    """
+    def __init__(self, available: bool, stale: bool):
+        self.__available = available
+        self.__stale = stale
+
+    @property
+    def available(self) -> bool:
+        """
+        True if the Big Segment store is able to respond to queries, so that the SDK can evaluate
+        whether a user is in a segment or not.
+
+        If this property is False, the store is not able to make queries (for instance, it may not have
+        a valid database connection). In this case, the SDK will treat any reference to a Big Segment
+        as if no users are included in that segment. Also, the :func:`ldclient.flag.EvaluationDetail.reason`
+        associated with any flag evaluation that references a Big Segment when the store is not
+        available will have a `bigSegmentsStatus` of `"STORE_ERROR"`.
+        """
+        return self.__available
+
+    @property
+    def stale(self) -> bool:
+        """
+        True if the Big Segment store is available, but has not been updated within the amount of time
+        specified by :func:`ldclient.config.BigSegmentsConfig.stale_after`.
+
+        This may indicate that the LaunchDarkly Relay Proxy, which populates the store, has stopped
+        running or has become unable to receive fresh data from LaunchDarkly. Any feature flag
+        evaluations that reference a Big Segment will be using the last known data, which may be out
+        of date. Also, the :func:`ldclient.flag.EvaluationDetail.reason` associated with those evaluations
+        will have a `bigSegmentsStatus` of `"STALE"`.
+        """
+        return self.__stale
+
+
+class BigSegmentStoreStatusProvider:
+    """
+    An interface for querying the status of a Big Segment store.
+
+    The Big Segment store is the component that receives information about Big Segments, normally
+    from a database populated by the LaunchDarkly Relay Proxy. Big Segments are a specific type
+    of user segments. For more information, read the LaunchDarkly documentation:
+    https://docs.launchdarkly.com/home/users/big-segments
+
+    An implementation of this abstract class is returned by :func:`ldclient.client.LDClient.big_segment_store_status_provider`.
+    Application code never needs to implement this interface.
+
+    There are two ways to interact with the status. One is to simply get the current status; if its
+    `available` property is true, then the SDK is able to evaluate user membership in Big Segments,
+    and the `stale` property indicates whether the data might be out of date.
+
+    The other way is to subscribe to status change notifications. Applications may wish to know if
+    there is an outage in the Big Segment store, or if it has become stale (the Relay Proxy has
+    stopped updating it with new data), since then flag evaluations that reference a Big Segment
+    might return incorrect values. Use `add_listener` to register a callback for notifications.
+    """
+
+    @abstractproperty
+    def status(self) -> Optional[BigSegmentStoreStatus]:
+        """
+        Gets the current status of the store, if known.
+
+        :return: The status, or None if the SDK has not yet queried the Big Segment store status
+        """
+        pass
+
+    @abstractmethod
+    def add_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> None:
+        """
+        Subscribes for notifications of status changes.
+
+        The listener is a function or method that will be called with a single parameter: the
+        new `BigSegmentStoreStatus`.
+
+        :param listener: the listener to add
+        """
+        pass
+
+    @abstractmethod
+    def remove_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> None:
+        """
+        Unsubscribes from notifications of status changes.
+
+        :param listener: a listener that was previously added with `add_listener`; if it was not,
+            this method does nothing
+        """
+        pass

From 3f624ba3bd8d53aaa198c936b98eed2aec955cf9 Mon Sep 17 00:00:00 2001
From: charukiewicz
Date: Tue, 30 Nov 2021 21:44:32 +0000
Subject: [PATCH 228/356] added implementation of test data source

---
 .../test_data/test_data_source.py             | 70 ++++++++++++++++---
 testing/test_test_data_source.py              | 37 +++++++++-
 2 files changed, 97 insertions(+), 10 deletions(-)

diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py
index 69c94d9c..8b865e4f 100644
--- a/ldclient/impl/integrations/test_data/test_data_source.py
+++ b/ldclient/impl/integrations/test_data/test_data_source.py
@@ -1,5 +1,6 @@
 import copy
-from ldclient.versioned_data_kind import FEATURES, SEGMENTS
+from ldclient.versioned_data_kind import FEATURES
+from ldclient.rwlock import ReadWriteLock
 
 TRUE_VARIATION_INDEX = 0
 FALSE_VARIATION_INDEX = 1
@@ -12,36 +13,89 @@ def variation_for_boolean(variation):
 
 class TestData():
     def __init__(self):
+        self._flag_builders = {}
         self._current_flags = {}
+        self._lock = ReadWriteLock()
+        self._instances = []
 
     def __call__(self, config, store, ready):
-        return _TestDataSource()
+        data_source = _TestDataSource(store, self)
+        try:
+            self._lock.lock()
+            self._instances.append(data_source)
+        finally:
+            self._lock.unlock()
+
+        return data_source
 
     @staticmethod
    def data_source():
         return TestData()
 
     def flag(self, key):
-        return FlagBuilder(key)
+        try:
+            self._lock.rlock()
+            if key in self._flag_builders and self._flag_builders[key]:
+                return self._flag_builders[key].copy()
+            else:
+                return FlagBuilder(key).boolean_flag()
+        finally:
+            self._lock.runlock()
+
+    def update(self, flag_builder):
+        try:
+            self._lock.lock()
+
+            old_version = 0
+            if flag_builder._key in self._current_flags:
+                old_flag = self._current_flags[flag_builder._key]
+                if old_flag:
+                    old_version = old_flag['version']
+
+            new_flag = flag_builder.build(old_version + 1)
+
+            self._current_flags[flag_builder._key] = new_flag
+            self._flag_builders[flag_builder._key] = flag_builder.copy()
+        finally:
+            self._lock.unlock()
+
+        for instance in self._instances:
+            instance.upsert(new_flag)
+
+        return self
+
     def make_init_data(self):
-        return { FEATURES: self._current_flags }
+        return { FEATURES: copy.copy(self._current_flags) }
+
+    def closed_instance(self, instance):
+        try:
+            self._lock.lock()
+            self._instances.remove(instance)
+        finally:
+            self._lock.unlock()
+
 
 class _TestDataSource():
 
-    def __init__(self):
-        pass
+    def __init__(self, feature_store, test_data):
+        self._feature_store = feature_store
+        self._test_data = test_data
 
     def start(self):
-        pass
+        self._feature_store.init(self._test_data.make_init_data())
 
     def stop(self):
-        pass
+        self._test_data.closed_instance(self)
 
     def initialized(self):
         return True
 
+    def upsert(self, new_flag):
+
self._feature_store.upsert(FEATURES, new_flag) + + class FlagBuilder(): def __init__(self, key): diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index aa5e5a7c..26ae1981 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -28,11 +28,44 @@ def test_makes_flag(): builtFlag = flag.build(0) assert builtFlag['key'] is 'test-flag' assert builtFlag['on'] is True - assert builtFlag['variations'] == [] + assert builtFlag['variations'] == [True, False] + def test_initializes_flag_with_client(): td = TestData.data_source() - client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True)) + td.update(td.flag('some-flag')) + + store = InMemoryFeatureStore() + + client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) + + assert store.get(FEATURES, 'some-flag') == td.flag('some-flag').build(1) + + client.close() + +def test_update_after_close(): + td = TestData.data_source() + + store = InMemoryFeatureStore() + + client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) + + client.close() + + td.update(td.flag('some-flag')) + + assert store.get(FEATURES, 'some-flag') == None + +def test_update_after_client_initialization(): + td = TestData.data_source() + + store = InMemoryFeatureStore() + + client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) + + td.update(td.flag('some-flag')) + + assert store.get(FEATURES, 'some-flag') == td.flag('some-flag').build(1) client.close() From 4ee49e073146c5b94c105f90e693049d9b4fb634 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 30 Nov 2021 13:45:43 -0800 Subject: [PATCH 229/356] docstring --- ldclient/client.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ldclient/client.py b/ldclient/client.py index 19178f2e..f9d17fa0 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -411,6 +411,13 @@ def secure_mode_hash(self, user: dict) -> str: @property def big_segment_store_status_provider(self) -> BigSegmentStoreStatusProvider: + """ + Returns an interface for tracking the status of a Big Segment store. + + The :class:`ldclient.interfaces.BigSegmentStoreStatusProvider` has methods for checking + whether the Big Segment store is (as far as the SDK knows) currently operational and + tracking changes in this status. 
+ """ return None From 06a62d3802cb697a09c61c3a5edca1c53bdb9094 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 30 Nov 2021 13:46:27 -0800 Subject: [PATCH 230/356] formatting --- ldclient/config.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ldclient/config.py b/ldclient/config.py index 038a7348..8d5e8118 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -39,7 +39,6 @@ def __init__(self, status_poll_interval: float=5, stale_after: float=120): """ - :param store: the implementation of :class:`ldclient.interfaces.BigSegmentStore` that will be used to query the Big Segments database :param user_cache_size: the maximum number of users whose Big Segment state will be cached From 8eb95e189e70e1fb02b6930fc8f520a071753304 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 30 Nov 2021 13:52:15 -0800 Subject: [PATCH 231/356] ensure property doesn't return None --- ldclient/client.py | 3 ++- ldclient/impl/big_segments.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 ldclient/impl/big_segments.py diff --git a/ldclient/client.py b/ldclient/client.py index f9d17fa0..1cb20af4 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -17,6 +17,7 @@ from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.flag import EvaluationDetail, evaluate, error_reason from ldclient.flags_state import FeatureFlagsState +from ldclient.impl.big_segments import NullBigSegmentStoreStatusProvider from ldclient.impl.event_factory import _EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureStore @@ -418,7 +419,7 @@ def big_segment_store_status_provider(self) -> BigSegmentStoreStatusProvider: whether the Big Segment store is (as far as the SDK knows) currently operational and tracking changes in this status. 
""" - return None + return NullBigSegmentStoreStatusProvider() __all__ = ['LDClient', 'Config'] diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py new file mode 100644 index 00000000..df57b9e4 --- /dev/null +++ b/ldclient/impl/big_segments.py @@ -0,0 +1,12 @@ +from ldclient.interfaces import BigSegmentStoreStatus, BigSegmentStoreStatusProvider +from typing import Callable, Optional + +class NullBigSegmentStoreStatusProvider(BigSegmentStoreStatusProvider): + def status(self) -> Optional[BigSegmentStoreStatus]: + return None + + def add_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> None: + pass + + def remove_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> None: + pass From be01431eb39645496e955dca9c70c4973e28d469 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 1 Dec 2021 10:53:58 -0800 Subject: [PATCH 232/356] (big segments 2) implement evaluation, refactor eval logic & modules --- docs/api-deprecated.rst | 12 + docs/api-main.rst | 13 +- docs/index.rst | 1 + ldclient/client.py | 17 +- ldclient/evaluation.py | 196 ++++++++++ ldclient/flag.py | 369 +----------------- ldclient/flags_state.py | 103 +---- ldclient/impl/evaluator.py | 309 +++++++++++++++ testing/impl/__init__.py | 0 testing/impl/evaluator_util.py | 99 +++++ .../{test_flag.py => impl/test_evaluator.py} | 119 +++--- testing/impl/test_evaluator_big_segment.py | 77 ++++ .../test_evaluator_segment.py} | 9 +- testing/test_event_factory.py | 2 +- 14 files changed, 791 insertions(+), 535 deletions(-) create mode 100644 docs/api-deprecated.rst create mode 100644 ldclient/evaluation.py create mode 100644 ldclient/impl/evaluator.py create mode 100644 testing/impl/__init__.py create mode 100644 testing/impl/evaluator_util.py rename testing/{test_flag.py => impl/test_evaluator.py} (79%) create mode 100644 testing/impl/test_evaluator_big_segment.py rename testing/{test_segment.py => impl/test_evaluator_segment.py} (93%) diff --git a/docs/api-deprecated.rst b/docs/api-deprecated.rst new file mode 100644 index 00000000..4b24254b --- /dev/null +++ b/docs/api-deprecated.rst @@ -0,0 +1,12 @@ +Deprecated modules +=============================== + +ldclient.flag module +-------------------- + +This module is deprecated. For the :class:`~ldclient.evaluation.EvaluationDetail` type, please use :mod:`ldclient.evaluation`. + +ldclient.flags_state module +--------------------------- + +This module is deprecated. For the :class:`~ldclient.evaluation.FeatureFlagsState` type, please use :mod:`ldclient.evaluation`. diff --git a/docs/api-main.rst b/docs/api-main.rst index 003737f8..15ddca1b 100644 --- a/docs/api-main.rst +++ b/docs/api-main.rst @@ -19,15 +19,8 @@ ldclient.config module .. automodule:: ldclient.config :members: -ldclient.flag module --------------------- +ldclient.evaluation module +-------------------------- -.. automodule:: ldclient.flag - :members: EvaluationDetail - -ldclient.flags_state module ---------------------------- - -.. automodule:: ldclient.flags_state +.. 
automodule:: ldclient.evaluation :members: - :exclude-members: __init__, add_flag diff --git a/docs/index.rst b/docs/index.rst index 12e66506..92c01ed2 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -19,3 +19,4 @@ For more information, see LaunchDarkly's `Quickstart FeatureFlagsState: if client_only and not flag.get('clientSide', False): continue try: - detail = evaluate(flag, user, self._store, self._event_factory_default).detail + detail = self._evaluator.evaluate(flag, user, self._event_factory_default).detail state.add_flag(flag, detail.value, detail.variation_index, detail.reason if with_reasons else None, details_only_if_tracked) except Exception as e: diff --git a/ldclient/evaluation.py b/ldclient/evaluation.py new file mode 100644 index 00000000..77e409fa --- /dev/null +++ b/ldclient/evaluation.py @@ -0,0 +1,196 @@ +import json +import time +from typing import Any, Dict, Optional + +class EvaluationDetail: + """ + The return type of :func:`ldclient.client.LDClient.variation_detail()`, combining the result of a + flag evaluation with information about how it was calculated. + """ + def __init__(self, value: object, variation_index: Optional[int], reason: dict): + """Constructs an instance. + """ + self.__value = value + self.__variation_index = variation_index + self.__reason = reason + + @property + def value(self) -> object: + """The result of the flag evaluation. This will be either one of the flag's + variations or the default value that was passed to the + :func:`ldclient.client.LDClient.variation_detail()` method. + """ + return self.__value + + @property + def variation_index(self) -> Optional[int]: + """The index of the returned value within the flag's list of variations, e.g. + 0 for the first variation -- or None if the default value was returned. + """ + return self.__variation_index + + @property + def reason(self) -> dict: + """A dictionary describing the main factor that influenced the flag evaluation value. + It contains the following properties: + + * ``kind``: The general category of reason, as follows: + + * ``"OFF"``: the flag was off + * ``"FALLTHROUGH"``: the flag was on but the user did not match any targets or rules + * ``"TARGET_MATCH"``: the user was specifically targeted for this flag + * ``"RULE_MATCH"``: the user matched one of the flag's rules + * ``"PREREQUISITE_FAILED"``: the flag was considered off because it had at least one + prerequisite flag that did not return the desired variation + * ``"ERROR"``: the flag could not be evaluated due to an unexpected error. + + * ``ruleIndex``, ``ruleId``: The positional index and unique identifier of the matched + rule, if the kind was ``RULE_MATCH`` + + * ``prerequisiteKey``: The flag key of the prerequisite that failed, if the kind was + ``PREREQUISITE_FAILED`` + + * ``errorKind``: further describes the nature of the error if the kind was ``ERROR``, + e.g. ``"FLAG_NOT_FOUND"`` + + * ``bigSegmentsStatus``: describes the validity of Big Segment information, if and only if + the flag evaluation required querying at least one Big Segment; otherwise it returns None. + Allowable values are defined in :class:`BigSegmentsStatus`. For more information, read the + LaunchDarkly documentation: https://docs.launchdarkly.com/home/users/big-segments + """ + return self.__reason + + def is_default_value(self) -> bool: + """Returns True if the flag evaluated to the default value rather than one of its + variations. 
+ """ + return self.__variation_index is None + + def __eq__(self, other) -> bool: + return self.value == other.value and self.variation_index == other.variation_index and self.reason == other.reason + + def __ne__(self, other) -> bool: + return not self.__eq__(other) + + def __str__(self) -> str: + return "(value=%s, variation_index=%s, reason=%s)" % (self.value, self.variation_index, self.reason) + + def __repr__(self) -> str: + return self.__str__() + + +class BigSegmentsStatus: + """ + Indicates that the Big Segment query involved in the flag evaluation was successful, and + the segment state is considered up to date. + """ + HEALTHY = "HEALTHY" + + """ + Indicates that the Big Segment query involved in the flag evaluation was successful, but + segment state may not be up to date. + """ + STALE = "STALE" + + """ + Indicates that Big Segments could not be queried for the flag evaluation because the SDK + configuration did not include a Big Segment store. + """ + NOT_CONFIGURED = "NOT_CONFIGURED" + + """ + Indicates that the Big Segment query involved in the flag evaluation failed, for + instance due to a database error. + """ + STORE_ERROR = "STORE_ERROR" + + +class FeatureFlagsState: + """ + A snapshot of the state of all feature flags with regard to a specific user, generated by + calling the :func:`ldclient.client.LDClient.all_flags_state()` method. Serializing this + object to JSON, using the :func:`to_json_dict` method or ``jsonpickle``, will produce the + appropriate data structure for bootstrapping the LaunchDarkly JavaScript client. See the + JavaScript SDK Reference Guide on `Bootstrapping `_. + """ + def __init__(self, valid: bool): + self.__flag_values = {} # type: Dict[str, Any] + self.__flag_metadata = {} # type: Dict[str, Any] + self.__valid = valid + + # Used internally to build the state map + def add_flag(self, flag, value, variation, reason, details_only_if_tracked): + key = flag['key'] + self.__flag_values[key] = value + meta = {} + with_details = (not details_only_if_tracked) or flag.get('trackEvents') + if not with_details: + if flag.get('debugEventsUntilDate'): + now = int(time.time() * 1000) + with_details = (flag.get('debugEventsUntilDate') > now) + if with_details: + meta['version'] = flag.get('version') + if reason is not None: + meta['reason'] = reason + if variation is not None: + meta['variation'] = variation + if flag.get('trackEvents'): + meta['trackEvents'] = True + if flag.get('debugEventsUntilDate') is not None: + meta['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') + self.__flag_metadata[key] = meta + + @property + def valid(self) -> bool: + """True if this object contains a valid snapshot of feature flag state, or False if the + state could not be computed (for instance, because the client was offline or there was no user). + """ + return self.__valid + + + def get_flag_value(self, key: str) -> object: + """Returns the value of an individual feature flag at the time the state was recorded. + + :param key: the feature flag key + :return: the flag's value; None if the flag returned the default value, or if there was no such flag + """ + return self.__flag_values.get(key) + + def get_flag_reason(self, key: str) -> Optional[dict]: + """Returns the evaluation reason for an individual feature flag at the time the state was recorded. 
+ + :param key: the feature flag key + :return: a dictionary describing the reason; None if reasons were not recorded, or if there was no + such flag + """ + meta = self.__flag_metadata.get(key) + return None if meta is None else meta.get('reason') + + def to_values_map(self) -> dict: + """Returns a dictionary of flag keys to flag values. If the flag would have evaluated to the + default value, its value will be None. + + Do not use this method if you are passing data to the front end to "bootstrap" the JavaScript client. + Instead, use :func:`to_json_dict()`. + """ + return self.__flag_values + + def to_json_dict(self) -> dict: + """Returns a dictionary suitable for passing as JSON, in the format used by the LaunchDarkly + JavaScript SDK. Use this method if you are passing data to the front end in order to + "bootstrap" the JavaScript client. + """ + ret = self.__flag_values.copy() + ret['$flagsState'] = self.__flag_metadata + ret['$valid'] = self.__valid + return ret + + def to_json_string(self) -> str: + """Same as to_json_dict, but serializes the JSON structure into a string. + """ + return json.dumps(self.to_json_dict()) + + def __getstate__(self) -> dict: + """Equivalent to to_json_dict() - used if you are serializing the object with jsonpickle. + """ + return self.to_json_dict() diff --git a/ldclient/flag.py b/ldclient/flag.py index 629f831b..1d35e4dd 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -1,354 +1,27 @@ -""" -This submodule contains a helper class for feature flag evaluation, as well as some implementation details. -""" -from collections import namedtuple -import hashlib -import logging - -from typing import Optional, List, Any -import sys - -from ldclient import operators -from ldclient.util import stringify_attrs +# This module exists only for historical reasons. Previously, ldclient.flag contained a +# combination of public API types (EvaluationDetail) and implementation details (the evaluate() +# function, etc.). Our new convention is to keep all such implementation details within +# ldclient.impl and its submodules, to make it clear that applications should never try to +# reference them directly. Since some application code may have done so in the past, and since +# we do not want to move anything in the public API yet, we are retaining this module as a +# deprecated entry point and re-exporting some symbols. +# +# In the future, ldclient.evaluation will be the preferred entry point for the public types and +# ldclient.flag will be removed. + +from ldclient.evaluation import EvaluationDetail +from ldclient.impl.evaluator import Evaluator, EvalResult, error_reason from ldclient.versioned_data_kind import FEATURES, SEGMENTS -__LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF) - -__BUILTINS__ = ["key", "ip", "country", "email", - "firstName", "lastName", "avatar", "name", "anonymous"] - -__USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__ = [ "key", "secondary" ] -# Currently we are not stringifying the rest of the built-in attributes prior to evaluation, only for events. -# This is because it could affect evaluation results for existing users (ch35206). - -log = logging.getLogger(sys.modules[__name__].__name__) - - -class EvaluationDetail: - """ - The return type of :func:`ldclient.client.LDClient.variation_detail()`, combining the result of a - flag evaluation with information about how it was calculated. - """ - def __init__(self, value: object, variation_index: Optional[int], reason: dict): - """Constructs an instance. 
- """ - self.__value = value - self.__variation_index = variation_index - self.__reason = reason - - @property - def value(self) -> object: - """The result of the flag evaluation. This will be either one of the flag's - variations or the default value that was passed to the - :func:`ldclient.client.LDClient.variation_detail()` method. - """ - return self.__value - - @property - def variation_index(self) -> Optional[int]: - """The index of the returned value within the flag's list of variations, e.g. - 0 for the first variation -- or None if the default value was returned. - """ - return self.__variation_index - - @property - def reason(self) -> dict: - """A dictionary describing the main factor that influenced the flag evaluation value. - It contains the following properties: - - * ``kind``: The general category of reason, as follows: - - * ``"OFF"``: the flag was off - * ``"FALLTHROUGH"``: the flag was on but the user did not match any targets or rules - * ``"TARGET_MATCH"``: the user was specifically targeted for this flag - * ``"RULE_MATCH"``: the user matched one of the flag's rules - * ``"PREREQUISITE_FAILED"``: the flag was considered off because it had at least one - prerequisite flag that did not return the desired variation - * ``"ERROR"``: the flag could not be evaluated due to an unexpected error. - - * ``ruleIndex``, ``ruleId``: The positional index and unique identifier of the matched - rule, if the kind was ``RULE_MATCH`` - - * ``prerequisiteKey``: The flag key of the prerequisite that failed, if the kind was - ``PREREQUISITE_FAILED`` - - * ``errorKind``: further describes the nature of the error if the kind was ``ERROR``, - e.g. ``"FLAG_NOT_FOUND"`` - - * ``bigSegmentsStatus``: describes the validity of Big Segment information, if and only if - the flag evaluation required querying at least one Big Segment; otherwise it returns None. - Allowable values are defined in `BigSegmentStatus`. For more information, read the - LaunchDarkly documentation: https://docs.launchdarkly.com/home/users/big-segments - """ - return self.__reason - - def is_default_value(self) -> bool: - """Returns True if the flag evaluated to the default value rather than one of its - variations. - """ - return self.__variation_index is None - - def __eq__(self, other) -> bool: - return self.value == other.value and self.variation_index == other.variation_index and self.reason == other.reason - - def __ne__(self, other) -> bool: - return not self.__eq__(other) - - def __str__(self) -> str: - return "(value=%s, variation_index=%s, reason=%s)" % (self.value, self.variation_index, self.reason) - - def __repr__(self) -> str: - return self.__str__() - - -class BigSegmentStatus: - """ - Indicates that the Big Segment query involved in the flag evaluation was successful, and - the segment state is considered up to date. - """ - HEALTHY = "HEALTHY" - - """ - Indicates that the Big Segment query involved in the flag evaluation was successful, but - segment state may not be up to date. - """ - STALE = "STALE" - - """ - Indicates that Big Segments could not be queried for the flag evaluation because the SDK - configuration did not include a Big Segment store. - """ - NOT_CONFIGURED = "NOT_CONFIGURED" - - """ - Indicates that the Big Segment query involved in the flag evaluation failed, for - instance due to a database error. 
- """ - STORE_ERROR = "STORE_ERROR" - - -EvalResult = namedtuple('EvalResult', ['detail', 'events']) - - -def error_reason(error_kind: str) -> dict: - return {'kind': 'ERROR', 'errorKind': error_kind} - - +# Deprecated internal function for evaluating flags. def evaluate(flag, user, store, event_factory) -> EvalResult: - sanitized_user = stringify_attrs(user, __USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__) - prereq_events = [] # type: List[Any] - detail = _evaluate(flag, sanitized_user, store, prereq_events, event_factory) - return EvalResult(detail = detail, events = prereq_events) - -def _evaluate(flag, user, store, prereq_events, event_factory): - if not flag.get('on', False): - return _get_off_value(flag, {'kind': 'OFF'}) - - prereq_failure_reason = _check_prerequisites(flag, user, store, prereq_events, event_factory) - if prereq_failure_reason is not None: - return _get_off_value(flag, prereq_failure_reason) - - # Check to see if any user targets match: - for target in flag.get('targets') or []: - for value in target.get('values') or []: - if value == user['key']: - return _get_variation(flag, target.get('variation'), {'kind': 'TARGET_MATCH'}) - - # Now walk through the rules to see if any match - for index, rule in enumerate(flag.get('rules') or []): - if _rule_matches_user(rule, user, store): - return _get_value_for_variation_or_rollout(flag, rule, user, - {'kind': 'RULE_MATCH', 'ruleIndex': index, 'ruleId': rule.get('id')}) - - # Walk through fallthrough and see if it matches - if flag.get('fallthrough') is not None: - return _get_value_for_variation_or_rollout(flag, flag['fallthrough'], user, {'kind': 'FALLTHROUGH'}) - - -def _check_prerequisites(flag, user, store, events, event_factory): - failed_prereq = None - prereq_res = None - for prereq in flag.get('prerequisites') or []: - prereq_flag = store.get(FEATURES, prereq.get('key'), lambda x: x) - if prereq_flag is None: - log.warning("Missing prereq flag: " + prereq.get('key')) - failed_prereq = prereq - else: - prereq_res = _evaluate(prereq_flag, user, store, events, event_factory) - # Note that if the prerequisite flag is off, we don't consider it a match no matter what its - # off variation was. But we still need to evaluate it in order to generate an event. 
- if (not prereq_flag.get('on', False)) or prereq_res.variation_index != prereq.get('variation'): - failed_prereq = prereq - event = event_factory.new_eval_event(prereq_flag, user, prereq_res, None, flag) - events.append(event) - if failed_prereq: - return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.get('key')} - return None - - -def _get_variation(flag, variation, reason): - vars = flag.get('variations') or [] - if variation < 0 or variation >= len(vars): - return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) - return EvaluationDetail(vars[variation], variation, reason) - - -def _get_off_value(flag, reason): - off_var = flag.get('offVariation') - if off_var is None: - return EvaluationDetail(None, None, reason) - return _get_variation(flag, off_var, reason) - - -def _get_value_for_variation_or_rollout(flag, vr, user, reason): - index, inExperiment = _variation_index_for_user(flag, vr, user) - if index is None: - return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) - if inExperiment: - reason['inExperiment'] = inExperiment - return _get_variation(flag, index, reason) - - -def _get_user_attribute(user, attr): - if attr == 'secondary': - return None, True - if attr in __BUILTINS__: - return user.get(attr), False - else: # custom attribute - if user.get('custom') is None or user['custom'].get(attr) is None: - return None, True - return user['custom'][attr], False - - -def _variation_index_for_user(feature, rule, user): - if rule.get('variation') is not None: - return (rule['variation'], False) - - rollout = rule.get('rollout') - if rollout is None: - return (None, False) - variations = rollout.get('variations') - seed = rollout.get('seed') - if variations is not None and len(variations) > 0: - bucket_by = 'key' - if rollout.get('bucketBy') is not None: - bucket_by = rollout['bucketBy'] - bucket = _bucket_user(seed, user, feature['key'], feature['salt'], bucket_by) - is_experiment = rollout.get('kind') == 'experiment' - sum = 0.0 - for wv in variations: - sum += wv.get('weight', 0.0) / 100000.0 - if bucket < sum: - is_experiment_partition = is_experiment and not wv.get('untracked') - return (wv.get('variation'), is_experiment_partition) - - # The user's bucket value was greater than or equal to the end of the last bucket. This could happen due - # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag - # data could contain buckets that don't actually add up to 100000. Rather than returning an error in - # this case (or changing the scaling, which would potentially change the results for *all* users), we - # will simply put the user in the last bucket. - is_experiment_partition = is_experiment and not variations[-1].get('untracked') - return (variations[-1].get('variation'), is_experiment_partition) - - return (None, False) - - -def _bucket_user(seed, user, key, salt, bucket_by): - u_value, should_pass = _get_user_attribute(user, bucket_by) - bucket_by_value = _bucketable_string_value(u_value) - - if should_pass or bucket_by_value is None: - return 0.0 - - id_hash = u_value - if user.get('secondary') is not None: - id_hash = id_hash + '.' 
+ user['secondary'] - - if seed is not None: - prefix = str(seed) - else: - prefix = '%s.%s' % (key, salt) - - hash_key = '%s.%s' % (prefix, id_hash) - hash_val = int(hashlib.sha1(hash_key.encode('utf-8')).hexdigest()[:15], 16) - result = hash_val / __LONG_SCALE__ - return result - - -def _bucketable_string_value(u_value): - return str(u_value) if isinstance(u_value, (str, int)) else None - -def _rule_matches_user(rule, user, store): - for clause in rule.get('clauses') or []: - if clause.get('attribute') is not None: - if not _clause_matches_user(clause, user, store): - return False - return True - - -def _clause_matches_user(clause, user, store): - if clause.get('op') == 'segmentMatch': - for seg_key in clause.get('values') or []: - segment = store.get(SEGMENTS, seg_key, lambda x: x) - if segment is not None and _segment_matches_user(segment, user): - return _maybe_negate(clause, True) - return _maybe_negate(clause, False) - else: - return _clause_matches_user_no_segments(clause, user) - -def _clause_matches_user_no_segments(clause, user): - u_value, should_pass = _get_user_attribute(user, clause.get('attribute')) - if should_pass is True: - return False - if u_value is None: - return None - # is the attr an array? - op_fn = operators.ops[clause['op']] - if isinstance(u_value, (list, tuple)): - for u in u_value: - if _match_any(op_fn, u, clause.get('values') or []): - return _maybe_negate(clause, True) - return _maybe_negate(clause, False) - else: - return _maybe_negate(clause, _match_any(op_fn, u_value, clause.get('values') or [])) - -def _segment_matches_user(segment, user): - key = user.get('key') - if key is not None: - if key in segment.get('included', []): - return True - if key in segment.get('excluded', []): - return False - for rule in segment.get('rules', []): - if _segment_rule_matches_user(rule, user, segment.get('key'), segment.get('salt')): - return True - return False - -def _segment_rule_matches_user(rule, user, segment_key, salt): - for clause in rule.get('clauses') or []: - if not _clause_matches_user_no_segments(clause, user): - return False - - # If the weight is absent, this rule matches - if 'weight' not in rule or rule['weight'] is None: - return True - - # All of the clauses are met. See if the user buckets in - bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy'] - bucket = _bucket_user(None, user, segment_key, salt, bucket_by) - weight = rule['weight'] / 100000.0 - return bucket < weight - - -def _match_any(op_fn, u, vals): - for v in vals: - if op_fn(u, v): - return True - return False + evaluator = Evaluator( + lambda key: store.get(FEATURES, key), + lambda key: store.get(SEGMENTS, key), + None + ) + return evaluator.evaluate(flag, user, event_factory) -def _maybe_negate(clause, val): - if clause.get('negate', False) is True: - return not val - return val +__all__ = ['EvaluationDetail', 'evaluate', 'error_reason', 'EvalResult'] diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py index 0bb0dbd0..4701031e 100644 --- a/ldclient/flags_state.py +++ b/ldclient/flags_state.py @@ -1,97 +1,8 @@ -""" -This submodule contains a helper class for feature flag evaluation. -""" -from typing import Optional, Dict, Any -import json -import time - -class FeatureFlagsState: - """ - A snapshot of the state of all feature flags with regard to a specific user, generated by - calling the :func:`ldclient.client.LDClient.all_flags_state()` method. 
Serializing this - object to JSON, using the :func:`to_json_dict` method or ``jsonpickle``, will produce the - appropriate data structure for bootstrapping the LaunchDarkly JavaScript client. See the - JavaScript SDK Reference Guide on `Bootstrapping `_. - """ - def __init__(self, valid: bool): - self.__flag_values = {} # type: Dict[str, Any] - self.__flag_metadata = {} # type: Dict[str, Any] - self.__valid = valid - - # Used internally to build the state map - def add_flag(self, flag, value, variation, reason, details_only_if_tracked): - key = flag['key'] - self.__flag_values[key] = value - meta = {} - with_details = (not details_only_if_tracked) or flag.get('trackEvents') - if not with_details: - if flag.get('debugEventsUntilDate'): - now = int(time.time() * 1000) - with_details = (flag.get('debugEventsUntilDate') > now) - if with_details: - meta['version'] = flag.get('version') - if reason is not None: - meta['reason'] = reason - if variation is not None: - meta['variation'] = variation - if flag.get('trackEvents'): - meta['trackEvents'] = True - if flag.get('debugEventsUntilDate') is not None: - meta['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') - self.__flag_metadata[key] = meta - - @property - def valid(self) -> bool: - """True if this object contains a valid snapshot of feature flag state, or False if the - state could not be computed (for instance, because the client was offline or there was no user). - """ - return self.__valid - - - def get_flag_value(self, key: str) -> object: - """Returns the value of an individual feature flag at the time the state was recorded. - - :param key: the feature flag key - :return: the flag's value; None if the flag returned the default value, or if there was no such flag - """ - return self.__flag_values.get(key) - - def get_flag_reason(self, key: str) -> Optional[dict]: - """Returns the evaluation reason for an individual feature flag at the time the state was recorded. - - :param key: the feature flag key - :return: a dictionary describing the reason; None if reasons were not recorded, or if there was no - such flag - """ - meta = self.__flag_metadata.get(key) - return None if meta is None else meta.get('reason') - - def to_values_map(self) -> dict: - """Returns a dictionary of flag keys to flag values. If the flag would have evaluated to the - default value, its value will be None. - - Do not use this method if you are passing data to the front end to "bootstrap" the JavaScript client. - Instead, use :func:`to_json_dict()`. - """ - return self.__flag_values - - def to_json_dict(self) -> dict: - """Returns a dictionary suitable for passing as JSON, in the format used by the LaunchDarkly - JavaScript SDK. Use this method if you are passing data to the front end in order to - "bootstrap" the JavaScript client. - """ - ret = self.__flag_values.copy() - ret['$flagsState'] = self.__flag_metadata - ret['$valid'] = self.__valid - return ret - - def to_json_string(self) -> str: - """Same as to_json_dict, but serializes the JSON structure into a string. - """ - return json.dumps(self.to_json_dict()) - - def __getstate__(self) -> dict: - """Equivalent to to_json_dict() - used if you are serializing the object with jsonpickle. - """ - return self.to_json_dict() +# This module exists only for historical reasons. It only contained the FeatureFlagsState class, +# which is now in the ldclient.evaluation module. We are retaining this module as a deprecated +# entry point and re-exporting the class from ldclient.evaluation. 
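+#
+# For example (illustrative only), both import paths currently resolve to the same class:
+#
+#     from ldclient.flags_state import FeatureFlagsState   # deprecated entry point
+#     from ldclient.evaluation import FeatureFlagsState    # preferred entry point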
+# +# In the future, ldclient.evaluation will be the preferred entry point and ldclient.flags_state +# will be removed. +from ldclient.evaluation import FeatureFlagsState diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py new file mode 100644 index 00000000..369fb2a5 --- /dev/null +++ b/ldclient/impl/evaluator.py @@ -0,0 +1,309 @@ +from ldclient import operators +from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail +from ldclient.impl.event_factory import _EventFactory +from ldclient.util import stringify_attrs + +from collections import namedtuple +import hashlib +import logging +from typing import Callable, Optional, Tuple + +# For consistency with past logging behavior, we are pretending that the evaluation logic still lives in +# the ldclient.flag module. +log = logging.getLogger('ldclient.flag') + +__LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF) + +__BUILTINS__ = ["key", "ip", "country", "email", + "firstName", "lastName", "avatar", "name", "anonymous"] + +__USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__ = [ "key", "secondary" ] +# Currently we are not stringifying the rest of the built-in attributes prior to evaluation, only for events. +# This is because it could affect evaluation results for existing users (ch35206). + + +# EvalResult is used internally to hold the EvaluationDetail result of an evaluation along with +# other side effects that are not exposed to the application, such as events generated by +# prerequisite evaluations, and the cached state of any Big Segments query that we may have +# ended up having to do for the user. +class EvalResult: + def __init__(self): + self.detail = None + self.events = None + self.big_segments_status = None + self.big_segments_membership = None + + def add_event(self, event): + if self.events is None: + self.events = [] + self.events.append(event) + + +class Evaluator: + """ + Encapsulates the feature flag evaluation logic. The Evaluator has no knowledge of the rest of the SDK environment; + if it needs to retrieve flags or segments that are referenced by a flag, it does so through a read-only interface + that is provided in the constructor. It also produces feature events as appropriate for any referenced prerequisite + flags, but does not send them. 
+ """ + def __init__( + self, + get_flag: Callable[[str], Optional[dict]], + get_segment: Callable[[str], Optional[dict]], + get_big_segments_membership: Callable[[str], Optional[Tuple[dict, BigSegmentsStatus]]] + ): + self.__get_flag = get_flag + self.__get_segment = get_segment + self.__get_big_segments_membership = get_big_segments_membership + + def evaluate(self, flag: dict, user: dict, event_factory: _EventFactory) -> EvalResult: + sanitized_user = stringify_attrs(user, __USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__) + state = EvalResult() + state.detail = self._evaluate(flag, sanitized_user, state, event_factory) + if state.big_segments_status is not None: + state.detail.reason['bigSegmentsStatus'] = state.big_segments_status + return state + + def _evaluate(self, flag: dict, user: dict, state: EvalResult, event_factory: _EventFactory): + if not flag.get('on', False): + return _get_off_value(flag, {'kind': 'OFF'}) + + prereq_failure_reason = self._check_prerequisites(flag, user, state, event_factory) + if prereq_failure_reason is not None: + return _get_off_value(flag, prereq_failure_reason) + + # Check to see if any user targets match: + for target in flag.get('targets') or []: + for value in target.get('values') or []: + if value == user['key']: + return _get_variation(flag, target.get('variation'), {'kind': 'TARGET_MATCH'}) + + # Now walk through the rules to see if any match + for index, rule in enumerate(flag.get('rules') or []): + if self._rule_matches_user(rule, user, state): + return _get_value_for_variation_or_rollout(flag, rule, user, + {'kind': 'RULE_MATCH', 'ruleIndex': index, 'ruleId': rule.get('id')}) + + # Walk through fallthrough and see if it matches + if flag.get('fallthrough') is not None: + return _get_value_for_variation_or_rollout(flag, flag['fallthrough'], user, {'kind': 'FALLTHROUGH'}) + + def _check_prerequisites(self, flag: dict, user: dict, state: EvalResult, event_factory: _EventFactory): + failed_prereq = None + prereq_res = None + for prereq in flag.get('prerequisites') or []: + prereq_flag = self.__get_flag(prereq.get('key')) + if prereq_flag is None: + log.warning("Missing prereq flag: " + prereq.get('key')) + failed_prereq = prereq + else: + prereq_res = self._evaluate(prereq_flag, user, state, event_factory) + # Note that if the prerequisite flag is off, we don't consider it a match no matter what its + # off variation was. But we still need to evaluate it in order to generate an event. 
+ if (not prereq_flag.get('on', False)) or prereq_res.variation_index != prereq.get('variation'): + failed_prereq = prereq + event = event_factory.new_eval_event(prereq_flag, user, prereq_res, None, flag) + state.add_event(event) + if failed_prereq: + return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.get('key')} + return None + + def _rule_matches_user(self, rule: dict, user: dict, state: EvalResult): + for clause in rule.get('clauses') or []: + if clause.get('attribute') is not None: + if not self._clause_matches_user(clause, user, state): + return False + return True + + def _clause_matches_user(self, clause: dict, user: dict, state: EvalResult): + if clause.get('op') == 'segmentMatch': + for seg_key in clause.get('values') or []: + segment = self.__get_segment(seg_key) + if segment is not None and self._segment_matches_user(segment, user, state): + return _maybe_negate(clause, True) + return _maybe_negate(clause, False) + else: + return _clause_matches_user_no_segments(clause, user) + + def _segment_matches_user(self, segment: dict, user: dict, state: EvalResult): + if segment.get('unbounded', False): + return self._big_segment_match_user(segment, user, state) + return _simple_segment_match_user(segment, user, True) + + def _big_segment_match_user(self, segment: dict, user: dict, state: EvalResult): + generation = segment.get('generation', None) + if generation is None: + # Big segment queries can only be done if the generation is known. If it's unset, + # that probably means the data store was populated by an older SDK that doesn't know + # about the generation property and therefore dropped it from the JSON data. We'll treat + # that as a "not configured" condition. + state.big_segments_status = BigSegmentsStatus.NOT_CONFIGURED + return False + if state.big_segments_status is None: + user_key = user.get('key') + result = self.__get_big_segments_membership(user_key) + if result: + state.big_segments_membership, state.big_segments_status = result + else: + state.big_segments_membership = None + state.big_segments_status = BigSegmentsStatus.NOT_CONFIGURED + segment_ref = _make_big_segment_ref(segment) + membership = state.big_segments_membership + included = None if membership is None else membership.get(segment_ref, None) + if included is not None: + return included + return _simple_segment_match_user(segment, user, False) + + +# The following functions are declared outside Evaluator because they do not depend on any +# of Evaluator's state. 
+ +def _get_variation(flag, variation, reason): + vars = flag.get('variations') or [] + if variation < 0 or variation >= len(vars): + return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) + return EvaluationDetail(vars[variation], variation, reason) + +def _get_off_value(flag, reason): + off_var = flag.get('offVariation') + if off_var is None: + return EvaluationDetail(None, None, reason) + return _get_variation(flag, off_var, reason) + +def _get_value_for_variation_or_rollout(flag, vr, user, reason): + index, inExperiment = _variation_index_for_user(flag, vr, user) + if index is None: + return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) + if inExperiment: + reason['inExperiment'] = inExperiment + return _get_variation(flag, index, reason) + +def _get_user_attribute(user, attr): + if attr == 'secondary': + return None, True + if attr in __BUILTINS__: + return user.get(attr), False + else: # custom attribute + if user.get('custom') is None or user['custom'].get(attr) is None: + return None, True + return user['custom'][attr], False + +def _variation_index_for_user(feature, rule, user): + if rule.get('variation') is not None: + return (rule['variation'], False) + + rollout = rule.get('rollout') + if rollout is None: + return (None, False) + variations = rollout.get('variations') + seed = rollout.get('seed') + if variations is not None and len(variations) > 0: + bucket_by = 'key' + if rollout.get('bucketBy') is not None: + bucket_by = rollout['bucketBy'] + bucket = _bucket_user(seed, user, feature['key'], feature['salt'], bucket_by) + is_experiment = rollout.get('kind') == 'experiment' + sum = 0.0 + for wv in variations: + sum += wv.get('weight', 0.0) / 100000.0 + if bucket < sum: + is_experiment_partition = is_experiment and not wv.get('untracked') + return (wv.get('variation'), is_experiment_partition) + + # The user's bucket value was greater than or equal to the end of the last bucket. This could happen due + # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag + # data could contain buckets that don't actually add up to 100000. Rather than returning an error in + # this case (or changing the scaling, which would potentially change the results for *all* users), we + # will simply put the user in the last bucket. + is_experiment_partition = is_experiment and not variations[-1].get('untracked') + return (variations[-1].get('variation'), is_experiment_partition) + + return (None, False) + +def _bucket_user(seed, user, key, salt, bucket_by): + u_value, should_pass = _get_user_attribute(user, bucket_by) + bucket_by_value = _bucketable_string_value(u_value) + + if should_pass or bucket_by_value is None: + return 0.0 + + id_hash = u_value + if user.get('secondary') is not None: + id_hash = id_hash + '.' + user['secondary'] + + if seed is not None: + prefix = str(seed) + else: + prefix = '%s.%s' % (key, salt) + + hash_key = '%s.%s' % (prefix, id_hash) + hash_val = int(hashlib.sha1(hash_key.encode('utf-8')).hexdigest()[:15], 16) + result = hash_val / __LONG_SCALE__ + return result + +def _bucketable_string_value(u_value): + return str(u_value) if isinstance(u_value, (str, int)) else None + +def _clause_matches_user_no_segments(clause, user): + u_value, should_pass = _get_user_attribute(user, clause.get('attribute')) + if should_pass is True: + return False + if u_value is None: + return None + # is the attr an array? 
+ op_fn = operators.ops[clause['op']] + if isinstance(u_value, (list, tuple)): + for u in u_value: + if _match_any(op_fn, u, clause.get('values') or []): + return _maybe_negate(clause, True) + return _maybe_negate(clause, False) + else: + return _maybe_negate(clause, _match_any(op_fn, u_value, clause.get('values') or [])) + +def _simple_segment_match_user(segment, user, use_includes_and_excludes): + key = user.get('key') + if key is not None: + if use_includes_and_excludes: + if key in segment.get('included', []): + return True + if key in segment.get('excluded', []): + return False + for rule in segment.get('rules', []): + if _segment_rule_matches_user(rule, user, segment.get('key'), segment.get('salt')): + return True + return False + +def _segment_rule_matches_user(rule, user, segment_key, salt): + for clause in rule.get('clauses') or []: + if not _clause_matches_user_no_segments(clause, user): + return False + + # If the weight is absent, this rule matches + if 'weight' not in rule or rule['weight'] is None: + return True + + # All of the clauses are met. See if the user buckets in + bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy'] + bucket = _bucket_user(None, user, segment_key, salt, bucket_by) + weight = rule['weight'] / 100000.0 + return bucket < weight + +def _match_any(op_fn, u, vals): + for v in vals: + if op_fn(u, v): + return True + return False + +def _maybe_negate(clause, val): + if clause.get('negate', False) is True: + return not val + return val + +def _make_big_segment_ref(segment: dict) -> str: + # The format of Big Segment references is independent of what store implementation is being + # used; the store implementation receives only this string and does not know the details of + # the data model. The Relay Proxy will use the same format when writing to the store. 
+ return "%s:%d" % (segment.get('key', ''), segment.get('generation', 0)) + +def error_reason(error_kind: str) -> dict: + return {'kind': 'ERROR', 'errorKind': error_kind} diff --git a/testing/impl/__init__.py b/testing/impl/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py new file mode 100644 index 00000000..d5d1fa15 --- /dev/null +++ b/testing/impl/evaluator_util.py @@ -0,0 +1,99 @@ +from ldclient.evaluation import BigSegmentsStatus +from ldclient.impl.evaluator import Evaluator, _make_big_segment_ref +from ldclient.impl.event_factory import _EventFactory + +from typing import Optional + +basic_user = { "key": "user-key" } +event_factory = _EventFactory(False) + +class EvaluatorBuilder: + def __init__(self): + self.__flags = {} + self.__segments = {} + self.__big_segments = {} + self.__big_segments_status = BigSegmentsStatus.HEALTHY + + def build(self) -> Evaluator: + return Evaluator( + self._get_flag, + self._get_segment, + self._get_big_segments_membership + ) + + def with_flag(self, flag: dict) -> 'EvaluatorBuilder': + self.__flags[flag['key']] = flag + return self + + def with_unknown_flag(self, key) -> 'EvaluatorBuilder': + self.__flags[key] = None + return self + + def with_segment(self, segment: dict) -> 'EvaluatorBuilder': + self.__segments[segment['key']] = segment + return self + + def with_unknown_segment(self, key) -> 'EvaluatorBuilder': + self.__segments[key] = None + return self + + def with_big_segment_for_user(self, user: dict, segment: dict, included: bool) -> 'EvaluatorBuilder': + user_key = user['key'] + if user_key not in self.__big_segments: + self.__big_segments[user_key] = {} + self.__big_segments[user_key][_make_big_segment_ref(segment)] = included + return self + + def with_no_big_segments_for_user(self, user: dict) -> 'EvaluatorBuilder': + self.__big_segments[user['key']] = {} + return self + + def with_big_segments_status(self, status: str) -> 'EvaluatorBuilder': + self.__big_segments_status = status + return self + + def _get_flag(self, key) -> Optional[dict]: + if key not in self.__flags: + raise Exception("test made unexpected request for flag '%s'" % key) + return self.__flags[key] + + def _get_segment(self, key) -> Optional[dict]: + if key not in self.__segments: + raise Exception("test made unexpected request for segment '%s'" % key) + return self.__segments[key] + + def _get_big_segments_membership(self, key) -> Optional[dict]: + if key not in self.__big_segments: + raise Exception("test made unexpected request for big segments for user key '%s'" % key) + return (self.__big_segments[key], self.__big_segments_status) + +basic_evaluator = EvaluatorBuilder().build() + + +def make_boolean_flag_with_rules(rules) -> dict: + return { + 'key': 'feature', + 'on': True, + 'rules': rules, + 'fallthrough': { 'variation': 0 }, + 'variations': [ False, True ], + 'salt': '' + } + +def make_boolean_flag_with_clause(clause: dict) -> dict: + return make_boolean_flag_with_rules([ + { + 'clauses': [ clause ], + 'variation': 1 + } + ]) + +def make_boolean_flag_matching_segment(segment: dict) -> dict: + return make_boolean_flag_with_clause({ + 'attribute': '', + 'op': 'segmentMatch', + 'values': [ segment['key'] ] + }) + +def make_clause_matching_user(user: dict) -> dict: + return { 'attribute': 'key', 'op': 'in', 'values': [ user['key'] ] } diff --git a/testing/test_flag.py b/testing/impl/test_evaluator.py similarity index 79% rename from testing/test_flag.py rename to 
testing/impl/test_evaluator.py index c0d61707..e48353ab 100644 --- a/testing/test_flag.py +++ b/testing/impl/test_evaluator.py @@ -1,24 +1,13 @@ import math import pytest -from ldclient.feature_store import InMemoryFeatureStore -from ldclient.flag import EvaluationDetail, EvalResult, _bucket_user, _variation_index_for_user, evaluate -from ldclient.impl.event_factory import _EventFactory -from ldclient.versioned_data_kind import FEATURES, SEGMENTS +from ldclient.evaluation import EvaluationDetail +from ldclient.impl.evaluator import _bucket_user, _variation_index_for_user +from testing.impl.evaluator_util import * -empty_store = InMemoryFeatureStore() -event_factory = _EventFactory(False) - - -def make_boolean_flag_with_rules(rules): - return { - 'key': 'feature', - 'on': True, - 'rules': rules, - 'fallthrough': { 'variation': 0 }, - 'variations': [ False, True ], - 'salt': '' - } +def assert_eval_result(result, expected_detail, expected_events): + assert result.detail == expected_detail + assert result.events == expected_events def test_flag_returns_off_variation_if_flag_is_off(): @@ -30,7 +19,7 @@ def test_flag_returns_off_variation_if_flag_is_off(): } user = { 'key': 'x' } detail = EvaluationDetail('b', 1, {'kind': 'OFF'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_none_if_flag_is_off_and_off_variation_is_unspecified(): flag = { @@ -40,7 +29,7 @@ def test_flag_returns_none_if_flag_is_off_and_off_variation_is_unspecified(): } user = { 'key': 'x' } detail = EvaluationDetail(None, None, {'kind': 'OFF'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_off_variation_is_too_high(): flag = { @@ -51,7 +40,7 @@ def test_flag_returns_error_if_off_variation_is_too_high(): } user = { 'key': 'x' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_off_variation_is_negative(): flag = { @@ -62,7 +51,7 @@ def test_flag_returns_error_if_off_variation_is_negative(): } user = { 'key': 'x' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_off_variation_if_prerequisite_not_found(): flag = { @@ -73,12 +62,12 @@ def test_flag_returns_off_variation_if_prerequisite_not_found(): 'offVariation': 1, 'variations': ['a', 'b', 'c'] } + evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build() user = { 'key': 'x' } detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): - store = InMemoryFeatureStore() flag = { 'key': 'feature0', 'on': True, @@ -98,15 +87,14 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): 'version': 2, 'trackEvents': 
False } - store.upsert(FEATURES, flag1) + evaluator = EvaluatorBuilder().with_flag(flag1).build() user = { 'key': 'x' } detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, 'version': 2, 'user': user, 'prereqOf': 'feature0'}] - assert evaluate(flag, user, store, event_factory) == EvalResult(detail, events_should_be) + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): - store = InMemoryFeatureStore() flag = { 'key': 'feature0', 'on': True, @@ -124,15 +112,14 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): 'version': 2, 'trackEvents': False } - store.upsert(FEATURES, flag1) + evaluator = EvaluatorBuilder().with_flag(flag1).build() user = { 'key': 'x' } detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, 'version': 2, 'user': user, 'prereqOf': 'feature0'}] - assert evaluate(flag, user, store, event_factory) == EvalResult(detail, events_should_be) + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): - store = InMemoryFeatureStore() flag = { 'key': 'feature0', 'on': True, @@ -150,12 +137,12 @@ def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_ru 'version': 2, 'trackEvents': False } - store.upsert(FEATURES, flag1) + evaluator = EvaluatorBuilder().with_flag(flag1).build() user = { 'key': 'x' } detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, 'version': 2, 'user': user, 'prereqOf': 'feature0'}] - assert evaluate(flag, user, store, event_factory) == EvalResult(detail, events_should_be) + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_error_if_fallthrough_variation_is_too_high(): flag = { @@ -166,7 +153,7 @@ def test_flag_returns_error_if_fallthrough_variation_is_too_high(): } user = { 'key': 'x' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_fallthrough_variation_is_negative(): flag = { @@ -177,7 +164,7 @@ def test_flag_returns_error_if_fallthrough_variation_is_negative(): } user = { 'key': 'x' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_fallthrough_has_no_variation_or_rollout(): flag = { @@ -188,7 +175,7 @@ def test_flag_returns_error_if_fallthrough_has_no_variation_or_rollout(): } user = { 'key': 'x' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, 
event_factory), detail, None) def test_flag_returns_error_if_fallthrough_has_rollout_with_no_variations(): flag = { @@ -200,7 +187,7 @@ def test_flag_returns_error_if_fallthrough_has_rollout_with_no_variations(): } user = { 'key': 'x' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_matches_user_from_targets(): flag = { @@ -213,35 +200,35 @@ def test_flag_matches_user_from_targets(): } user = { 'key': 'userkey' } detail = EvaluationDetail('c', 2, {'kind': 'TARGET_MATCH'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_matches_user_from_rules(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 1} flag = make_boolean_flag_with_rules([rule]) user = { 'key': 'userkey' } detail = EvaluationDetail(True, 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'id'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_rule_variation_is_too_high(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 999} flag = make_boolean_flag_with_rules([rule]) user = { 'key': 'userkey' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_rule_variation_is_negative(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': -1} flag = make_boolean_flag_with_rules([rule]) user = { 'key': 'userkey' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_rule_has_no_variation_or_rollout(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}]} flag = make_boolean_flag_with_rules([rule]) user = { 'key': 'userkey' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_rule_has_rollout_with_no_variations(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], @@ -249,13 +236,13 @@ def test_flag_returns_error_if_rule_has_rollout_with_no_variations(): flag = make_boolean_flag_with_rules([rule]) user = { 'key': 'userkey' } detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) - assert evaluate(flag, user, empty_store, event_factory) == EvalResult(detail, []) + assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_user_key_is_coerced_to_string_for_evaluation(): clause = { 'attribute': 'key', 'op': 'in', 'values': [ '999' 
] } - flag = _make_bool_flag_from_clause(clause) + flag = make_boolean_flag_with_clause(clause) user = { 'key': 999 } - assert evaluate(flag, user, empty_store, event_factory).detail.value == True + assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True def test_secondary_key_is_coerced_to_string_for_evaluation(): # We can't really verify that the rollout calculation works correctly, but we can at least @@ -272,16 +259,15 @@ def test_secondary_key_is_coerced_to_string_for_evaluation(): } flag = make_boolean_flag_with_rules([rule]) user = { 'key': 'userkey', 'secondary': 999 } - assert evaluate(flag, user, empty_store, event_factory).detail.value == True + assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True def test_segment_match_clause_retrieves_segment_from_store(): - store = InMemoryFeatureStore() segment = { "key": "segkey", "included": [ "foo" ], "version": 1 } - store.upsert(SEGMENTS, segment) + evaluator = EvaluatorBuilder().with_segment(segment).build() user = { "key": "foo" } flag = { @@ -303,7 +289,7 @@ def test_segment_match_clause_retrieves_segment_from_store(): ] } - assert evaluate(flag, user, store, event_factory).detail.value == True + assert evaluator.evaluate(flag, user, event_factory).detail.value == True def test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found(): user = { "key": "foo" } @@ -325,8 +311,9 @@ def test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found( } ] } - - assert evaluate(flag, user, empty_store, event_factory).detail.value == False + evaluator = EvaluatorBuilder().with_unknown_segment('segkey').build() + + assert evaluator.evaluate(flag, user, event_factory).detail.value == False def test_clause_matches_builtin_attribute(): clause = { @@ -335,8 +322,8 @@ def test_clause_matches_builtin_attribute(): 'values': [ 'Bob' ] } user = { 'key': 'x', 'name': 'Bob' } - flag = _make_bool_flag_from_clause(clause) - assert evaluate(flag, user, empty_store, event_factory).detail.value == True + flag = make_boolean_flag_with_clause(clause) + assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True def test_clause_matches_custom_attribute(): clause = { @@ -345,8 +332,8 @@ def test_clause_matches_custom_attribute(): 'values': [ 4 ] } user = { 'key': 'x', 'name': 'Bob', 'custom': { 'legs': 4 } } - flag = _make_bool_flag_from_clause(clause) - assert evaluate(flag, user, empty_store, event_factory).detail.value == True + flag = make_boolean_flag_with_clause(clause) + assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True def test_clause_returns_false_for_missing_attribute(): clause = { @@ -355,8 +342,8 @@ def test_clause_returns_false_for_missing_attribute(): 'values': [ 4 ] } user = { 'key': 'x', 'name': 'Bob' } - flag = _make_bool_flag_from_clause(clause) - assert evaluate(flag, user, empty_store, event_factory).detail.value == False + flag = make_boolean_flag_with_clause(clause) + assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == False def test_clause_can_be_negated(): clause = { @@ -366,24 +353,8 @@ def test_clause_can_be_negated(): 'negate': True } user = { 'key': 'x', 'name': 'Bob' } - flag = _make_bool_flag_from_clause(clause) - assert evaluate(flag, user, empty_store, event_factory).detail.value == False - - -def _make_bool_flag_from_clause(clause): - return { - 'key': 'feature', - 'on': True, - 'rules': [ - { - 'clauses': [ clause ], - 'variation': 1 - } - ], - 'fallthrough': { 'variation': 0 }, 
- 'offVariation': 0, - 'variations': [ False, True ] - } + flag = make_boolean_flag_with_clause(clause) + assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == False def test_variation_index_is_returned_for_bucket(): user = { 'key': 'userkey' } @@ -493,4 +464,4 @@ def test_seed_changes_hash_evaluation(): seed2 = 62 point2 = _bucket_user(seed2, user, 'hashKey', 'saltyB', 'key') - assert point1 != point2 \ No newline at end of file + assert point1 != point2 diff --git a/testing/impl/test_evaluator_big_segment.py b/testing/impl/test_evaluator_big_segment.py new file mode 100644 index 00000000..1c60bd2d --- /dev/null +++ b/testing/impl/test_evaluator_big_segment.py @@ -0,0 +1,77 @@ +import pytest + +from ldclient.evaluation import BigSegmentsStatus +from testing.impl.evaluator_util import * + + +def test_big_segment_with_no_generation_is_not_matched(): + segment = { + 'key': 'test', + 'included': [ basic_user['key'] ], # included should be ignored for a big segment + 'version': 1, + 'unbounded': True + } + evaluator = EvaluatorBuilder().with_segment(segment).build() + flag = make_boolean_flag_matching_segment(segment) + result = evaluator.evaluate(flag, basic_user, event_factory) + assert result.detail.value == False + assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.NOT_CONFIGURED + +def test_big_segment_matched_with_include(): + segment = { + 'key': 'test', + 'version': 1, + 'unbounded': True, + 'generation': 2 + } + evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_user(basic_user, segment, True).build() + flag = make_boolean_flag_matching_segment(segment) + result = evaluator.evaluate(flag, basic_user, event_factory) + assert result.detail.value == True + assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY + +def test_big_segment_matched_with_rule(): + segment = { + 'key': 'test', + 'version': 1, + 'unbounded': True, + 'generation': 2, + 'rules': [ + { 'clauses': [ make_clause_matching_user(basic_user) ] } + ] + } + evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_user(basic_user).build() + flag = make_boolean_flag_matching_segment(segment) + result = evaluator.evaluate(flag, basic_user, event_factory) + assert result.detail.value == True + assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY + +def test_big_segment_unmatched_by_exclude_regardless_of_rule(): + segment = { + 'key': 'test', + 'version': 1, + 'unbounded': True, + 'generation': 2, + 'rules': [ + { 'clauses': [ make_clause_matching_user(basic_user) ] } + ] + } + evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_user(basic_user, segment, False).build() + flag = make_boolean_flag_matching_segment(segment) + result = evaluator.evaluate(flag, basic_user, event_factory) + assert result.detail.value == False + assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY + +def test_big_segment_status_is_returned_by_provider(): + segment = { + 'key': 'test', + 'version': 1, + 'unbounded': True, + 'generation': 1 + } + evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_user(basic_user).
\ + with_big_segments_status(BigSegmentsStatus.NOT_CONFIGURED).build() + flag = make_boolean_flag_matching_segment(segment) + result = evaluator.evaluate(flag, basic_user, event_factory) + assert result.detail.value == False + assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.NOT_CONFIGURED diff --git a/testing/test_segment.py b/testing/impl/test_evaluator_segment.py similarity index 93% rename from testing/test_segment.py rename to testing/impl/test_evaluator_segment.py index 02b9ecfa..901aef1f 100644 --- a/testing/test_segment.py +++ b/testing/impl/test_evaluator_segment.py @@ -1,6 +1,13 @@ import pytest -from ldclient.flag import _segment_matches_user +from testing.impl.evaluator_util import * + + +def _segment_matches_user(segment: dict, user: dict) -> bool: + e = EvaluatorBuilder().with_segment(segment).build() + flag = make_boolean_flag_matching_segment(segment) + result = e.evaluate(flag, user, event_factory) + return result.detail.value def test_explicit_include_user(): diff --git a/testing/test_event_factory.py b/testing/test_event_factory.py index 6b763e84..e039c6c7 100644 --- a/testing/test_event_factory.py +++ b/testing/test_event_factory.py @@ -1,5 +1,5 @@ import pytest -from ldclient.flag import EvaluationDetail +from ldclient.evaluation import EvaluationDetail from ldclient.impl.event_factory import _EventFactory _event_factory_default = _EventFactory(False) From 7fc6feddc1a3df0f2da4b156c158ed6793ff24f6 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 1 Dec 2021 11:41:55 -0800 Subject: [PATCH 233/356] linting --- ldclient/client.py | 4 ++-- ldclient/flag.py | 2 +- ldclient/impl/evaluator.py | 2 +- testing/impl/evaluator_util.py | 8 ++++---- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 055f6168..d37ad7c3 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -91,8 +91,8 @@ def __init__(self, config: Config, start_wait: float=5): """ :type: FeatureStore """ self._evaluator = Evaluator( - lambda key: store.get(FEATURES, key), - lambda key: store.get(SEGMENTS, key), + lambda key: store.get(FEATURES, key, lambda x: x), + lambda key: store.get(SEGMENTS, key, lambda x: x), lambda key: None # temporary - haven't yet implemented the component that does the big segments queries ) diff --git a/ldclient/flag.py b/ldclient/flag.py index 1d35e4dd..e5c40f1d 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -19,7 +19,7 @@ def evaluate(flag, user, store, event_factory) -> EvalResult: evaluator = Evaluator( lambda key: store.get(FEATURES, key), lambda key: store.get(SEGMENTS, key), - None + lambda key: None ) return evaluator.evaluate(flag, user, event_factory) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 369fb2a5..d272aaa2 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -140,7 +140,7 @@ def _big_segment_match_user(self, segment: dict, user: dict, state: EvalResult): state.big_segments_status = BigSegmentsStatus.NOT_CONFIGURED return False if state.big_segments_status is None: - user_key = user.get('key') + user_key = str(user.get('key')) result = self.__get_big_segments_membership(user_key) if result: state.big_segments_membership, state.big_segments_status = result diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index d5d1fa15..2322f5bf 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -2,7 +2,7 @@ from ldclient.impl.evaluator import Evaluator, _make_big_segment_ref from 
ldclient.impl.event_factory import _EventFactory -from typing import Optional +from typing import Optional, Tuple basic_user = { "key": "user-key" } event_factory = _EventFactory(False) @@ -52,17 +52,17 @@ def with_big_segments_status(self, status: str) -> 'EvaluatorBuilder': self.__big_segments_status = status return self - def _get_flag(self, key) -> Optional[dict]: + def _get_flag(self, key: str) -> Optional[dict]: if key not in self.__flags: raise Exception("test made unexpected request for flag '%s'" % key) return self.__flags[key] - def _get_segment(self, key) -> Optional[dict]: + def _get_segment(self, key: str) -> Optional[dict]: if key not in self.__segments: raise Exception("test made unexpected request for segment '%s'" % key) return self.__segments[key] - def _get_big_segments_membership(self, key) -> Optional[dict]: + def _get_big_segments_membership(self, key: str) -> Optional[Tuple[dict, BigSegmentsStatus]]: if key not in self.__big_segments: raise Exception("test made unexpected request for big segments for user key '%s'" % key) return (self.__big_segments[key], self.__big_segments_status) From 38124123b6396218002e5080fed8e9b0dacee42d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 1 Dec 2021 18:09:04 -0800 Subject: [PATCH 234/356] (big segments 3) implement big segment status tracking, wire up components --- ldclient/client.py | 11 +- ldclient/event_processor.py | 10 +- ldclient/impl/big_segments.py | 114 ++++++++++++- .../integrations/files/file_data_source.py | 4 +- ldclient/impl/listeners.py | 33 ++++ ldclient/impl/repeating_task.py | 50 ++++++ ldclient/interfaces.py | 12 +- ldclient/polling.py | 63 ++++--- ldclient/repeating_timer.py | 26 +-- testing/impl/test_big_segments.py | 155 ++++++++++++++++++ testing/impl/test_listeners.py | 45 +++++ testing/impl/test_repeating_task.py | 58 +++++++ testing/mock_components.py | 43 +++++ testing/test_ldclient_evaluation.py | 33 +++- testing/test_ldclient_listeners.py | 47 ++++++ 15 files changed, 632 insertions(+), 72 deletions(-) create mode 100644 ldclient/impl/listeners.py create mode 100644 ldclient/impl/repeating_task.py create mode 100644 testing/impl/test_big_segments.py create mode 100644 testing/impl/test_listeners.py create mode 100644 testing/impl/test_repeating_task.py create mode 100644 testing/mock_components.py create mode 100644 testing/test_ldclient_listeners.py diff --git a/ldclient/client.py b/ldclient/client.py index d37ad7c3..67916398 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -3,6 +3,7 @@ """ from typing import Optional, Any, Dict, Mapping + from .impl import AnyNum import hashlib @@ -16,7 +17,7 @@ from ldclient.feature_requester import FeatureRequesterImpl from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.evaluation import EvaluationDetail, FeatureFlagsState -from ldclient.impl.big_segments import NullBigSegmentStoreStatusProvider +from ldclient.impl.big_segments import BigSegmentStoreManager from ldclient.impl.evaluator import Evaluator, error_reason from ldclient.impl.event_factory import _EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor @@ -90,10 +91,13 @@ def __init__(self, config: Config, start_wait: float=5): self._store = store """ :type: FeatureStore """ + big_segment_store_manager = BigSegmentStoreManager(self._config.big_segments) + self.__big_segment_store_manager = big_segment_store_manager + self._evaluator = Evaluator( lambda key: store.get(FEATURES, key, lambda x: x), lambda key: store.get(SEGMENTS, key, 
lambda x: x), - lambda key: None # temporary - haven't yet implemented the component that does the big segments queries + lambda key: big_segment_store_manager.get_user_membership(key) ) if self._config.offline: @@ -165,6 +169,7 @@ def close(self): log.info("Closing LaunchDarkly client..") self._event_processor.stop() self._update_processor.stop() + self.__big_segment_store_manager.stop() # These magic methods allow a client object to be automatically cleaned up by the "with" scope operator def __enter__(self): @@ -426,7 +431,7 @@ def big_segment_store_status_provider(self) -> BigSegmentStoreStatusProvider: whether the Big Segment store is (as far as the SDK knows) currently operational and tracking changes in this status. """ - return NullBigSegmentStoreStatusProvider() + return self.__big_segment_store_manager.status_provider __all__ = ['LDClient', 'Config'] diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 1afb3221..7d39078f 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -17,11 +17,10 @@ from ldclient.event_summarizer import EventSummarizer from ldclient.fixed_thread_pool import FixedThreadPool from ldclient.impl.http import _http_factory +from ldclient.impl.repeating_task import RepeatingTask from ldclient.lru_cache import SimpleLRUCache from ldclient.user_filter import UserFilter from ldclient.interfaces import EventProcessor -from ldclient.repeating_timer import RepeatingTimer -from ldclient.util import UnsuccessfulResponseException from ldclient.util import log from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response, _headers from ldclient.diagnostics import create_diagnostic_init @@ -391,12 +390,13 @@ class DefaultEventProcessor(EventProcessor): def __init__(self, config, http=None, dispatcher_class=None, diagnostic_accumulator=None): self._inbox = queue.Queue(config.events_max_pending) self._inbox_full = False - self._flush_timer = RepeatingTimer(config.flush_interval, self.flush) - self._users_flush_timer = RepeatingTimer(config.user_keys_flush_interval, self._flush_users) + self._flush_timer = RepeatingTask(config.flush_interval, config.flush_interval, self.flush) + self._users_flush_timer = RepeatingTask(config.user_keys_flush_interval, config.user_keys_flush_interval, self._flush_users) self._flush_timer.start() self._users_flush_timer.start() if diagnostic_accumulator is not None: - self._diagnostic_event_timer = RepeatingTimer(config.diagnostic_recording_interval, self._send_diagnostic) + self._diagnostic_event_timer = RepeatingTask(config.diagnostic_recording_interval, + config.diagnostic_recording_interval, self._send_diagnostic) self._diagnostic_event_timer.start() else: self._diagnostic_event_timer = None diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index df57b9e4..d39e9913 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -1,12 +1,114 @@ +from ldclient.config import BigSegmentsConfig +from ldclient.evaluation import BigSegmentsStatus +from ldclient.impl.listeners import Listeners +from ldclient.impl.repeating_task import RepeatingTask from ldclient.interfaces import BigSegmentStoreStatus, BigSegmentStoreStatusProvider -from typing import Callable, Optional +from ldclient.util import log -class NullBigSegmentStoreStatusProvider(BigSegmentStoreStatusProvider): - def status(self) -> Optional[BigSegmentStoreStatus]: - return None +import base64 +from expiringdict import 
ExpiringDict +from hashlib import md5 +import time +from typing import Callable, Optional, Tuple + + +class BigSegmentStoreStatusProviderImpl(BigSegmentStoreStatusProvider): + """ + Default implementation of the BigSegmentStoreStatusProvider interface. + + The real implementation of getting the status is in BigSegmentStoreManager - we pass in a lambda that + allows us to get the current status from that class. So this class provides a facade for that, and + also adds the listener mechanism. + """ + def __init__(self, status_getter: Callable[[], BigSegmentStoreStatus]): + self.__status_getter = status_getter + self.__last_status = None + self.__status_listeners = Listeners() + + @property + def status(self) -> BigSegmentStoreStatus: + return self.__status_getter() def add_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> None: - pass + self.__status_listeners.add(listener) def remove_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> None: - pass + self.__status_listeners.remove(listener) + + def _update_status(self, new_status: BigSegmentStoreStatus): + last = self.__last_status + if last is None: + self.__last_status = new_status + elif new_status.available != last.available or new_status.stale != last.stale: + self.__last_status = new_status + self.__status_listeners.notify(new_status) + +class BigSegmentStoreManager: + """ + Internal component that decorates the Big Segment store with caching behavior, and also polls the + store to track its status. + """ + def __init__(self, config: BigSegmentsConfig): + self.__store = config.store + + self.__stale_after_millis = config.stale_after * 1000 + self.__status_provider = BigSegmentStoreStatusProviderImpl(self.get_status) + + self.__last_status = None + """ :type: Optional[BigSegmentStoreStatus] """ + + if self.__store: + self.__cache = ExpiringDict(max_len = config.user_cache_size, max_age_seconds=config.user_cache_time) + self.__poll_task = RepeatingTask(config.status_poll_interval, 0, self.poll_store_and_update_status) + self.__poll_task.start() + else: + self.__poll_task = None + + def stop(self): + if self.__poll_task: + self.__poll_task.stop() + if self.__store: + self.__store.stop() + + @property + def status_provider(self) -> BigSegmentStoreStatusProvider: + return self.__status_provider + + def get_user_membership(self, user_key: str) -> Optional[Tuple[Optional[dict], str]]: + if not self.__store: + return None + membership = self.__cache.get(user_key) + if membership is None: + try: + membership = self.__store.get_membership(_hash_for_user_key(user_key)) + self.__cache[user_key] = membership + except Exception as e: + log.exception("Big Segment store membership query returned error: %s" % e) + status = self.__last_status + if not status: + status = self.poll_store_and_update_status() + if not status.available: + return (membership, BigSegmentsStatus.STORE_ERROR) + return (membership, BigSegmentsStatus.STALE if status.stale else BigSegmentsStatus.HEALTHY) + + def get_status(self) -> BigSegmentStoreStatus: + status = self.__last_status + return status if status else self.poll_store_and_update_status() + + def poll_store_and_update_status(self) -> BigSegmentStoreStatus: + new_status = BigSegmentStoreStatus(False, False) # default to "unavailable" if we don't get a new status below + if self.__store: + try: + metadata = self.__store.get_metadata() + new_status = BigSegmentStoreStatus(True, (metadata is None) or self.is_stale(metadata.last_up_to_date)) + except Exception as e: + log.exception("Big
Segment store status query returned error: %s" % e) + self.__last_status = new_status + self.__status_provider._update_status(new_status) + return new_status + + def is_stale(self, timestamp) -> bool: + return (timestamp is None) or ((int(time.time() * 1000) - timestamp) >= self.__stale_after_millis) + +def _hash_for_user_key(user_key: str) -> str: + return base64.b64encode(md5(user_key.encode('utf-8')).digest()).decode('utf-8') diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py index 8e197a6e..f25eecae 100644 --- a/ldclient/impl/integrations/files/file_data_source.py +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -18,8 +18,8 @@ except ImportError: pass +from ldclient.impl.repeating_task import RepeatingTask from ldclient.interfaces import UpdateProcessor -from ldclient.repeating_timer import RepeatingTimer from ldclient.util import log from ldclient.versioned_data_kind import FEATURES, SEGMENTS @@ -144,7 +144,7 @@ def __init__(self, resolved_paths, reloader, interval): self._paths = resolved_paths self._reloader = reloader self._file_times = self._check_file_times() - self._timer = RepeatingTimer(interval, self._poll) + self._timer = RepeatingTask(interval, interval, self._poll) self._timer.start() def stop(self): diff --git a/ldclient/impl/listeners.py b/ldclient/impl/listeners.py new file mode 100644 index 00000000..6a1e5c86 --- /dev/null +++ b/ldclient/impl/listeners.py @@ -0,0 +1,33 @@ +from ldclient.util import log + +from threading import RLock +from typing import Any, Callable + +class Listeners: + """ + Simple abstraction for a list of callbacks that can receive a single value. Callbacks are + done synchronously on the caller's thread. + """ + def __init__(self): + self.__listeners = [] + self.__lock = RLock() + + def add(self, listener: Callable): + with self.__lock: + self.__listeners.append(listener) + + def remove(self, listener: Callable): + with self.__lock: + try: + self.__listeners.remove(listener) + except ValueError: + pass # removing a listener that wasn't in the list is a no-op + + def notify(self, value: Any): + with self.__lock: + listeners_copy = self.__listeners.copy() + for listener in listeners_copy: + try: + listener(value) + except Exception as e: + log.exception("Unexpected error in listener for %s: %s" % (type(value), e)) diff --git a/ldclient/impl/repeating_task.py b/ldclient/impl/repeating_task.py new file mode 100644 index 00000000..15794e3a --- /dev/null +++ b/ldclient/impl/repeating_task.py @@ -0,0 +1,50 @@ +from ldclient.util import log + +from threading import Event, Thread +import time +from typing import Callable + +class RepeatingTask: + """ + A generic mechanism for calling a callback repeatedly at fixed intervals on a worker thread. + """ + def __init__(self, interval: float, initial_delay: float, callable: Callable): + """ + Creates the task, but does not start the worker thread yet. + + :param interval: maximum time in seconds between invocations of the callback + :param initial_delay: time in seconds to wait before the first invocation + :param callable: the function to execute repeatedly + """ + self.__interval = interval + self.__initial_delay = initial_delay + self.__action = callable + self.__stop = Event() + self.__thread = Thread(target=self._run) + self.__thread.daemon = True + + def start(self): + """ + Starts the worker thread. + """ + self.__thread.start() + + def stop(self): + """ + Tells the worker thread to stop. It cannot be restarted after this. 
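+        Stopping does not interrupt a callback invocation that is already in progress.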
+        """
+        self.__stop.set()
+
+    def _run(self):
+        if self.__initial_delay > 0:
+            if self.__stop.wait(self.__initial_delay):
+                return
+        stopped = self.__stop.is_set()
+        while not stopped:
+            next_time = time.time() + self.__interval
+            try:
+                self.__action()
+            except Exception as e:
+                log.exception("Unexpected exception on worker thread: %s" % e)
+            delay = next_time - time.time()
+            stopped = self.__stop.wait(delay) if delay > 0 else self.__stop.is_set()
diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py
index a4e960e7..dc4b50e9 100644
--- a/ldclient/interfaces.py
+++ b/ldclient/interfaces.py
@@ -315,6 +315,12 @@ def get_membership(self, user_hash: str) -> dict:
         """
         pass
 
+    @abstractmethod
+    def stop(self):
+        """
+        Shuts down the store component and releases any resources it is using.
+        """
+        pass
 
 class BigSegmentStoreStatus:
     """
@@ -379,11 +385,11 @@ class BigSegmentStoreStatusProvider:
     """
 
     @abstractproperty
-    def status(self) -> Optional[BigSegmentStoreStatus]:
+    def status(self) -> BigSegmentStoreStatus:
         """
-        Gets the current status of the store, if known.
+        Gets the current status of the store.
 
-        :return: The status, or None if the SDK has not yet queried the Big Segment store status
+        :return: the status
         """
         pass
 
diff --git a/ldclient/polling.py b/ldclient/polling.py
index 59803a30..5b2a3c44 100644
--- a/ldclient/polling.py
+++ b/ldclient/polling.py
@@ -3,53 +3,46 @@
 """
 # currently excluded from documentation - see docs/README.md
 
-from threading import Thread
+from threading import Event
 
-from ldclient.interfaces import UpdateProcessor
+from ldclient.config import Config
+from ldclient.impl.repeating_task import RepeatingTask
+from ldclient.interfaces import FeatureRequester, FeatureStore, UpdateProcessor
 from ldclient.util import log
 from ldclient.util import UnsuccessfulResponseException, http_error_message, is_http_error_recoverable
 
-import time
-
-class PollingUpdateProcessor(Thread, UpdateProcessor):
-    def __init__(self, config, requester, store, ready):
-        Thread.__init__(self)
-        self.daemon = True
+class PollingUpdateProcessor(UpdateProcessor):
+    def __init__(self, config: Config, requester: FeatureRequester, store: FeatureStore, ready: Event):
         self._config = config
         self._requester = requester
         self._store = store
-        self._running = False
         self._ready = ready
+        self._task = RepeatingTask(config.poll_interval, 0, self._poll)
 
-    def run(self):
-        if not self._running:
-            log.info("Starting PollingUpdateProcessor with request interval: " + str(self._config.poll_interval))
-            self._running = True
-            while self._running:
-                start_time = time.time()
-                try:
-                    all_data = self._requester.get_all_data()
-                    self._store.init(all_data)
-                    if not self._ready.is_set() is True and self._store.initialized is True:
-                        log.info("PollingUpdateProcessor initialized ok")
-                        self._ready.set()
-                except UnsuccessfulResponseException as e:
-                    log.error(http_error_message(e.status, "polling request"))
-                    if not is_http_error_recoverable(e.status):
-                        self._ready.set()  # if client is initializing, make it stop waiting; has no effect if already inited
-                        self.stop()
-                except Exception as e:
-                    log.exception(
-                        'Error: Exception encountered when updating flags. %s' % e)
-
-                elapsed = time.time() - start_time
-                if elapsed < self._config.poll_interval:
-                    time.sleep(self._config.poll_interval - elapsed)
+    def start(self):
+        log.info("Starting PollingUpdateProcessor with request interval: " + str(self._config.poll_interval))
+        self._task.start()
 
     def initialized(self):
-        return self._running and self._ready.is_set() is True and self._store.initialized is True
+        return self._ready.is_set() is True and self._store.initialized is True
 
     def stop(self):
         log.info("Stopping PollingUpdateProcessor")
-        self._running = False
+        self._task.stop()
+
+    def _poll(self):
+        try:
+            all_data = self._requester.get_all_data()
+            self._store.init(all_data)
+            if not self._ready.is_set() and self._store.initialized:
+                log.info("PollingUpdateProcessor initialized ok")
+                self._ready.set()
+        except UnsuccessfulResponseException as e:
+            log.error(http_error_message(e.status, "polling request"))
+            if not is_http_error_recoverable(e.status):
+                self._ready.set()  # if client is initializing, make it stop waiting; has no effect if already inited
+                self.stop()
+        except Exception as e:
+            log.exception(
+                'Error: Exception encountered when updating flags. %s' % e)
diff --git a/ldclient/repeating_timer.py b/ldclient/repeating_timer.py
index 91a0f52d..1f160c63 100644
--- a/ldclient/repeating_timer.py
+++ b/ldclient/repeating_timer.py
@@ -3,22 +3,14 @@
 """
 # currently excluded from documentation - see docs/README.md
 
-from threading import Event, Thread
+from ldclient.impl.repeating_task import RepeatingTask
 
-class RepeatingTimer:
+class RepeatingTimer(RepeatingTask):
+    """
+    Deprecated internal class, retained until the next major version in case any application code was
+    referencing it. This was used in situations where we did not want the callback to execute
+    immediately, but always to wait for the interval first; accordingly, both the interval and
+    initial_delay parameters of RepeatingTask are set to the same value.
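+
+    For example, RepeatingTimer(30, cb) behaves like RepeatingTask(30, 30, cb): cb is
+    invoked every 30 seconds, with the first invocation happening 30 seconds after start().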
+    """
     def __init__(self, interval, callable):
-        self._interval = interval
-        self._action = callable
-        self._stop = Event()
-        self._thread = Thread(target=self._run)
-        self._thread.daemon = True
-
-    def start(self):
-        self._thread.start()
-
-    def stop(self):
-        self._stop.set()
-
-    def _run(self):
-        while not self._stop.wait(self._interval):
-            self._action()
+        super().__init__(interval, interval, callable)
diff --git a/testing/impl/test_big_segments.py b/testing/impl/test_big_segments.py
new file mode 100644
index 00000000..1e764810
--- /dev/null
+++ b/testing/impl/test_big_segments.py
@@ -0,0 +1,155 @@
+from ldclient.config import BigSegmentsConfig
+from ldclient.evaluation import BigSegmentsStatus
+from ldclient.impl.big_segments import BigSegmentStoreManager, _hash_for_user_key
+from ldclient.interfaces import BigSegmentStoreMetadata
+from testing.mock_components import MockBigSegmentStore
+
+from queue import Queue
+import time
+
+user_key = 'user-key'
+user_hash = _hash_for_user_key(user_key)
+
+
+def always_up_to_date() -> BigSegmentStoreMetadata:
+    return BigSegmentStoreMetadata(time.time() * 1000)
+
+def always_stale() -> BigSegmentStoreMetadata:
+    return BigSegmentStoreMetadata(0)
+
+
+def test_membership_query_uncached_result_healthy_status():
+    expected_membership = { "key1": True, "key2": False }
+    store = MockBigSegmentStore()
+    store.setup_metadata_always_up_to_date()
+    store.setup_membership(user_hash, expected_membership)
+    manager = BigSegmentStoreManager(BigSegmentsConfig(store=store))
+    try:
+        expected_result = (expected_membership, BigSegmentsStatus.HEALTHY)
+        assert manager.get_user_membership(user_key) == expected_result
+    finally:
+        manager.stop()
+
+def test_membership_query_cached_result_healthy_status():
+    expected_membership = { "key1": True, "key2": False }
+    store = MockBigSegmentStore()
+    store.setup_metadata_always_up_to_date()
+    store.setup_membership(user_hash, expected_membership)
+    manager = BigSegmentStoreManager(BigSegmentsConfig(store=store))
+    try:
+        expected_result = (expected_membership, BigSegmentsStatus.HEALTHY)
+        assert manager.get_user_membership(user_key) == expected_result
+        assert manager.get_user_membership(user_key) == expected_result
+    finally:
+        manager.stop()
+    assert store.membership_queries == [ user_hash ]
+
+def test_membership_query_stale_status():
+    expected_membership = { "key1": True, "key2": False }
+    store = MockBigSegmentStore()
+    store.setup_metadata_always_stale()
+    store.setup_membership(user_hash, expected_membership)
+    manager = BigSegmentStoreManager(BigSegmentsConfig(store=store))
+    try:
+        expected_result = (expected_membership, BigSegmentsStatus.STALE)
+        assert manager.get_user_membership(user_key) == expected_result
+    finally:
+        manager.stop()
+
+def test_membership_query_stale_status_no_store_metadata():
+    expected_membership = { "key1": True, "key2": False }
+    store = MockBigSegmentStore()
+    store.setup_metadata_none()
+    store.setup_membership(user_hash, expected_membership)
+    manager = BigSegmentStoreManager(BigSegmentsConfig(store=store))
+    try:
+        expected_result = (expected_membership, BigSegmentsStatus.STALE)
+        assert manager.get_user_membership(user_key) == expected_result
+    finally:
+        manager.stop()
+
+def test_membership_query_least_recent_user_evicted_from_cache():
+    user_key_1, user_key_2, user_key_3 = 'userkey1', 'userkey2', 'userkey3'
+    user_hash_1, user_hash_2, user_hash_3 = _hash_for_user_key(user_key_1), \
+        _hash_for_user_key(user_key_2), _hash_for_user_key(user_key_3)
+    membership_1, membership_2, membership_3 = { 'seg1': True }, { 'seg2': True }, { 'seg3': True }
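+    # Note: the mock store is keyed by hashed user keys - _hash_for_user_key() returns
+    # base64(md5(key)) - so memberships are registered under the hashes, not the raw keys.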
+ store = MockBigSegmentStore() + store.setup_metadata_always_up_to_date() + store.setup_membership(user_hash_1, membership_1) + store.setup_membership(user_hash_2, membership_2) + store.setup_membership(user_hash_3, membership_3) + + manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, user_cache_size=2)) + + try: + result1 = manager.get_user_membership(user_key_1) + result2 = manager.get_user_membership(user_key_2) + result3 = manager.get_user_membership(user_key_3) + + assert store.membership_queries == [user_hash_1, user_hash_2, user_hash_3] + + # Since the capacity is only 2 and user_key_1 was the least recently used, that key should be + # evicted by the user_key_3 query. Now only user_key_2 and user_key_3 are in the cache, and + # querying them again should not cause a new query to the store. + result2a = manager.get_user_membership(user_key_2) + result3a = manager.get_user_membership(user_key_3) + assert result2a == result2 + assert result3a == result3 + + assert store.membership_queries == [user_hash_1, user_hash_2, user_hash_3] + + result1a = manager.get_user_membership(user_key_1) + assert result1a == result1 + + assert store.membership_queries == [user_hash_1, user_hash_2, user_hash_3, user_hash_1] + finally: + manager.stop() + +def test_status_polling_detects_store_unavailability(): + store = MockBigSegmentStore() + store.setup_metadata_always_up_to_date() + statuses = Queue() + + manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, status_poll_interval=0.01)) + + try: + manager.status_provider.add_listener(lambda status: statuses.put(status)) + + status1 = manager.status_provider.status + assert status1.available == True + + store.setup_metadata_error() + + status2 = statuses.get(True, 1.0) + assert status2.available == False + + store.setup_metadata_always_up_to_date() + + status3 = statuses.get(True, 1.0) + assert status3.available == True + finally: + manager.stop() + +def test_status_polling_detects_stale_status(): + store = MockBigSegmentStore() + store.setup_metadata_always_up_to_date() + statuses = Queue() + + manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, status_poll_interval=0.01)) + + try: + manager.status_provider.add_listener(lambda status: statuses.put(status)) + + status1 = manager.status_provider.status + assert status1.stale == False + + store.setup_metadata_always_stale() + + status2 = statuses.get(True, 1.0) + assert status2.stale == True + + store.setup_metadata_always_up_to_date() + + status3 = statuses.get(True, 1.0) + assert status3.stale == False + finally: + manager.stop() diff --git a/testing/impl/test_listeners.py b/testing/impl/test_listeners.py new file mode 100644 index 00000000..70a0dc7f --- /dev/null +++ b/testing/impl/test_listeners.py @@ -0,0 +1,45 @@ +from ldclient.impl.listeners import Listeners + +from queue import Queue + +def test_notify_with_no_listeners_does_not_throw_exception(): + l = Listeners() + l.notify("hi") + +def test_notify_calls_listeners(): + q1 = Queue() + q2 = Queue() + l = Listeners() + l.add(lambda v: q1.put(v)) + l.add(lambda v: q2.put(v)) + l.notify("hi") + assert q1.get() == "hi" + assert q2.get() == "hi" + assert q1.empty() == True + assert q2.empty() == True + +def test_remove_listener(): + q1 = Queue() + q2 = Queue() + p1 = lambda v: q1.put(v) + p2 = lambda v: q2.put(v) + l = Listeners() + l.add(p1) + l.add(p2) + l.remove(p1) + l.remove(lambda v: print(v)) # removing nonexistent listener does not throw exception + l.notify("hi") + assert q1.empty() == True + assert q2.get() == 
"hi" + assert q2.empty() == True + +def test_exception_from_listener_is_caught_and_other_listeners_are_still_called(): + def fail(v): + raise Exception("deliberate error") + q = Queue() + l = Listeners() + l.add(fail) + l.add(lambda v: q.put(v)) + l.notify("hi") + assert q.get() == "hi" + assert q.empty() == True diff --git a/testing/impl/test_repeating_task.py b/testing/impl/test_repeating_task.py new file mode 100644 index 00000000..f39a3d59 --- /dev/null +++ b/testing/impl/test_repeating_task.py @@ -0,0 +1,58 @@ +from ldclient.impl.repeating_task import RepeatingTask + +from queue import Empty, Queue +from threading import Event +import time + + +def test_task_does_not_start_when_created(): + signal = Event() + task = RepeatingTask(0.01, 0, lambda: signal.set()) + try: + signal_was_set = signal.wait(0.1) + assert signal_was_set == False + finally: + task.stop() + +def test_task_executes_until_stopped(): + queue = Queue() + task = RepeatingTask(0.1, 0, lambda: queue.put(time.time())) + try: + last = None + task.start() + for _ in range(3): + t = queue.get(True, 1) + if last is not None: + assert (time.time() - last) >= 0.05 + last = t + finally: + task.stop() + stopped_time = time.time() + no_more_items = False + for _ in range(2): + try: + t = queue.get(False) + assert t <= stopped_time + except Empty: + no_more_items = True + assert no_more_items == True + +def test_task_can_be_stopped_from_within_the_task(): + counter = 0 + stopped = Event() + task = None + def do_task(): + nonlocal counter + counter += 1 + if counter >= 2: + task.stop() + stopped.set() + task = RepeatingTask(0.01, 0, do_task) + try: + task.start() + assert stopped.wait(0.1) == True + assert counter == 2 + time.sleep(0.1) + assert counter == 2 + finally: + task.stop() diff --git a/testing/mock_components.py b/testing/mock_components.py new file mode 100644 index 00000000..bc2037b4 --- /dev/null +++ b/testing/mock_components.py @@ -0,0 +1,43 @@ +from ldclient.interfaces import BigSegmentStore, BigSegmentStoreMetadata + +import time +from typing import Callable + +class MockBigSegmentStore(BigSegmentStore): + def __init__(self): + self.__get_metadata = lambda: BigSegmentStoreMetadata(time.time()) + self.__memberships = {} + self.__membership_queries = [] + self.setup_metadata_always_up_to_date() + + def get_metadata(self) -> BigSegmentStoreMetadata: + return self.__get_metadata() + + def get_membership(self, user_hash: str) -> dict: + self.__membership_queries.append(user_hash) + return self.__memberships.get(user_hash, None) + + def setup_metadata(self, callback: Callable[[], BigSegmentStoreMetadata]): + self.__get_metadata = callback + + def setup_metadata_always_up_to_date(self): + self.setup_metadata(lambda: BigSegmentStoreMetadata(time.time()*1000)) + + def setup_metadata_always_stale(self): + self.setup_metadata(lambda: BigSegmentStoreMetadata(0)) + + def setup_metadata_none(self): + self.setup_metadata(lambda: None) + + def setup_metadata_error(self): + self.setup_metadata(self.__fail) + + def setup_membership(self, user_hash: str, membership: dict): + self.__memberships[user_hash] = membership + + @property + def membership_queries(self) -> list: + return self.__membership_queries.copy() + + def __fail(self): + raise Exception("deliberate error") diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 06ec99f7..346e1aad 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -2,10 +2,16 @@ import json import time from ldclient.client 
import LDClient, Config +from ldclient.config import BigSegmentsConfig +from ldclient.evaluation import BigSegmentsStatus from ldclient.feature_store import InMemoryFeatureStore from ldclient.flag import EvaluationDetail +from ldclient.impl.big_segments import _hash_for_user_key +from ldclient.impl.evaluator import _make_big_segment_ref from ldclient.interfaces import FeatureStore -from ldclient.versioned_data_kind import FEATURES +from ldclient.versioned_data_kind import FEATURES, SEGMENTS +from testing.impl.evaluator_util import make_boolean_flag_matching_segment +from testing.mock_components import MockBigSegmentStore from testing.stub_util import MockEventProcessor, MockUpdateProcessor from testing.test_ldclient import make_off_flag_with_value @@ -162,6 +168,31 @@ def test_variation_detail_when_feature_store_throws_error(caplog): errlog = get_log_lines(caplog, 'ERROR') assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] +def test_flag_using_big_segment(): + segment = { + 'key': 'segkey', + 'version': 1, + 'generation': 1, + 'unbounded': True + } + flag = make_boolean_flag_matching_segment(segment) + store = InMemoryFeatureStore() + store.init({ FEATURES: { flag['key']: flag }, SEGMENTS: { segment['key']: segment } }) + segstore = MockBigSegmentStore() + segstore.setup_metadata_always_up_to_date() + segstore.setup_membership(_hash_for_user_key(user['key']), { _make_big_segment_ref(segment): True }) + config=Config( + sdk_key='SDK_KEY', + feature_store=store, + big_segments=BigSegmentsConfig(store=segstore), + event_processor_class=MockEventProcessor, + update_processor_class=MockUpdateProcessor + ) + with LDClient(config) as client: + detail = client.variation_detail(flag['key'], user, False) + assert detail.value == True + assert detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY + def test_all_flags_returns_values(): store = InMemoryFeatureStore() store.init({ FEATURES: { 'key1': flag1, 'key2': flag2 } }) diff --git a/testing/test_ldclient_listeners.py b/testing/test_ldclient_listeners.py new file mode 100644 index 00000000..b160135e --- /dev/null +++ b/testing/test_ldclient_listeners.py @@ -0,0 +1,47 @@ +from ldclient.client import LDClient, Config +from ldclient.config import BigSegmentsConfig +from testing.mock_components import MockBigSegmentStore +from testing.stub_util import MockEventProcessor, MockUpdateProcessor + +from queue import Queue + +def test_big_segment_store_status_unavailable(): + config=Config( + sdk_key='SDK_KEY', + event_processor_class=MockEventProcessor, + update_processor_class=MockUpdateProcessor + ) + client = LDClient(config) + assert client.big_segment_store_status_provider.status.available == False + +def test_big_segment_store_status_updates(): + segstore = MockBigSegmentStore() + segstore.setup_metadata_always_up_to_date() + config=Config( + sdk_key='SDK_KEY', + big_segments=BigSegmentsConfig(store=segstore, status_poll_interval=0.01), + event_processor_class=MockEventProcessor, + update_processor_class=MockUpdateProcessor + ) + statuses = Queue() + + with LDClient(config) as client: + client.big_segment_store_status_provider.add_listener(lambda status: statuses.put(status)) + + status1 = client.big_segment_store_status_provider.status + assert status1.available == True + assert status1.stale == False + + segstore.setup_metadata_always_stale() + + status2 = statuses.get(True, 1.0) + assert status2.available == True + assert status2.stale == True + + 
segstore.setup_metadata_always_up_to_date() + + status3 = statuses.get(True, 1.0) + assert status3.available == True + assert status3.stale == False + assert client.big_segment_store_status_provider.status.available == True + From b5d96169d3a7b6626f38c6d5fdba112ee06ff028 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 1 Dec 2021 18:23:53 -0800 Subject: [PATCH 235/356] typing fixes --- ldclient/flag.py | 4 ++-- ldclient/impl/big_segments.py | 13 +++++-------- ldclient/impl/evaluator.py | 15 +++++++++------ testing/impl/evaluator_util.py | 2 +- testing/impl/test_big_segments.py | 7 ------- 5 files changed, 17 insertions(+), 24 deletions(-) diff --git a/ldclient/flag.py b/ldclient/flag.py index e5c40f1d..67dfa838 100644 --- a/ldclient/flag.py +++ b/ldclient/flag.py @@ -10,7 +10,7 @@ # In the future, ldclient.evaluation will be the preferred entry point for the public types and # ldclient.flag will be removed. -from ldclient.evaluation import EvaluationDetail +from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.impl.evaluator import Evaluator, EvalResult, error_reason from ldclient.versioned_data_kind import FEATURES, SEGMENTS @@ -19,7 +19,7 @@ def evaluate(flag, user, store, event_factory) -> EvalResult: evaluator = Evaluator( lambda key: store.get(FEATURES, key), lambda key: store.get(SEGMENTS, key), - lambda key: None + lambda key: (None, BigSegmentsStatus.NOT_CONFIGURED) ) return evaluator.evaluate(flag, user, event_factory) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index d39e9913..e27b9211 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -22,8 +22,8 @@ class BigSegmentStoreStatusProviderImpl(BigSegmentStoreStatusProvider): """ def __init__(self, status_getter: Callable[[], BigSegmentStoreStatus]): self.__status_getter = status_getter - self.__last_status = None self.__status_listeners = Listeners() + self.__last_status: Optional[BigSegmentStoreStatus] = None @property def status(self) -> BigSegmentStoreStatus: @@ -53,16 +53,13 @@ def __init__(self, config: BigSegmentsConfig): self.__stale_after_millis = config.stale_after * 1000 self.__status_provider = BigSegmentStoreStatusProviderImpl(self.get_status) - - self.__last_status = None - """ :type: Optional[BigSegmentStoreStatus] """ + self.__last_status: Optional[BigSegmentStoreStatus] = None + self.__poll_task: Optional[RepeatingTask] = None if self.__store: self.__cache = ExpiringDict(max_len = config.user_cache_size, max_age_seconds=config.user_cache_size) self.__poll_task = RepeatingTask(config.status_poll_interval, 0, self.poll_store_and_update_status) self.__poll_task.start() - else: - self.__poll_task = None def stop(self): if self.__poll_task: @@ -74,9 +71,9 @@ def stop(self): def status_provider(self) -> BigSegmentStoreStatusProvider: return self.__status_provider - def get_user_membership(self, user_key: str) -> Optional[Tuple[Optional[dict], str]]: + def get_user_membership(self, user_key: str) -> Tuple[Optional[dict], str]: if not self.__store: - return None + return (None, BigSegmentsStatus.NOT_CONFIGURED) membership = self.__cache.get(user_key) if membership is None: try: diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index d272aaa2..90b4ccfa 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -50,8 +50,15 @@ def __init__( self, get_flag: Callable[[str], Optional[dict]], get_segment: Callable[[str], Optional[dict]], - get_big_segments_membership: Callable[[str], Optional[Tuple[dict, 
BigSegmentsStatus]]] + get_big_segments_membership: Callable[[str], Tuple[Optional[dict], str]] ): + """ + :param get_flag: function provided by LDClient that takes a flag key and returns either the flag or None + :param get_segment: same as get_flag but for segments + :param get_big_segments_membership: takes a user key (not a user hash) and returns a tuple of + (membership, status) where membership is as defined in BigSegmentStore, and status is one + of the BigSegmentStoreStatus constants + """ self.__get_flag = get_flag self.__get_segment = get_segment self.__get_big_segments_membership = get_big_segments_membership @@ -142,11 +149,7 @@ def _big_segment_match_user(self, segment: dict, user: dict, state: EvalResult): if state.big_segments_status is None: user_key = str(user.get('key')) result = self.__get_big_segments_membership(user_key) - if result: - state.big_segments_membership, state.big_segments_status = result - else: - state.big_segments_membership = None - state.big_segments_status = BigSegmentsStatus.NOT_CONFIGURED + state.big_segments_membership, state.big_segments_status = result segment_ref = _make_big_segment_ref(segment) membership = state.big_segments_membership included = None if membership is None else membership.get(segment_ref, None) diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index 2322f5bf..9bae2dc1 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -62,7 +62,7 @@ def _get_segment(self, key: str) -> Optional[dict]: raise Exception("test made unexpected request for segment '%s'" % key) return self.__segments[key] - def _get_big_segments_membership(self, key: str) -> Optional[Tuple[dict, BigSegmentsStatus]]: + def _get_big_segments_membership(self, key: str) -> Tuple[Optional[dict], str]: if key not in self.__big_segments: raise Exception("test made unexpected request for big segments for user key '%s'" % key) return (self.__big_segments[key], self.__big_segments_status) diff --git a/testing/impl/test_big_segments.py b/testing/impl/test_big_segments.py index 1e764810..f433db56 100644 --- a/testing/impl/test_big_segments.py +++ b/testing/impl/test_big_segments.py @@ -11,13 +11,6 @@ user_hash = _hash_for_user_key(user_key) -def always_up_to_date() -> BigSegmentStoreMetadata: - return BigSegmentStoreMetadata(time.time() * 1000) - -def always_stale() -> BigSegmentStoreMetadata: - return BigSegmentStoreMetadata(0) - - def test_membership_query_uncached_result_healthy_status(): expected_membership = { "key1": True, "key2": False } store = MockBigSegmentStore() From 5e754420725871db4781b288fb539d19da102451 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 1 Dec 2021 18:39:02 -0800 Subject: [PATCH 236/356] typing fixes --- ldclient/client.py | 8 +++---- ldclient/impl/big_segments.py | 6 ++--- ldclient/versioned_data_kind.py | 39 +++++++++++++++++++++++++++------ 3 files changed, 38 insertions(+), 15 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 67916398..2bb98aaa 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -21,7 +21,7 @@ from ldclient.impl.evaluator import Evaluator, error_reason from ldclient.impl.event_factory import _EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor -from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureStore +from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureRequester, FeatureStore from ldclient.polling import PollingUpdateProcessor from ldclient.streaming import 
StreamingUpdateProcessor from ldclient.util import check_uwsgi, log @@ -88,8 +88,7 @@ def __init__(self, config: Config, start_wait: float=5): self._event_factory_with_reasons = _EventFactory(True) store = _FeatureStoreClientWrapper(self._config.feature_store) - self._store = store - """ :type: FeatureStore """ + self._store = store # type: FeatureStore big_segment_store_manager = BigSegmentStoreManager(self._config.big_segments) self.__big_segment_store_manager = big_segment_store_manager @@ -151,8 +150,7 @@ def _make_update_processor(self, config, store, ready, diagnostic_accumulator): if config.feature_requester_class: feature_requester = config.feature_requester_class(config) else: - feature_requester = FeatureRequesterImpl(config) - """ :type: FeatureRequester """ + feature_requester = FeatureRequesterImpl(config) # type: FeatureRequester return PollingUpdateProcessor(config, feature_requester, store, ready) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index e27b9211..d06a0563 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -23,7 +23,7 @@ class BigSegmentStoreStatusProviderImpl(BigSegmentStoreStatusProvider): def __init__(self, status_getter: Callable[[], BigSegmentStoreStatus]): self.__status_getter = status_getter self.__status_listeners = Listeners() - self.__last_status: Optional[BigSegmentStoreStatus] = None + self.__last_status = None # type: Optional[BigSegmentStoreStatus] @property def status(self) -> BigSegmentStoreStatus: @@ -53,8 +53,8 @@ def __init__(self, config: BigSegmentsConfig): self.__stale_after_millis = config.stale_after * 1000 self.__status_provider = BigSegmentStoreStatusProviderImpl(self.get_status) - self.__last_status: Optional[BigSegmentStoreStatus] = None - self.__poll_task: Optional[RepeatingTask] = None + self.__last_status = None # type: Optional[BigSegmentStoreStatus] + self.__poll_task = None # type: Optional[RepeatingTask] if self.__store: self.__cache = ExpiringDict(max_len = config.user_cache_size, max_age_seconds=config.user_cache_size) diff --git a/ldclient/versioned_data_kind.py b/ldclient/versioned_data_kind.py index 37504394..910618e6 100644 --- a/ldclient/versioned_data_kind.py +++ b/ldclient/versioned_data_kind.py @@ -10,17 +10,42 @@ """ from collections import namedtuple +from typing import Callable, Iterable, Optional # Note that VersionedDataKind without the extra attributes is no longer used in the SDK, # but it's preserved here for backward compatibility just in case someone else used it -VersionedDataKind = namedtuple('VersionedDataKind', - ['namespace', 'request_api_path', 'stream_api_path']) +class VersionedDataKind: + def __init__(self, namespace: str, request_api_path: str, stream_api_path: str): + self._namespace = namespace + self._request_api_path = request_api_path + self._stream_api_path = stream_api_path -# Note, feature store implementors really don't need to know about this class so we could just -# not document it at all, but apparently namedtuple() creates its own docstrings so it's going -# to show up in any case. 
-VersionedDataKindWithOrdering = namedtuple('VersionedDataKindWithOrdering', - ['namespace', 'request_api_path', 'stream_api_path', 'priority', 'get_dependency_keys']) + @property + def namespace(self) -> str: + return self._namespace + + @property + def request_api_path(self) -> str: + return self._request_api_path + + @property + def stream_api_path(self) -> str: + return self._stream_api_path + +class VersionedDataKindWithOrdering(VersionedDataKind): + def __init__(self, namespace: str, request_api_path: str, stream_api_path: str, + priority: int, get_dependency_keys: Optional[Callable[[dict], Iterable[str]]]): + super().__init__(namespace, request_api_path, stream_api_path) + self._priority = priority + self._get_dependency_keys = get_dependency_keys + + @property + def priority(self) -> int: + return self._priority + + @property + def get_dependency_keys(self) -> Optional[Callable[[dict], Iterable[str]]]: + return self._get_dependency_keys FEATURES = VersionedDataKindWithOrdering(namespace = "features", request_api_path = "/sdk/latest-flags", From 7439e1fa833b52c4c400bba25d2ce0366b9de5ed Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 2 Dec 2021 14:32:11 -0800 Subject: [PATCH 237/356] implement SSE contract tests --- .circleci/config.yml | 9 +++ .gitignore | 1 + sse-contract-tests/Makefile | 20 ++++++ sse-contract-tests/requirements.txt | 2 + sse-contract-tests/service.py | 96 ++++++++++++++++++++++++++++ sse-contract-tests/stream_entity.py | 98 +++++++++++++++++++++++++++++ 6 files changed, 226 insertions(+) create mode 100644 sse-contract-tests/Makefile create mode 100644 sse-contract-tests/requirements.txt create mode 100644 sse-contract-tests/service.py create mode 100644 sse-contract-tests/stream_entity.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 8e7b5e85..0f195e4c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -90,6 +90,15 @@ jobs: command: | export PATH="/home/circleci/.local/bin:$PATH" mypy --config-file mypy.ini ldclient testing + + - run: + name: start SSE contract test service + command: cd sse-contract-tests && start-contract-test-service + background: true + - run: + name: run SSE contract tests + command: cd sse-contract-tests && run-contract-tests + - store_test_results: path: test-reports - store_artifacts: diff --git a/.gitignore b/.gitignore index f0def2a6..291d3e29 100644 --- a/.gitignore +++ b/.gitignore @@ -69,3 +69,4 @@ p2venv test-packaging-venv .vscode/ +.python-version diff --git a/sse-contract-tests/Makefile b/sse-contract-tests/Makefile new file mode 100644 index 00000000..b676f5ec --- /dev/null +++ b/sse-contract-tests/Makefile @@ -0,0 +1,20 @@ + +TEMP_TEST_OUTPUT=/tmp/sse-contract-test-service.log + +build-contract-tests: + @pip install -r requirements.txt + +start-contract-test-service: + @python service.py + +start-contract-test-service-bg: + @echo "Test service output will be captured in $(TEMP_TEST_OUTPUT)" + @make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & + +run-contract-tests: + @curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/master/downloader/run.sh \ + | VERSION=v1 PARAMS="-url http://localhost:8000 -debug -stop-service-at-end" sh + +contract-tests: build-contract-tests start-contract-test-service-bg run-contract-tests + +.PHONY: build-contract-tests start-contract-test-service run-contract-tests contract-tests diff --git a/sse-contract-tests/requirements.txt b/sse-contract-tests/requirements.txt new file mode 100644 index 00000000..2d1d2a7b --- /dev/null +++ 
b/sse-contract-tests/requirements.txt
@@ -0,0 +1,2 @@
+Flask==2.0.2
+urllib3>=1.22.0
diff --git a/sse-contract-tests/service.py b/sse-contract-tests/service.py
new file mode 100644
index 00000000..23e642aa
--- /dev/null
+++ b/sse-contract-tests/service.py
@@ -0,0 +1,96 @@
+from stream_entity import StreamEntity
+
+import json
+import logging
+import os
+import sys
+import threading
+import traceback
+import urllib3
+from flask import Flask, request
+from flask.logging import default_handler
+from logging.config import dictConfig
+
+# Import ldclient from parent directory
+sys.path.insert(1, os.path.join(sys.path[0], '..'))
+from ldclient.config import HTTPConfig
+from ldclient.impl.http import HTTPFactory
+from ldclient.sse_client import SSEClient
+
+port = 8000
+
+# logging configuration
+dictConfig({
+    'version': 1,
+    'formatters': {
+        'default': {
+            'format': '[%(asctime)s] [%(name)s] %(levelname)s: %(message)s',
+        }
+    },
+    'handlers': {
+        'console': {
+            'class': 'logging.StreamHandler',
+            'formatter': 'default'
+        }
+    },
+    'root': {
+        'level': 'INFO',
+        'handlers': ['console']
+    },
+    'loggers': {
+        'werkzeug': { 'level': 'ERROR' }  # disable irrelevant Flask app logging
+    }
+})
+
+app = Flask(__name__)
+app.logger.removeHandler(default_handler)
+
+stream_counter = 0
+streams = {}
+global_log = logging.getLogger('testservice')
+
+http_client = urllib3.PoolManager()
+
+@app.route('/', methods=['GET'])
+def status():
+    body = {
+        'capabilities': [
+            'headers',
+            'last-event-id'
+        ]
+    }
+    return (json.dumps(body), 200, {'Content-type': 'application/json'})
+
+@app.route('/', methods=['DELETE'])
+def delete_stop_service():
+    print("Test service has told us to exit")
+    quit()
+
+@app.route('/', methods=['POST'])
+def post_create_stream():
+    global stream_counter, streams
+
+    options = json.loads(request.data)
+
+    stream_counter += 1
+    stream_id = str(stream_counter)
+    resource_url = '/streams/%s' % stream_id
+
+    stream = StreamEntity(options)
+    streams[stream_id] = stream
+
+    return ('', 201, {'Location': resource_url})
+
+@app.route('/streams/<id>', methods=['DELETE'])
+def delete_stream(id):
+    global streams
+
+    stream = streams.get(id)
+    if stream is None:
+        return ('', 404)
+    stream.close()
+    return ('', 204)
+
+if __name__ == "__main__":
+    global_log.info('Listening on port %d', port)
+    app.run(host='0.0.0.0', port=8000)
diff --git a/sse-contract-tests/stream_entity.py b/sse-contract-tests/stream_entity.py
new file mode 100644
index 00000000..9f8bf2de
--- /dev/null
+++ b/sse-contract-tests/stream_entity.py
@@ -0,0 +1,98 @@
+import json
+import logging
+import os
+import sys
+import threading
+import traceback
+import urllib3
+
+# Import ldclient from parent directory
+sys.path.insert(1, os.path.join(sys.path[0], '..'))
+from ldclient.config import HTTPConfig
+from ldclient.impl.http import HTTPFactory
+from ldclient.sse_client import SSEClient
+
+port = 8000
+
+stream_counter = 0
+streams = {}
+
+http_client = urllib3.PoolManager()
+
+class StreamEntity:
+    def __init__(self, options):
+        self.options = options
+        self.callback_url = options["callbackUrl"]
+        self.log = logging.getLogger(options["tag"])
+        self.closed = False
+        self.callback_counter = 0
+
+        thread = threading.Thread(target=self.run)
+        thread.start()
+
+    def run(self):
+        stream_url = self.options["streamUrl"]
+        http_factory = HTTPFactory(
+            self.options.get("headers", {}),
+            HTTPConfig(read_timeout =
+                None if self.options.get("readTimeoutMs") is None else
+                self.options["readTimeoutMs"] / 1000)
+        )
+        try:
+            self.log.info('Opening stream from %s', stream_url)
+            sse = SSEClient(
+                stream_url,
+                retry =
+                    None if self.options.get("initialDelayMs") is None else
+                    self.options.get("initialDelayMs") / 1000,
+                last_id = self.options.get("lastEventId"),
+                http_factory = http_factory
+            )
+            self.sse = sse
+            for message in sse:
+                self.log.info('Received event from stream (%s)', message.event)
+                self.send_message({
+                    'kind': 'event',
+                    'event': {
+                        'type': message.event,
+                        'data': message.data,
+                        'id': message.id
+                    }
+                })
+            self.send_message({
+                'kind': 'error',
+                'error': 'Stream closed'
+            })
+        except Exception as e:
+            self.log.info('Received error from stream: %s', e)
+            self.log.debug(traceback.format_exc())
+            self.send_message({
+                'kind': 'error',
+                'error': str(e)
+            })
+
+    def send_message(self, message):
+        global http_client
+
+        if self.closed:
+            return
+        self.callback_counter += 1
+        callback_url = "%s/%d" % (self.options["callbackUrl"], self.callback_counter)
+
+        try:
+            resp = http_client.request(
+                'POST',
+                callback_url,
+                headers = {'Content-Type': 'application/json'},
+                body = json.dumps(message)
+            )
+            if resp.status >= 300 and not self.closed:
+                self.log.error('Callback request returned HTTP error %d', resp.status)
+        except Exception as e:
+            if not self.closed:
+                self.log.error('Callback request failed: %s', e)
+
+    def close(self):
+        # how to close the stream??
+        self.closed = True
+        self.log.info('Test ended')

From e67d91589642b4b53b3d2e35e4a019e5dd5b5a0f Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Thu, 2 Dec 2021 14:33:50 -0800
Subject: [PATCH 238/356] fix CI

---
 .circleci/config.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 0f195e4c..e23dd0e9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -93,11 +93,11 @@ jobs:
 
       - run:
           name: start SSE contract test service
-          command: cd sse-contract-tests && start-contract-test-service
+          command: cd sse-contract-tests && make start-contract-test-service
           background: true
       - run:
           name: run SSE contract tests
-          command: cd sse-contract-tests && run-contract-tests
+          command: cd sse-contract-tests && make run-contract-tests
 
     - store_test_results:
         path: test-reports

From 38d15c9bd206cd2336db2b11cb3a4410be9e8837 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Thu, 2 Dec 2021 14:39:19 -0800
Subject: [PATCH 239/356] fix CI again

---
 .circleci/config.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index e23dd0e9..d0250a2c 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -91,6 +91,9 @@ jobs:
           export PATH="/home/circleci/.local/bin:$PATH"
           mypy --config-file mypy.ini ldclient testing
 
+      - run:
+          name: build SSE contract test service
+          command: cd sse-contract-tests && make build-contract-test-service
       - run:
           name: start SSE contract test service
           command: cd sse-contract-tests && make start-contract-test-service
           background: true

From fb93d872a4aaf73890f7741ce8f720bf8d6ede0e Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Thu, 2 Dec 2021 14:42:59 -0800
Subject: [PATCH 240/356] fix CI

---
 .circleci/config.yml        |  4 ++--
 sse-contract-tests/Makefile | 10 +++++-----
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index d0250a2c..839baa32 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -93,10 +93,10 @@ jobs:
 
       - run:
           name: build SSE contract test service
-          command: cd sse-contract-tests && make build-contract-test-service
+          command: cd sse-contract-tests && make build-test-service
       - run:
          name: start SSE contract test service
-          command: cd sse-contract-tests && make start-contract-test-service
+          command: cd sse-contract-tests && make start-test-service
           background: true
       - run:
           name: run SSE contract tests
           command: cd sse-contract-tests && make run-contract-tests
diff --git a/sse-contract-tests/Makefile b/sse-contract-tests/Makefile
index b676f5ec..080e62e8 100644
--- a/sse-contract-tests/Makefile
+++ b/sse-contract-tests/Makefile
@@ -1,13 +1,13 @@
 
 TEMP_TEST_OUTPUT=/tmp/sse-contract-test-service.log
 
-build-contract-tests:
+build-test-service:
 	@pip install -r requirements.txt
 
-start-contract-test-service:
+start-test-service:
 	@python service.py
 
-start-contract-test-service-bg:
+start-test-service-bg:
 	@echo "Test service output will be captured in $(TEMP_TEST_OUTPUT)"
 	@make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 &
 
 run-contract-tests:
 	@curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/master/downloader/run.sh \
 	| VERSION=v1 PARAMS="-url http://localhost:8000 -debug -stop-service-at-end" sh
 
-contract-tests: build-contract-tests start-contract-test-service-bg run-contract-tests
+contract-tests: build-test-service start-test-service-bg run-contract-tests
 
-.PHONY: build-contract-tests start-contract-test-service run-contract-tests contract-tests
+.PHONY: build-test-service start-test-service start-test-service-bg run-contract-tests contract-tests

From 2cd71a608eeb124355f3d15aba01ccef4e52559f Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Thu, 2 Dec 2021 14:50:49 -0800
Subject: [PATCH 241/356] disable SSE tests in Python 3.5

---
 .circleci/config.yml | 27 +++++++++++++++++----------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 839baa32..8aea6976 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -9,6 +9,7 @@ workflows:
       - test-linux:
           name: Python 3.5
           docker-image: cimg/python:3.5
+          skip-sse-contract-tests: true  # the test service app has dependencies that aren't available in 3.5, which is EOL anyway
       - test-linux:
           name: Python 3.6
           docker-image: cimg/python:3.6
@@ -42,6 +43,9 @@ jobs:
     test-with-mypy:
       type: boolean
      default: true
+    skip-sse-contract-tests:
+      type: boolean
+      default: false
   docker:
     - image: <<parameters.docker-image>>
     - image: redis
@@ -91,16 +95,19 @@ jobs:
         command: |
           export PATH="/home/circleci/.local/bin:$PATH"
           mypy --config-file mypy.ini ldclient testing
 
-      - run:
-          name: build SSE contract test service
-          command: cd sse-contract-tests && make build-test-service
-      - run:
-          name: start SSE contract test service
-          command: cd sse-contract-tests && make start-test-service
-          background: true
-      - run:
-          name: run SSE contract tests
-          command: cd sse-contract-tests && make run-contract-tests
+      - unless:
+          condition: <<parameters.skip-sse-contract-tests>>
+          steps:
+            - run:
+                name: build SSE contract test service
+                command: cd sse-contract-tests && make build-test-service
+            - run:
+                name: start SSE contract test service
+                command: cd sse-contract-tests && make start-test-service
+                background: true
+            - run:
+                name: run SSE contract tests
+                command: cd sse-contract-tests && make run-contract-tests
 
     - store_test_results:
         path: test-reports

From aa24aacae188726e94c6458387677751d9f9c70d Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Thu, 2 Dec 2021 14:56:37 -0800
Subject: [PATCH 242/356] make test service port configurable

---
 sse-contract-tests/Makefile   |  7 +++++--
 sse-contract-tests/service.py | 15 +++++----------
 2 files changed, 10 insertions(+), 12 deletions(-)

diff --git a/sse-contract-tests/Makefile b/sse-contract-tests/Makefile
index 080e62e8..d39a2bc3 100644
--- a/sse-contract-tests/Makefile
+++ b/sse-contract-tests/Makefile
@@ -1,11 +1,14 @@
 
 TEMP_TEST_OUTPUT=/tmp/sse-contract-test-service.log
 
+# port 8000 is already used in the CI environment because we're running a DynamoDB container
+PORT=9000
+
 build-test-service:
 	@pip install -r requirements.txt
 
 start-test-service:
-	@python service.py
+	@python service.py $(PORT)
 
 start-test-service-bg:
 	@echo "Test service output will be captured in $(TEMP_TEST_OUTPUT)"
@@ -13,7 +16,7 @@ start-test-service-bg:
 
 run-contract-tests:
 	@curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/master/downloader/run.sh \
-	| VERSION=v1 PARAMS="-url http://localhost:8000 -debug -stop-service-at-end" sh
+	| VERSION=v1 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end" sh
 
 contract-tests: build-test-service start-test-service-bg run-contract-tests
 
diff --git a/sse-contract-tests/service.py b/sse-contract-tests/service.py
index 23e642aa..6d07fc59 100644
--- a/sse-contract-tests/service.py
+++ b/sse-contract-tests/service.py
@@ -4,20 +4,12 @@
 import logging
 import os
 import sys
-import threading
-import traceback
 import urllib3
 from flask import Flask, request
 from flask.logging import default_handler
 from logging.config import dictConfig
 
-# Import ldclient from parent directory
-sys.path.insert(1, os.path.join(sys.path[0], '..'))
-from ldclient.config import HTTPConfig
-from ldclient.impl.http import HTTPFactory
-from ldclient.sse_client import SSEClient
-
-port = 8000
+default_port = 8000
 
 # logging configuration
 dictConfig({
@@ -92,5 +84,8 @@ def delete_stream(id):
     return ('', 204)
 
 if __name__ == "__main__":
+    port = default_port
+    if len(sys.argv) > 1:
+        port = int(sys.argv[1])
     global_log.info('Listening on port %d', port)
-    app.run(host='0.0.0.0', port=8000)
+    app.run(host='0.0.0.0', port=port)

From 9414b33fb6281cc3eee46180eedadd10c50efdb3 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Fri, 3 Dec 2021 11:30:39 -0800
Subject: [PATCH 243/356] better SSE implementation that fixes linefeed and multi-byte char issues

---
 ldclient/impl/sse.py                | 195 ++++++++++++++++++++++++++++
 ldclient/sse_client.py              |  22 +++-
 ldclient/streaming.py               |   7 +-
 sse-contract-tests/Makefile         |   6 +-
 sse-contract-tests/stream_entity.py |   6 +-
 testing/impl/__init__.py            |   0
 testing/impl/test_sse.py            |  58 +++++++++
 7 files changed, 279 insertions(+), 15 deletions(-)
 create mode 100644 ldclient/impl/sse.py
 create mode 100644 testing/impl/__init__.py
 create mode 100644 testing/impl/test_sse.py

diff --git a/ldclient/impl/sse.py b/ldclient/impl/sse.py
new file mode 100644
index 00000000..f34e9c74
--- /dev/null
+++ b/ldclient/impl/sse.py
@@ -0,0 +1,195 @@
+import urllib3
+
+from ldclient.config import HTTPConfig
+from ldclient.impl.http import HTTPFactory
+from ldclient.util import throw_if_unsuccessful_response
+
+
+class _BufferedLineReader:
+    def lines_from(chunks):
+        last_char_was_cr = False
+        partial_line = None
+
+        for chunk in chunks:
+            if len(chunk) == 0:
+                continue
+
+            # bytes.splitlines() will correctly break lines at \n, \r, or \r\n, and is faster than
+            # iterating through the characters in Python code. However, we have to adjust the results
+            # in several ways as described below.
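+            # Illustrative example: b"a\r\nb\nc".splitlines() == [b"a", b"b", b"c"] - the
+            # terminators themselves are discarded, so we must track partial lines and a
+            # trailing \r across chunk boundaries ourselves.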
+            lines = chunk.splitlines()
+            if last_char_was_cr:
+                last_char_was_cr = False
+                if chunk[0] == 10:
+                    # If the last character we saw was \r, and then the first character in chunk is \n, then
+                    # that's just a single \r\n terminator, so we should remove the extra blank line that
+                    # splitlines added for that first \n.
+                    lines.pop(0)
+                    if len(lines) == 0:
+                        continue  # ran out of data, continue to get next chunk
+            if partial_line is not None:
+                # On our last time through the loop, we ended up with an unterminated line, so we should
+                # treat our first parsed line here as a continuation of that.
+                lines[0] = partial_line + lines[0]
+                partial_line = None
+            # Check whether the buffer really ended in a terminator. If it did not, then the last line in
+            # lines is a partial line and should not be emitted yet.
+            last_char = chunk[len(chunk)-1]
+            if last_char == 13:
+                last_char_was_cr = True  # remember this in case the next chunk starts with \n
+            elif last_char != 10:
+                partial_line = lines.pop()  # remove last element which is the partial line
+            for line in lines:
+                yield line.decode()
+
+
+class Event:
+    """
+    An event received by SSEClient.
+    """
+    def __init__(self, event='message', data='', last_event_id=None, retry=None):
+        self._event = event
+        self._data = data
+        self._id = last_event_id
+        self._retry = retry
+
+    @property
+    def event(self):
+        """
+        The event type, or "message" if not specified.
+        """
+        return self._event
+
+    @property
+    def data(self):
+        """
+        The event data.
+        """
+        return self._data
+
+    @property
+    def last_event_id(self):
+        """
+        The last non-empty "id" value received from this stream so far.
+        """
+        return self._id
+
+    def dump(self):
+        lines = []
+        if self.last_event_id:
+            lines.append('id: %s' % self.last_event_id)
+
+        # Only include an event line if it's not the default already.
+        if self.event != 'message':
+            lines.append('event: %s' % self.event)
+
+        lines.extend('data: %s' % d for d in self.data.split('\n'))
+        return '\n'.join(lines) + '\n\n'
+
+
+class SSEClient:
+    """
+    A simple Server-Sent Events client.
+
+    This implementation does not include automatic retrying of a dropped connection; the caller will do that.
+    If a connection ends, the events iterator will simply end.
+    """
+    def __init__(self, url, last_id=None, connect_timeout=10, read_timeout=300, chunk_size=10000,
+                 verify_ssl=False, http=None, http_proxy=None, http_factory=None, **kwargs):
+        self.url = url
+        self.last_id = last_id
+        self._chunk_size = chunk_size
+
+        if http_factory:
+            self._timeout = http_factory.timeout
+            base_headers = http_factory.base_headers
+        else:
+            # for backward compatibility in case anyone else is using this class
+            self._timeout = urllib3.Timeout(connect=connect_timeout, read=read_timeout)
+            base_headers = {}
+
+        # Optional support for passing in an HTTP client
+        if http:
+            self.http = http
+        else:
+            hf = http_factory
+            if hf is None:  # build from individual parameters which we're only retaining for backward compatibility
+                hc = HTTPConfig(
+                    connect_timeout=connect_timeout,
+                    read_timeout=read_timeout,
+                    disable_ssl_verification=not verify_ssl,
+                    http_proxy=http_proxy
+                )
+                hf = HTTPFactory({}, hc)
+            self.http = hf.create_pool_manager(1, url)
+
+        # Any extra kwargs will be fed into the request call later.
+        self.requests_kwargs = kwargs
+
+        # The SSE spec requires making requests with Cache-Control: no-cache
+        if 'headers' not in self.requests_kwargs:
+            self.requests_kwargs['headers'] = {}
+
+        self.requests_kwargs['headers'].update(base_headers)
+
+        self.requests_kwargs['headers']['Cache-Control'] = 'no-cache'
+
+        # The 'Accept' header is not required, but explicit > implicit
+        self.requests_kwargs['headers']['Accept'] = 'text/event-stream'
+
+        self._connect()
+
+    def _connect(self):
+        if self.last_id:
+            self.requests_kwargs['headers']['Last-Event-ID'] = self.last_id
+
+        # Make the request through the urllib3 pool manager (or the client passed in via 'http').
+        self.resp = self.http.request(
+            'GET',
+            self.url,
+            timeout=self._timeout,
+            preload_content=False,
+            retries=0,  # caller is responsible for implementing appropriate retry semantics, e.g. backoff
+            **self.requests_kwargs)
+
+        # Raw readlines doesn't work because we may be missing newline characters until the next chunk.
+        # For some reason, we also need to specify a chunk size because streaming reads don't seem to
+        # guarantee that we get the newlines in a timely manner.
+        self.resp_file = self.resp.stream(amt=self._chunk_size)
+
+        # TODO: Ensure we're handling redirects. Might also stick the 'origin'
+        # attribute on Events like the Javascript spec requires.
+        throw_if_unsuccessful_response(self.resp)
+
+    @property
+    def events(self):
+        """
+        An iterable series of Event objects received from the stream.
+        """
+        event_type = ""
+        event_data = None
+        for line in _BufferedLineReader.lines_from(self.resp_file):
+            if line == "":
+                if event_data is not None:
+                    yield Event("message" if event_type == "" else event_type, event_data, self.last_id)
+                    event_type = ""
+                    event_data = None
+                continue
+            colon_pos = line.find(':')
+            if colon_pos < 0:
+                continue  # malformed line - ignore
+            if colon_pos == 0:
+                continue  # comment - currently we're not surfacing these
+            name = line[0:colon_pos]
+            if colon_pos < (len(line) - 1) and line[colon_pos + 1] == ' ':
+                colon_pos += 1
+            value = line[colon_pos+1:]
+            if name == 'event':
+                event_type = value
+            elif name == 'data':
+                event_data = value if event_data is None else (event_data + "\n" + value)
+            elif name == 'id':
+                self.last_id = value
+            elif name == 'retry':
+                pass  # auto-reconnect is not implemented in this simplified client
+            # unknown field names are ignored in SSE
diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py
index e1531f8c..80dea242 100644
--- a/ldclient/sse_client.py
+++ b/ldclient/sse_client.py
@@ -1,10 +1,14 @@
-"""
-Server-Sent Events implementation for streaming.
-
-Based on: https://bitbucket.org/btubbs/sseclient/src/a47a380a3d7182a205c0f1d5eb470013ce796b4d/sseclient.py?at=default&fileviewer=file-view-default
-"""
-# currently excluded from documentation - see docs/README.md
-
+#
+# This deprecated implementation was based on:
+# https://bitbucket.org/btubbs/sseclient/src/a47a380a3d7182a205c0f1d5eb470013ce796b4d/sseclient.py?at=default&fileviewer=file-view-default
+#
+# It has the following known issues:
+# - It does not properly handle line terminators other than \n.
+# - It does not properly handle multi-line data that starts with a blank line.
+# - It fails if a multi-byte character is split across chunks of the stream.
+#
+# It is replaced by the ldclient.impl.sse module.
+#
 import re
 import time
 
@@ -21,6 +25,10 @@
 
 
 class SSEClient:
+    """
+    This class is deprecated and no longer used in the SDK. It is retained here for backward compatibility in case
It is retained here for backward compatibility in case + any external code was referencing it, but it will be removed in a future major version. + """ def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeout=300, chunk_size=10000, verify_ssl=False, http=None, http_proxy=None, http_factory=None, **kwargs): self.url = url diff --git a/ldclient/streaming.py b/ldclient/streaming.py index 061bca65..2255b419 100644 --- a/ldclient/streaming.py +++ b/ldclient/streaming.py @@ -9,13 +9,12 @@ from threading import Thread import logging -import math import time from ldclient.impl.http import HTTPFactory, _http_factory from ldclient.impl.retry_delay import RetryDelayStrategy, DefaultBackoffStrategy, DefaultJitterStrategy +from ldclient.impl.sse import SSEClient from ldclient.interfaces import UpdateProcessor -from ldclient.sse_client import SSEClient from ldclient.util import log, UnsuccessfulResponseException, http_error_message, is_http_error_recoverable from ldclient.versioned_data_kind import FEATURES, SEGMENTS @@ -106,11 +105,11 @@ def _connect(self): # We don't want the stream to use the same read timeout as the rest of the SDK. http_factory = _http_factory(self._config) stream_http_factory = HTTPFactory(http_factory.base_headers, http_factory.http_config, override_read_timeout=stream_read_timeout) - return SSEClient( + client = SSEClient( self._uri, - retry = None, # we're implementing our own retry http_factory = stream_http_factory ) + return client.events def stop(self): log.info("Stopping StreamingUpdateProcessor") diff --git a/sse-contract-tests/Makefile b/sse-contract-tests/Makefile index d39a2bc3..4d9327a3 100644 --- a/sse-contract-tests/Makefile +++ b/sse-contract-tests/Makefile @@ -4,6 +4,10 @@ TEMP_TEST_OUTPUT=/tmp/sse-contract-test-service.log # port 8000 is already used in the CI environment because we're running a DynamoDB container PORT=9000 +# we're skipping the "reconnection" test group because the simplified SSE client we're currently using +# does not do automatic retrying of connections - that is done at a higher level in the SDK +EXTRA_TEST_PARAMS=-skip reconnection + build-test-service: @pip install -r requirements.txt @@ -16,7 +20,7 @@ start-test-service-bg: run-contract-tests: @curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/master/downloader/run.sh \ - | VERSION=v1 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end" sh + | VERSION=v1 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end $(EXTRA_TEST_PARAMS)" sh contract-tests: build-test-service start-test-service-bg run-contract-tests diff --git a/sse-contract-tests/stream_entity.py b/sse-contract-tests/stream_entity.py index 9f8bf2de..b526116c 100644 --- a/sse-contract-tests/stream_entity.py +++ b/sse-contract-tests/stream_entity.py @@ -10,7 +10,7 @@ sys.path.insert(1, os.path.join(sys.path[0], '..')) from ldclient.config import HTTPConfig from ldclient.impl.http import HTTPFactory -from ldclient.sse_client import SSEClient +from ldclient.impl.sse import SSEClient port = 8000 @@ -49,14 +49,14 @@ def run(self): http_factory = http_factory ) self.sse = sse - for message in sse: + for message in sse.events: self.log.info('Received event from stream (%s)', message.event) self.send_message({ 'kind': 'event', 'event': { 'type': message.event, 'data': message.data, - 'id': message.id + 'id': message.last_event_id } }) self.send_message({ diff --git a/testing/impl/__init__.py b/testing/impl/__init__.py new file mode 100644 index 00000000..e69de29b 
diff --git a/testing/impl/test_sse.py b/testing/impl/test_sse.py new file mode 100644 index 00000000..3ffaec28 --- /dev/null +++ b/testing/impl/test_sse.py @@ -0,0 +1,58 @@ +from ldclient.impl.sse import _BufferedLineReader + +import pytest + + +class TestBufferedLineReader: + @pytest.fixture(params = ["\r", "\n", "\r\n"]) + def terminator(self, request): + return request.param + + @pytest.fixture(params = [ + [ + [ "first line*", "second line*", "3rd line*" ], + [ "first line", "second line", "3rd line"] + ], + [ + [ "*", "second line*", "3rd line*" ], + [ "", "second line", "3rd line"] + ], + [ + [ "first line*", "*", "3rd line*" ], + [ "first line", "", "3rd line"] + ], + [ + [ "first line*", "*", "*", "*", "3rd line*" ], + [ "first line", "", "", "", "3rd line" ] + ], + [ + [ "first line*second line*third", " line*fourth line*"], + [ "first line", "second line", "third line", "fourth line" ] + ], + ]) + def inputs_outputs(self, terminator, request): + inputs = list(s.replace("*", terminator).encode() for s in request.param[0]) + return [inputs, request.param[1]] + + def test_parsing(self, inputs_outputs): + assert list(_BufferedLineReader.lines_from(inputs_outputs[0])) == inputs_outputs[1] + + def test_mixed_terminators(self): + chunks = [ + b"first line\nsecond line\r\nthird line\r", + b"\nfourth line\r", + b"\r\nlast\r\n" + ] + expected = [ + "first line", + "second line", + "third line", + "fourth line", + "", + "last" + ] + assert list(_BufferedLineReader.lines_from(chunks)) == expected + + +class TestSSEClient: + pass From 3588db70e3db1b5df5cfa4a4db275263e62dbee0 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 3 Dec 2021 11:45:04 -0800 Subject: [PATCH 244/356] fix constructor parameters in test service --- ldclient/impl/sse.py | 3 +-- sse-contract-tests/stream_entity.py | 9 +++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ldclient/impl/sse.py b/ldclient/impl/sse.py index f34e9c74..c2b14dcb 100644 --- a/ldclient/impl/sse.py +++ b/ldclient/impl/sse.py @@ -47,11 +47,10 @@ class Event: """ An event received by SSEClient. 
""" - def __init__(self, event='message', data='', last_event_id=None, retry=None): + def __init__(self, event='message', data='', last_event_id=None): self._event = event self._data = data self._id = last_event_id - self._retry = retry @property def event(self): diff --git a/sse-contract-tests/stream_entity.py b/sse-contract-tests/stream_entity.py index b526116c..ac5c7d00 100644 --- a/sse-contract-tests/stream_entity.py +++ b/sse-contract-tests/stream_entity.py @@ -42,9 +42,10 @@ def run(self): self.log.info('Opening stream from %s', stream_url) sse = SSEClient( stream_url, - retry = - None if self.options.get("initialDelayMs") is None else - self.options.get("initialDelayMs") / 1000, + # Currently this client implementation does not support automatic retry + # retry = + # None if self.options.get("initialDelayMs") is None else + # self.options.get("initialDelayMs") / 1000, last_id = self.options.get("lastEventId"), http_factory = http_factory ) @@ -65,7 +66,7 @@ def run(self): }) except Exception as e: self.log.info('Received error from stream: %s', e) - self.log.debug(traceback.format_exc()) + self.log.info(traceback.format_exc()) self.send_message({ 'kind': 'error', 'error': str(e) From 8efc7bf25e9ee2e314787139037bfa1ced4089dd Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 3 Dec 2021 11:55:57 -0800 Subject: [PATCH 245/356] comment --- ldclient/impl/sse.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ldclient/impl/sse.py b/ldclient/impl/sse.py index c2b14dcb..9b2bf774 100644 --- a/ldclient/impl/sse.py +++ b/ldclient/impl/sse.py @@ -6,7 +6,15 @@ class _BufferedLineReader: + """ + Helper class that encapsulates the logic for reading UTF-8 stream data as a series of text lines, + each of which can be terminated by \n, \r, or \r\n. + """ def lines_from(chunks): + """ + Takes an iterable series of encoded chunks (each of "bytes" type) and parses it into an iterable + series of strings, each of which is one line of text. The line does not include the terminator. 
+ """ last_char_was_cr = False partial_line = None From 7fdd3b301d738fb4dc0116b813ab5b0425320bc6 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 3 Dec 2021 12:30:40 -0800 Subject: [PATCH 246/356] test improvements --- ldclient/impl/sse.py | 6 ++++++ sse-contract-tests/Makefile | 2 +- sse-contract-tests/README.md | 5 +++++ testing/impl/test_sse.py | 35 +++++++++++++++++++++++++++++++++-- 4 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 sse-contract-tests/README.md diff --git a/ldclient/impl/sse.py b/ldclient/impl/sse.py index 9b2bf774..de6c1a5f 100644 --- a/ldclient/impl/sse.py +++ b/ldclient/impl/sse.py @@ -200,3 +200,9 @@ def events(self): elif name == 'retry': pass # auto-reconnect is not implemented in this simplified client # unknown field names are ignored in SSE + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() diff --git a/sse-contract-tests/Makefile b/sse-contract-tests/Makefile index 4d9327a3..37f69644 100644 --- a/sse-contract-tests/Makefile +++ b/sse-contract-tests/Makefile @@ -16,7 +16,7 @@ start-test-service: start-test-service-bg: @echo "Test service output will be captured in $(TEMP_TEST_OUTPUT)" - @make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & + @make start-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & run-contract-tests: @curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/master/downloader/run.sh \ diff --git a/sse-contract-tests/README.md b/sse-contract-tests/README.md new file mode 100644 index 00000000..f5892c91 --- /dev/null +++ b/sse-contract-tests/README.md @@ -0,0 +1,5 @@ +# SSE client contract test service + +This directory contains an implementation of the cross-platform SSE testing protocol defined by https://github.com/launchdarkly/sse-contract-tests. See that project's `README` for details of this protocol, and the kinds of SSE client capabilities that are relevant to the contract tests. This code should not need to be updated unless the SSE client has added or removed such capabilities. + +To run these tests locally, run `make contract-tests`. This downloads the correct version of the test harness tool automatically. diff --git a/testing/impl/test_sse.py b/testing/impl/test_sse.py index 3ffaec28..9e006531 100644 --- a/testing/impl/test_sse.py +++ b/testing/impl/test_sse.py @@ -1,4 +1,6 @@ -from ldclient.impl.sse import _BufferedLineReader +from ldclient.impl.sse import _BufferedLineReader, SSEClient + +from testing.http_util import ChunkedResponse, start_server import pytest @@ -54,5 +56,34 @@ def test_mixed_terminators(self): assert list(_BufferedLineReader.lines_from(chunks)) == expected +# The tests for SSEClient are fairly basic, just ensuring that it is really making HTTP requests and that the +# API works as expected. The contract test suite is much more thorough - see sse-contract-tests. 
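+#
+# For reference, the SSE wire format used in these tests frames each event as one or more
+# "field: value" lines followed by a blank line, for example:
+#
+#     event: event1
+#     data: data1
+#     (a blank line terminates the event)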
+ class TestSSEClient: - pass + def test_sends_expected_headers(self): + with start_server() as server: + with ChunkedResponse({ 'Content-Type': 'text/event-stream' }) as stream: + server.for_path('/', stream) + client = SSEClient(server.uri) + + r = server.await_request() + assert r.headers['Accept'] == 'text/event-stream' + assert r.headers['Cache-Control'] == 'no-cache' + + def test_receives_messages(self): + with start_server() as server: + with ChunkedResponse({ 'Content-Type': 'text/event-stream' }) as stream: + server.for_path('/', stream) + client = SSEClient(server.uri) + + stream.push("event: event1\ndata: data1\n\nevent: event2\ndata: data2\n\n") + + events = client.events + + event1 = next(events) + assert event1.event == 'event1' + assert event1.data == 'data1' + + event2 = next(events) + assert event2.event == 'event2' + assert event2.data == 'data2' From e76aef13bc2fd60e3583bb1fb1548759be9796df Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 3 Dec 2021 14:23:41 -0800 Subject: [PATCH 247/356] rm obsolete default config logic --- ldclient/impl/sse.py | 35 +++++++++-------------------------- 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/ldclient/impl/sse.py b/ldclient/impl/sse.py index de6c1a5f..5a867096 100644 --- a/ldclient/impl/sse.py +++ b/ldclient/impl/sse.py @@ -101,34 +101,17 @@ class SSEClient: This implementation does not include automatic retrying of a dropped connection; the caller will do that. If a connection ends, the events iterator will simply end. """ - def __init__(self, url, last_id=None, connect_timeout=10, read_timeout=300, chunk_size=10000, - verify_ssl=False, http=None, http_proxy=None, http_factory=None, **kwargs): + def __init__(self, url, last_id=None, http_factory=None, **kwargs): self.url = url self.last_id = last_id - self._chunk_size = chunk_size - - if http_factory: - self._timeout = http_factory.timeout - base_headers = http_factory.base_headers - else: - # for backward compatibility in case anyone else is using this class - self._timeout = urllib3.Timeout(connect=connect_timeout, read=read_timeout) - base_headers = {} - - # Optional support for passing in an HTTP client - if http: - self.http = http - else: - hf = http_factory - if hf is None: # build from individual parameters which we're only retaining for backward compatibility - hc = HTTPConfig( - connect_timeout=connect_timeout, - read_timeout=read_timeout, - disable_ssl_verification=not verify_ssl, - http_proxy=http_proxy - ) - hf = HTTPFactory({}, hc) - self.http = hf.create_pool_manager(1, url) + self._chunk_size = 10000 + + if http_factory is None: + http_factory = HTTPFactory({}, HTTPConfig()) + self._timeout = http_factory.timeout + base_headers = http_factory.base_headers + + self.http = http_factory.create_pool_manager(1, url) # Any extra kwargs will be fed into the request call later. 
self.requests_kwargs = kwargs From 6657474502c40909d422cf907bff9c84fa2f4fff Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 6 Dec 2021 10:12:00 -0800 Subject: [PATCH 248/356] (big segments 4) implement big segment stores in Redis+DynamoDB, refactor db tests (#158) --- .../dynamodb/dynamodb_big_segment_store.py | 75 ++++ .../dynamodb/dynamodb_feature_store.py | 4 +- .../redis/redis_big_segment_store.py | 47 +++ ldclient/integrations.py | 74 +++- ldclient/interfaces.py | 4 +- testing/feature_store_test_base.py | 146 ++++++++ testing/integrations/__init__.py | 0 .../big_segment_store_test_base.py | 122 ++++++ .../persistent_feature_store_test_base.py | 105 ++++++ testing/integrations/test_consul.py | 31 ++ testing/integrations/test_dynamodb.py | 168 +++++++++ testing/integrations/test_redis.py | 105 ++++++ testing/test_feature_store.py | 353 ------------------ testing/test_in_memory_feature_store.py | 17 + 14 files changed, 890 insertions(+), 361 deletions(-) create mode 100644 ldclient/impl/integrations/dynamodb/dynamodb_big_segment_store.py create mode 100644 ldclient/impl/integrations/redis/redis_big_segment_store.py create mode 100644 testing/feature_store_test_base.py create mode 100644 testing/integrations/__init__.py create mode 100644 testing/integrations/big_segment_store_test_base.py create mode 100644 testing/integrations/persistent_feature_store_test_base.py create mode 100644 testing/integrations/test_consul.py create mode 100644 testing/integrations/test_dynamodb.py create mode 100644 testing/integrations/test_redis.py delete mode 100644 testing/test_feature_store.py create mode 100644 testing/test_in_memory_feature_store.py diff --git a/ldclient/impl/integrations/dynamodb/dynamodb_big_segment_store.py b/ldclient/impl/integrations/dynamodb/dynamodb_big_segment_store.py new file mode 100644 index 00000000..e021ecf6 --- /dev/null +++ b/ldclient/impl/integrations/dynamodb/dynamodb_big_segment_store.py @@ -0,0 +1,75 @@ + +have_dynamodb = False +try: + import boto3 + have_dynamodb = True +except ImportError: + pass + +from ldclient.interfaces import BigSegmentStore, BigSegmentStoreMetadata + +from typing import List, Optional + + +class _DynamoDBBigSegmentStore(BigSegmentStore): + PARTITION_KEY = 'namespace' + SORT_KEY = 'key' + KEY_METADATA = 'big_segments_metadata' + KEY_USER_DATA = 'big_segments_user' + ATTR_SYNC_TIME = 'synchronizedOn' + ATTR_INCLUDED = 'included' + ATTR_EXCLUDED = 'excluded' + + def __init__(self, table_name, prefix, dynamodb_opts): + if not have_dynamodb: + raise NotImplementedError("Cannot use DynamoDB Big Segment store because AWS SDK (boto3 package) is not installed") + self._table_name = table_name + self._prefix = (prefix + ":") if prefix else "" + self._client = boto3.client('dynamodb', **dynamodb_opts) + + def get_metadata(self) -> BigSegmentStoreMetadata: + key = self._prefix + self.KEY_METADATA + data = self._client.get_item(TableName=self._table_name, Key={ + self.PARTITION_KEY: { "S": key }, + self.SORT_KEY: { "S": key } + }) + if data is not None: + item = data.get('Item') + if item is not None: + attr = item.get(self.ATTR_SYNC_TIME) + if attr is not None: + value = attr.get('N') + return BigSegmentStoreMetadata(None if value is None else int(value)) + return BigSegmentStoreMetadata(None) + + def get_membership(self, user_hash: str) -> Optional[dict]: + data = self._client.get_item(TableName=self._table_name, Key={ + self.PARTITION_KEY: { "S": self._prefix + self.KEY_USER_DATA }, + self.SORT_KEY: { "S": user_hash } + }) + if data is not None: + 
item = data.get('Item') + if item is not None: + included_refs = _get_string_list(item, self.ATTR_INCLUDED) + excluded_refs = _get_string_list(item, self.ATTR_EXCLUDED) + if (included_refs is None or len(included_refs) == 0) and (excluded_refs is None or len(excluded_refs) == 0): + return None + ret = {} + if excluded_refs is not None: + for seg_ref in excluded_refs: + ret[seg_ref] = False + if included_refs is not None: + for seg_ref in included_refs: # includes should override excludes + ret[seg_ref] = True + return ret + return None + + def stop(self): + pass + +def _get_string_list(item: dict, attr_name: str) -> Optional[List[str]]: + attr = item.get(attr_name) + if attr is None: + return None + return attr.get('SS') + \ No newline at end of file diff --git a/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py index ae6bef59..98963e72 100644 --- a/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py +++ b/ldclient/impl/integrations/dynamodb/dynamodb_feature_store.py @@ -50,7 +50,7 @@ def __init__(self, table_name, prefix, dynamodb_opts): if not have_dynamodb: raise NotImplementedError("Cannot use DynamoDB feature store because AWS SDK (boto3 package) is not installed") self._table_name = table_name - self._prefix = None if prefix == "" else prefix + self._prefix = (prefix + ":") if prefix else "" self._client = boto3.client('dynamodb', **dynamodb_opts) def init_internal(self, all_data): @@ -124,7 +124,7 @@ def describe_configuration(self, config): return 'DynamoDB' def _prefixed_namespace(self, base): - return base if self._prefix is None else (self._prefix + ':' + base) + return self._prefix + base def _namespace_for_kind(self, kind): return self._prefixed_namespace(kind.namespace) diff --git a/ldclient/impl/integrations/redis/redis_big_segment_store.py b/ldclient/impl/integrations/redis/redis_big_segment_store.py new file mode 100644 index 00000000..35b42b71 --- /dev/null +++ b/ldclient/impl/integrations/redis/redis_big_segment_store.py @@ -0,0 +1,47 @@ +from ldclient import log +from ldclient.interfaces import BigSegmentStore, BigSegmentStoreMetadata + +from typing import Optional, Set, cast + +have_redis = False +try: + import redis + have_redis = True +except ImportError: + pass + + +class _RedisBigSegmentStore(BigSegmentStore): + KEY_LAST_UP_TO_DATE = ':big_segments_synchronized_on' + KEY_USER_INCLUDE = ':big_segment_include:' + KEY_USER_EXCLUDE = ':big_segment_exclude:' + + def __init__(self, url: str, prefix: Optional[str], max_connections: int): + if not have_redis: + raise NotImplementedError("Cannot use Redis Big Segment store because redis package is not installed") + self._prefix = prefix or 'launchdarkly' + self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) + log.info("Started RedisBigSegmentStore connected to URL: " + url + " using prefix: " + self._prefix) + + def get_metadata(self) -> BigSegmentStoreMetadata: + r = redis.Redis(connection_pool=self._pool) + value = r.get(self._prefix + self.KEY_LAST_UP_TO_DATE) + return BigSegmentStoreMetadata(None if value is None else int(value)) + + def get_membership(self, user_hash: str) -> Optional[dict]: + r = redis.Redis(connection_pool=self._pool) + included_refs = cast(Set[bytes], r.smembers(self._prefix + self.KEY_USER_INCLUDE + user_hash)) + excluded_refs = cast(Set[bytes], r.smembers(self._prefix + self.KEY_USER_EXCLUDE + user_hash)) + # The cast to Set[bytes] is because the linter is otherwise confused about 
the return type of smembers + # and thinks there could be some element type other than bytes. + if (included_refs is None or len(included_refs) == 0) and (excluded_refs is None or len(excluded_refs) == 0): + return None + ret = {} + for seg_ref in excluded_refs: + ret[seg_ref.decode()] = False + for seg_ref in included_refs: # includes should override excludes + ret[seg_ref.decode()] = True + return ret + + def stop(self): + self._pool.disconnect() diff --git a/ldclient/integrations.py b/ldclient/integrations.py index 550f0177..c9938464 100644 --- a/ldclient/integrations.py +++ b/ldclient/integrations.py @@ -6,11 +6,14 @@ from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper from ldclient.impl.integrations.consul.consul_feature_store import _ConsulFeatureStoreCore +from ldclient.impl.integrations.dynamodb.dynamodb_big_segment_store import _DynamoDBBigSegmentStore from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore from ldclient.impl.integrations.files.file_data_source import _FileDataSource +from ldclient.impl.integrations.redis.redis_big_segment_store import _RedisBigSegmentStore from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore +from ldclient.interfaces import BigSegmentStore -from typing import List, Callable, Mapping, Any +from typing import Any, List, Mapping, Optional class Consul: """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. @@ -60,14 +63,14 @@ class DynamoDB: @staticmethod def new_feature_store(table_name: str, - prefix: str=None, + prefix: Optional[str]=None, dynamodb_opts: Mapping[str, Any]={}, caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: """Creates a DynamoDB-backed implementation of :class:`ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. - To use this method, you must first install the ``boto3`` package containing the AWS SDK gems. + To use this method, you must first install the ``boto3`` package for the AWS SDK. Then, put the object returned by this method into the ``feature_store`` property of your client configuration (:class:`ldclient.config.Config`). :: @@ -95,6 +98,40 @@ def new_feature_store(table_name: str, core = _DynamoDBFeatureStoreCore(table_name, prefix, dynamodb_opts) return CachingStoreWrapper(core, caching) + @staticmethod + def new_big_segment_store(table_name: str, prefix: Optional[str]=None, dynamodb_opts: Mapping[str, Any]={}): + """ + Creates a DynamoDB-backed Big Segment store. + + Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + documentation: https://docs.launchdarkly.com/home/users/big-segments + + To use this method, you must first install the ``boto3`` package for the AWS SDK. Then, + put the object returned by this method into the ``store`` property of your Big Segments + configuration (see :class:`ldclient.config.Config`). + :: + + from ldclient.config import Config, BigSegmentsConfig + from ldclient.integrations import DynamoDB + store = DynamoDB.new_big_segment_store("my-table-name") + config = Config(big_segments=BigSegmentsConfig(store=store)) + + Note that the DynamoDB table must already exist; the LaunchDarkly SDK does not create the table + automatically, because it has no way of knowing what additional properties (such as permissions + and throughput) you would want it to have. 
The table must have a partition key called + "namespace" and a sort key called "key", both with a string type. + + By default, the DynamoDB client will try to get your AWS credentials and region name from + environment variables and/or local configuration files, as described in the AWS SDK documentation. + You may also pass configuration settings in ``dynamodb_opts``. + + :param table_name: the name of an existing DynamoDB table + :param prefix: an optional namespace prefix to be prepended to all DynamoDB keys + :param dynamodb_opts: optional parameters for configuring the DynamoDB client, as defined in + the `boto3 API `_ + """ + return _DynamoDBBigSegmentStore(table_name, prefix, dynamodb_opts) + class Redis: """Provides factory methods for integrations between the LaunchDarkly SDK and Redis. @@ -108,7 +145,8 @@ def new_feature_store(url: str='redis://localhost:6379/0', prefix: str='launchdarkly', max_connections: int=16, caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: - """Creates a Redis-backed implementation of :class:`ldclient.interfaces.FeatureStore`. + """ + Creates a Redis-backed implementation of :class:`~ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the `SDK reference guide `_. @@ -117,6 +155,7 @@ def new_feature_store(url: str='redis://localhost:6379/0', (:class:`ldclient.config.Config`). :: + from ldclient.config import Config from ldclient.integrations import Redis store = Redis.new_feature_store() config = Config(feature_store=store) @@ -134,6 +173,33 @@ def new_feature_store(url: str='redis://localhost:6379/0', wrapper._core = core # exposed for testing return wrapper + @staticmethod + def new_big_segment_store(url: str='redis://localhost:6379/0', + prefix: str='launchdarkly', + max_connections: int=16) -> BigSegmentStore: + """ + Creates a Redis-backed Big Segment store. + + Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + documentation: https://docs.launchdarkly.com/home/users/big-segments + + To use this method, you must first install the ``redis`` package. Then, put the object + returned by this method into the ``store`` property of your Big Segments configuration + (see :class:`ldclient.config.Config`). + :: + + from ldclient.config import Config, BigSegmentsConfig + from ldclient.integrations import Redis + store = Redis.new_big_segment_store() + config = Config(big_segments=BigSegmentsConfig(store=store)) + + :param url: the URL of the Redis host; defaults to ``DEFAULT_URL`` + :param prefix: a namespace prefix to be prepended to all Redis keys; defaults to + ``DEFAULT_PREFIX`` + :param max_connections: the maximum number of Redis connections to keep in the + connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS`` + """ + return _RedisBigSegmentStore(url, prefix, max_connections) class Files: """Provides factory methods for integrations with filesystem data. diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index dc4b50e9..a863319f 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -288,7 +288,7 @@ def get_metadata(self) -> BigSegmentStoreMetadata: pass @abstractmethod - def get_membership(self, user_hash: str) -> dict: + def get_membership(self, user_hash: str) -> Optional[dict]: """ Queries the store for a snapshot of the current segment state for a specific user. 
@@ -297,7 +297,7 @@ def get_membership(self, user_hash: str) -> dict: of how this is done, because it deals only with already-hashed keys, but the string can be assumed to only contain characters that are valid in base64. - The return value should be either a `dict`, or nil if the user is not referenced in any big + The return value should be either a `dict`, or None if the user is not referenced in any big segments. Each key in the dictionary is a "segment reference", which is how segments are identified in Big Segment data. This string is not identical to the segment key-- the SDK will add other information. The store implementation should not be concerned with the diff --git a/testing/feature_store_test_base.py b/testing/feature_store_test_base.py new file mode 100644 index 00000000..e622b62d --- /dev/null +++ b/testing/feature_store_test_base.py @@ -0,0 +1,146 @@ +from ldclient.interfaces import FeatureStore +from ldclient.versioned_data_kind import FEATURES + +from abc import abstractmethod +import pytest + +# The basic test suite to be run against all feature store implementations. +# +# FeatureStoreTestBase and FeatureStoreTester are used only by test_in_memory_feature_store. For all +# database integrations, see testing.integrations.persistent_feature_store_test_base which extends +# them with additional tests. + +class FeatureStoreTester: + @abstractmethod + def create_feature_store(self) -> FeatureStore: + pass + + +class StoreTestScope: + def __init__(self, store: FeatureStore): + self.__store = store + + @property + def store(self) -> FeatureStore: + return self.__store + + # These magic methods allow the scope to be automatically cleaned up in a "with" block + def __enter__(self): + return self.__store + + def __exit__(self, type, value, traceback): + if hasattr(self.store, "stop"): # stop was not originally required for all feature store implementations + self.__store.stop() + + +# FeatureStoreTestBase is meant to be used as follows: +# - A subclass adds a pytest fixture called "tester" that will return a series of instances of +# some subclass of FeatureStoreTester. This allows the entire test suite to be repeated with +# different store configurations. +# - Tests in this class use "with self.store(tester)" or "with self.inited_store(tester)" to +# create an instance of the store and ensure that it is torn down afterward. 
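+#
+# As an illustration, a complete suite for a hypothetical MyFeatureStore class needs only:
+#
+#     class MyStoreTester(FeatureStoreTester):
+#         def create_feature_store(self) -> FeatureStore:
+#             return MyFeatureStore()
+#
+#     class TestMyStore(FeatureStoreTestBase):
+#         @pytest.fixture
+#         def tester(self):
+#             return MyStoreTester()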
+ +class FeatureStoreTestBase: + @abstractmethod + def all_testers(self): + pass + + def store(self, tester): + return StoreTestScope(tester.create_feature_store()) + + def inited_store(self, tester): + scope = StoreTestScope(tester.create_feature_store()) + scope.store.init({ + FEATURES: { + 'foo': self.make_feature('foo', 10), + 'bar': self.make_feature('bar', 10), + } + }) + return scope + + @staticmethod + def make_feature(key, ver): + return { + u'key': key, + u'version': ver, + u'salt': u'abc', + u'on': True, + u'variations': [ + { + u'value': True, + u'weight': 100, + u'targets': [] + }, + { + u'value': False, + u'weight': 0, + u'targets': [] + } + ] + } + + def test_not_initialized_before_init(self, tester): + with self.store(tester) as store: + assert store.initialized is False + + def test_initialized(self, tester): + with self.inited_store(tester) as store: + assert store.initialized is True + + def test_get_existing_feature(self, tester): + with self.inited_store(tester) as store: + expected = self.make_feature('foo', 10) + assert store.get(FEATURES, 'foo', lambda x: x) == expected + + def test_get_nonexisting_feature(self, tester): + with self.inited_store(tester) as store: + assert store.get(FEATURES, 'biz', lambda x: x) is None + + def test_get_all_versions(self, tester): + with self.inited_store(tester) as store: + result = store.all(FEATURES, lambda x: x) + assert len(result) == 2 + assert result.get('foo') == self.make_feature('foo', 10) + assert result.get('bar') == self.make_feature('bar', 10) + + def test_upsert_with_newer_version(self, tester): + with self.inited_store(tester) as store: + new_ver = self.make_feature('foo', 11) + store.upsert(FEATURES, new_ver) + assert store.get(FEATURES, 'foo', lambda x: x) == new_ver + + def test_upsert_with_older_version(self, tester): + with self.inited_store(tester) as store: + new_ver = self.make_feature('foo', 9) + expected = self.make_feature('foo', 10) + store.upsert(FEATURES, new_ver) + assert store.get(FEATURES, 'foo', lambda x: x) == expected + + def test_upsert_with_new_feature(self, tester): + with self.inited_store(tester) as store: + new_ver = self.make_feature('biz', 1) + store.upsert(FEATURES, new_ver) + assert store.get(FEATURES, 'biz', lambda x: x) == new_ver + + def test_delete_with_newer_version(self, tester): + with self.inited_store(tester) as store: + store.delete(FEATURES, 'foo', 11) + assert store.get(FEATURES, 'foo', lambda x: x) is None + + def test_delete_unknown_feature(self, tester): + with self.inited_store(tester) as store: + store.delete(FEATURES, 'biz', 11) + assert store.get(FEATURES, 'biz', lambda x: x) is None + + def test_delete_with_older_version(self, tester): + with self.inited_store(tester) as store: + store.delete(FEATURES, 'foo', 9) + expected = self.make_feature('foo', 10) + assert store.get(FEATURES, 'foo', lambda x: x) == expected + + def test_upsert_older_version_after_delete(self, tester): + with self.inited_store(tester) as store: + store.delete(FEATURES, 'foo', 11) + old_ver = self.make_feature('foo', 9) + store.upsert(FEATURES, old_ver) + assert store.get(FEATURES, 'foo', lambda x: x) is None diff --git a/testing/integrations/__init__.py b/testing/integrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testing/integrations/big_segment_store_test_base.py b/testing/integrations/big_segment_store_test_base.py new file mode 100644 index 00000000..4477a8d0 --- /dev/null +++ b/testing/integrations/big_segment_store_test_base.py @@ -0,0 +1,122 @@ +from abc 
import abstractmethod, abstractproperty +from os import environ +import pytest +from typing import List + +from ldclient.interfaces import BigSegmentStore, BigSegmentStoreMetadata + +skip_database_tests = environ.get('LD_SKIP_DATABASE_TESTS') == '1' + + +# The standard test suite to be run against all Big Segment store implementations. For each database +# integration that supports Big Segments, we must define a subclass of BigSegmentStoreTester which +# overrides its abstract methods as appropriate for that database, and then define a subclass of +# BigSegmentStoreTestBase which simply specifies what tester subclass to use. + +fake_user_hash = "userhash" + + +class BigSegmentStoreTester: + @abstractmethod + def create_big_segment_store(self, prefix: str) -> BigSegmentStore: + """ + Override this method to create a Big Segment store instance. + :param prefix: the prefix parameter for the store constructor - may be None or empty to use the default + """ + pass + + @abstractmethod + def clear_data(self, prefix: str): + """ + Override this method to clear any existing data from the database for the specified prefix. + """ + pass + + @abstractmethod + def set_metadata(self, prefix: str, metadata: BigSegmentStoreMetadata): + """ + Override this method to update the metadata in the store. + """ + pass + + @abstractmethod + def set_segments(self, prefix: str, user_hash: str, includes: List[str], excludes: List[str]): + """ + Override this method to update segment data for a user in the store. + """ + pass + + +class BigSegmentStoreTestScope: + def __init__(self, store: BigSegmentStore): + self.__store = store + + @property + def store(self) -> BigSegmentStore: + return self.__store + + # These magic methods allow the scope to be automatically cleaned up in a "with" block + def __enter__(self): + return self.__store + + def __exit__(self, type, value, traceback): + self.__store.stop() + + +@pytest.mark.skipif(skip_database_tests, reason="skipping database tests") +class BigSegmentStoreTestBase: + @abstractproperty + def tester_class(self): + pass + + @pytest.fixture(params=[False, True]) + def tester(self, request): + specify_prefix = request.param + instance = self.tester_class() + instance.prefix = "testprefix" if specify_prefix else None + return instance + + @pytest.fixture(autouse=True) + def clear_data_before_each(self, tester): + tester.clear_data(tester.prefix) + + def store(self, tester): + return BigSegmentStoreTestScope(tester.create_big_segment_store(tester.prefix)) + + def test_get_metadata_valid_value(self, tester): + expected_timestamp = 1234567890 + tester.set_metadata(tester.prefix, BigSegmentStoreMetadata(expected_timestamp)) + with self.store(tester) as store: + actual = store.get_metadata() + assert actual is not None + assert actual.last_up_to_date == expected_timestamp + + def test_get_metadata_no_value(self, tester): + with self.store(tester) as store: + actual = store.get_metadata() + assert actual is not None + assert actual.last_up_to_date is None + + def test_get_membership_not_found(self, tester): + with self.store(tester) as store: + membership = store.get_membership(fake_user_hash) + assert membership is None or membership == {} + + def test_get_membership_includes_only(self, tester): + tester.set_segments(tester.prefix, fake_user_hash, ['key1', 'key2'], []) + with self.store(tester) as store: + membership = store.get_membership(fake_user_hash) + assert membership == { 'key1': True, 'key2': True } + + def test_get_membership_excludes_only(self, tester): + 
tester.set_segments(tester.prefix, fake_user_hash, [], ['key1', 'key2']) + with self.store(tester) as store: + membership = store.get_membership(fake_user_hash) + assert membership == { 'key1': False, 'key2': False } + + def test_get_membership_includes_and_excludes(self, tester): + tester.set_segments(tester.prefix, fake_user_hash, ['key1', 'key2'], ['key2', 'key3']) + with self.store(tester) as store: + membership = store.get_membership(fake_user_hash) + assert membership == { 'key1': True, 'key2': True, 'key3': False } + \ No newline at end of file diff --git a/testing/integrations/persistent_feature_store_test_base.py b/testing/integrations/persistent_feature_store_test_base.py new file mode 100644 index 00000000..be473e3d --- /dev/null +++ b/testing/integrations/persistent_feature_store_test_base.py @@ -0,0 +1,105 @@ +from abc import abstractmethod, abstractproperty +from os import environ +import pytest + +from ldclient.feature_store import CacheConfig +from ldclient.interfaces import FeatureStore +from ldclient.versioned_data_kind import FEATURES + +from testing.feature_store_test_base import FeatureStoreTestBase, FeatureStoreTester, StoreTestScope + +skip_database_tests = environ.get('LD_SKIP_DATABASE_TESTS') == '1' + + +# The standard test suite to be run against all persistent feature store implementations. See +# testing.feature_store_test_base for the basic model being used here. For each database integration, +# we must define a subclass of PersistentFeatureStoreTester which overrides its abstract methods as +# appropriate for that database, and then define a subclass of PersistentFeatureStoreTestBase which +# simply specifies what tester subclass to use. + + +class PersistentFeatureStoreTester(FeatureStoreTester): + def __init__(self): + self.prefix = None # type: str + self.caching = CacheConfig.disabled() + + @abstractmethod + def create_persistent_feature_store(self, prefix: str, caching: CacheConfig) -> FeatureStore: + """ + Override this method to create a feature store instance. + :param prefix: the prefix parameter for the store constructor - may be None or empty to use the default + :param caching: caching parameters for the store constructor + """ + pass + + @abstractmethod + def clear_data(self, prefix: str): + """ + Override this method to clear any existing data from the database for the specified prefix. + """ + pass + + def create_feature_store(self) -> FeatureStore: + return self.create_persistent_feature_store(self.prefix, self.caching) + + +@pytest.mark.skipif(skip_database_tests, reason="skipping database tests") +class PersistentFeatureStoreTestBase(FeatureStoreTestBase): + @abstractproperty + def tester_class(self): + pass + + @pytest.fixture(params=[ + (False, False), + (True, False), + (False, True), + (True, True) + ]) + def tester(self, request): + specify_prefix, use_caching = request.param + instance = self.tester_class() + instance.prefix = "testprefix" if specify_prefix else None + instance.caching = CacheConfig.default() if use_caching else CacheConfig.disabled() + return instance + + @pytest.fixture(autouse=True) + def clear_data_before_each(self, tester): + tester.clear_data(tester.prefix) + + def test_stores_with_different_prefixes_are_independent(self): + # This verifies that init(), get(), all(), and upsert() are all correctly using the specified key prefix. + # The delete() method isn't tested separately because it's implemented as a variant of upsert(). 
+ tester_a = self.tester_class() + tester_a.prefix = "a" + tester_a.clear_data(tester_a.prefix) + + tester_b = self.tester_class() + tester_b.prefix = "b" + tester_b.clear_data(tester_b.prefix) + + flag_a1 = { 'key': 'flagA1', 'version': 1 } + flag_a2 = { 'key': 'flagA2', 'version': 1 } + flag_b1 = { 'key': 'flagB1', 'version': 1 } + flag_b2 = { 'key': 'flagB2', 'version': 1 } + + with StoreTestScope(tester_a.create_feature_store()) as store_a: + with StoreTestScope(tester_b.create_feature_store()) as store_b: + store_a.init({ FEATURES: { 'flagA1': flag_a1 } }) + store_a.upsert(FEATURES, flag_a2) + + store_b.init({ FEATURES: { 'flagB1': flag_b1 } }) + store_b.upsert(FEATURES, flag_b2) + + item = store_a.get(FEATURES, 'flagA1', lambda x: x) + assert item == flag_a1 + item = store_a.get(FEATURES, 'flagB1', lambda x: x) + assert item is None + items = store_a.all(FEATURES, lambda x: x) + assert items == { 'flagA1': flag_a1, 'flagA2': flag_a2 } + + item = store_b.get(FEATURES, 'flagB1', lambda x: x) + assert item == flag_b1 + item = store_b.get(FEATURES, 'flagA1', lambda x: x) + assert item is None + items = store_b.all(FEATURES, lambda x: x) + assert items == { 'flagB1': flag_b1, 'flagB2': flag_b2 } diff --git a/testing/integrations/test_consul.py b/testing/integrations/test_consul.py new file mode 100644 index 00000000..d70b8174 --- /dev/null +++ b/testing/integrations/test_consul.py @@ -0,0 +1,31 @@ +from ldclient.integrations import Consul + +from testing.integrations.persistent_feature_store_test_base import * + +have_consul = False +try: + import consul + have_consul = True +except ImportError: + pass + +pytestmark = pytest.mark.skipif(not have_consul, reason="skipping Consul tests because consul module is not installed") + + +class ConsulFeatureStoreTester(PersistentFeatureStoreTester): + def create_persistent_feature_store(self, prefix, caching) -> FeatureStore: + return Consul.new_feature_store(prefix=prefix, caching=caching) + + def clear_data(self, prefix): + client = consul.Consul() + index, keys = client.kv.get((prefix or Consul.DEFAULT_PREFIX) + "/", recurse=True, keys=True) + for key in (keys or []): + client.kv.delete(key) + +class TestConsulFeatureStore(PersistentFeatureStoreTestBase): + @property + def tester_class(self): + return ConsulFeatureStoreTester + + +# Consul does not support Big Segments. 
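+#
+# For stores that do support them, the membership dict contract exercised by the Big Segment
+# suite works like this (a sketch of the expected shape, matching the assertions in
+# big_segment_store_test_base): a user included in segment reference "key1" and excluded
+# from "key2" is reported as
+#
+#     { "key1": True, "key2": False }
+#
+# and segment references that do not mention the user are simply absent from the dict.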
diff --git a/testing/integrations/test_dynamodb.py b/testing/integrations/test_dynamodb.py new file mode 100644 index 00000000..4c314ad7 --- /dev/null +++ b/testing/integrations/test_dynamodb.py @@ -0,0 +1,168 @@ +from ldclient.impl.integrations.dynamodb.dynamodb_big_segment_store import _DynamoDBBigSegmentStore +from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers +from ldclient.integrations import DynamoDB +from ldclient.interfaces import UpdateProcessor + +from testing.integrations.big_segment_store_test_base import * +from testing.integrations.persistent_feature_store_test_base import * + +import time + +have_dynamodb = False +try: + import boto3 + have_dynamodb = True +except ImportError: + pass + +pytestmark = pytest.mark.skipif(not have_dynamodb, reason="skipping DynamoDB tests because boto3 module is not installed") + + +class DynamoDBTestHelper: + table_name = 'LD_DYNAMODB_TEST_TABLE' + table_created = False + options = { + 'aws_access_key_id': 'key', # not used by local DynamoDB, but still required + 'aws_secret_access_key': 'secret', + 'endpoint_url': 'http://localhost:8000', + 'region_name': 'us-east-1' + } + + @staticmethod + def make_client(): + return boto3.client('dynamodb', **DynamoDBTestHelper.options) + + def clear_data_for_prefix(prefix): + client = DynamoDBTestHelper.make_client() + delete_requests = [] + req = { + 'TableName': DynamoDBTestHelper.table_name, + 'ConsistentRead': True, + 'ProjectionExpression': '#namespace, #key', + 'ExpressionAttributeNames': { + '#namespace': _DynamoDBFeatureStoreCore.PARTITION_KEY, + '#key': _DynamoDBFeatureStoreCore.SORT_KEY + } + } + for resp in client.get_paginator('scan').paginate(**req): + for item in resp['Items']: + delete_requests.append({ 'DeleteRequest': { 'Key': item } }) + _DynamoDBHelpers.batch_write_requests(client, DynamoDBTestHelper.table_name, delete_requests) + + @staticmethod + def ensure_table_created(): + if DynamoDBTestHelper.table_created: + return + DynamoDBTestHelper.table_created = True + client = DynamoDBTestHelper.make_client() + try: + client.describe_table(TableName=DynamoDBTestHelper.table_name) + return + except client.exceptions.ResourceNotFoundException: + pass + req = { + 'TableName': DynamoDBTestHelper.table_name, + 'KeySchema': [ + { + 'AttributeName': _DynamoDBFeatureStoreCore.PARTITION_KEY, + 'KeyType': 'HASH', + }, + { + 'AttributeName': _DynamoDBFeatureStoreCore.SORT_KEY, + 'KeyType': 'RANGE' + } + ], + 'AttributeDefinitions': [ + { + 'AttributeName': _DynamoDBFeatureStoreCore.PARTITION_KEY, + 'AttributeType': 'S' + }, + { + 'AttributeName': _DynamoDBFeatureStoreCore.SORT_KEY, + 'AttributeType': 'S' + } + ], + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 1, + 'WriteCapacityUnits': 1 + } + } + client.create_table(**req) + while True: + try: + client.describe_table(TableName=DynamoDBTestHelper.table_name) + return + except client.exceptions.ResourceNotFoundException: + time.sleep(0.5) + + +class DynamoDBFeatureStoreTester(PersistentFeatureStoreTester): + def __init__(self): + super().__init__() + DynamoDBTestHelper.ensure_table_created() + + def create_persistent_feature_store(self, prefix, caching) -> FeatureStore: + return DynamoDB.new_feature_store(DynamoDBTestHelper.table_name, + prefix=prefix, caching=caching, dynamodb_opts=DynamoDBTestHelper.options) + + def clear_data(self, prefix): + DynamoDBTestHelper.clear_data_for_prefix(prefix) + + +class DynamoDBBigSegmentTester(BigSegmentStoreTester): + def __init__(self): + 
super().__init__() + DynamoDBTestHelper.ensure_table_created() + + def create_big_segment_store(self, prefix) -> BigSegmentStore: + return DynamoDB.new_big_segment_store(DynamoDBTestHelper.table_name, + prefix=prefix, dynamodb_opts=DynamoDBTestHelper.options) + + def clear_data(self, prefix): + DynamoDBTestHelper.clear_data_for_prefix(prefix) + + def set_metadata(self, prefix: str, metadata: BigSegmentStoreMetadata): + client = DynamoDBTestHelper.make_client() + actual_prefix = prefix + ":" if prefix else "" + key = actual_prefix + _DynamoDBBigSegmentStore.KEY_METADATA + client.put_item( + TableName=DynamoDBTestHelper.table_name, + Item={ + _DynamoDBBigSegmentStore.PARTITION_KEY: { "S": key }, + _DynamoDBBigSegmentStore.SORT_KEY: { "S": key }, + _DynamoDBBigSegmentStore.ATTR_SYNC_TIME: { + "N": "" if metadata.last_up_to_date is None else str(metadata.last_up_to_date) + } + } + ) + + def set_segments(self, prefix: str, user_hash: str, includes: List[str], excludes: List[str]): + client = DynamoDBTestHelper.make_client() + actual_prefix = prefix + ":" if prefix else "" + sets = { + _DynamoDBBigSegmentStore.ATTR_INCLUDED: includes, + _DynamoDBBigSegmentStore.ATTR_EXCLUDED: excludes + } + for attr_name, values in sets.items(): + if len(values) > 0: + client.update_item( + TableName=DynamoDBTestHelper.table_name, + Key={ + _DynamoDBBigSegmentStore.PARTITION_KEY: { "S": actual_prefix + _DynamoDBBigSegmentStore.KEY_USER_DATA }, + _DynamoDBBigSegmentStore.SORT_KEY: { "S": user_hash } + }, + UpdateExpression= "ADD %s :value" % attr_name, + ExpressionAttributeValues={ ":value": { "SS": values } } + ) + + +class TestDynamoDBFeatureStore(PersistentFeatureStoreTestBase): + @property + def tester_class(self): + return DynamoDBFeatureStoreTester + + +class TestDynamoDBBigSegmentStore(BigSegmentStoreTestBase): + @property + def tester_class(self): + return DynamoDBBigSegmentTester diff --git a/testing/integrations/test_redis.py b/testing/integrations/test_redis.py new file mode 100644 index 00000000..9301092e --- /dev/null +++ b/testing/integrations/test_redis.py @@ -0,0 +1,105 @@ +from ldclient.impl.integrations.redis.redis_big_segment_store import _RedisBigSegmentStore +from ldclient.integrations import Redis +from ldclient.versioned_data_kind import FEATURES + +from testing.integrations.big_segment_store_test_base import * +from testing.integrations.persistent_feature_store_test_base import * + +import json + +have_redis = False +try: + import redis + have_redis = True +except ImportError: + pass + +pytestmark = pytest.mark.skipif(not have_redis, reason="skipping Redis tests because redis module is not installed") + + +class RedisTestHelper: + @staticmethod + def make_client() -> redis.StrictRedis: + return redis.StrictRedis(host="localhost", port=6379, db=0) + + def clear_data_for_prefix(prefix): + r = RedisTestHelper.make_client() + for key in r.keys("%s:*" % prefix): + r.delete(key) + + +class RedisFeatureStoreTester(PersistentFeatureStoreTester): + def create_persistent_feature_store(self, prefix, caching) -> FeatureStore: + return Redis.new_feature_store(prefix=prefix, caching=caching) + + def clear_data(self, prefix): + RedisTestHelper.clear_data_for_prefix(prefix or Redis.DEFAULT_PREFIX) + + +class RedisBigSegmentStoreTester(BigSegmentStoreTester): + def create_big_segment_store(self, prefix) -> BigSegmentStore: + return Redis.new_big_segment_store(prefix=prefix) + + def clear_data(self, prefix): + RedisTestHelper.clear_data_for_prefix(prefix or Redis.DEFAULT_PREFIX) + + def set_metadata(self, 
prefix: str, metadata: BigSegmentStoreMetadata): + r = RedisTestHelper.make_client() + r.set((prefix or Redis.DEFAULT_PREFIX) + _RedisBigSegmentStore.KEY_LAST_UP_TO_DATE, + "" if metadata.last_up_to_date is None else str(metadata.last_up_to_date)) + + def set_segments(self, prefix: str, user_hash: str, includes: List[str], excludes: List[str]): + r = RedisTestHelper.make_client() + prefix = prefix or Redis.DEFAULT_PREFIX + for ref in includes: + r.sadd(prefix + _RedisBigSegmentStore.KEY_USER_INCLUDE + user_hash, ref) + for ref in excludes: + r.sadd(prefix + _RedisBigSegmentStore.KEY_USER_EXCLUDE + user_hash, ref) + + +class TestRedisFeatureStore(PersistentFeatureStoreTestBase): + @property + def tester_class(self): + return RedisFeatureStoreTester + + def test_upsert_race_condition_against_external_client_with_higher_version(self): + other_client = RedisTestHelper.make_client() + store = Redis.new_feature_store() + store.init({ FEATURES: {} }) + + other_version = {u'key': u'flagkey', u'version': 2} + def hook(base_key, key): + if other_version['version'] <= 4: + other_client.hset(base_key, key, json.dumps(other_version)) + other_version['version'] = other_version['version'] + 1 + store._core.test_update_hook = hook + + feature = { u'key': 'flagkey', u'version': 1 } + + store.upsert(FEATURES, feature) + result = store.get(FEATURES, 'flagkey', lambda x: x) + assert result['version'] == 2 + + def test_upsert_race_condition_against_external_client_with_lower_version(self): + other_client = RedisTestHelper.make_client() + store = Redis.new_feature_store() + store.init({ FEATURES: {} }) + + other_version = {u'key': u'flagkey', u'version': 2} + def hook(base_key, key): + if other_version['version'] <= 4: + other_client.hset(base_key, key, json.dumps(other_version)) + other_version['version'] = other_version['version'] + 1 + store._core.test_update_hook = hook + + feature = { u'key': 'flagkey', u'version': 5 } + + store.upsert(FEATURES, feature) + result = store.get(FEATURES, 'flagkey', lambda x: x) + assert result['version'] == 5 + + +class TestRedisBigSegmentStore(BigSegmentStoreTestBase): + @property + def tester_class(self): + return RedisBigSegmentStoreTester diff --git a/testing/test_feature_store.py b/testing/test_feature_store.py deleted file mode 100644 index 1df87694..00000000 --- a/testing/test_feature_store.py +++ /dev/null @@ -1,353 +0,0 @@ -import boto3 -import json -import os -import pytest -import redis -import time - -from typing import List - -# Consul is only supported in some Python versions -have_consul = False -try: - import consul - have_consul = True -except ImportError: - pass - -from ldclient.feature_store import CacheConfig, InMemoryFeatureStore -from ldclient.impl.integrations.dynamodb.dynamodb_feature_store import _DynamoDBFeatureStoreCore, _DynamoDBHelpers -from ldclient.integrations import Consul, DynamoDB, Redis -from ldclient.versioned_data_kind import FEATURES - -skip_db_tests = os.environ.get('LD_SKIP_DATABASE_TESTS') == '1' - -class Tester: - pass - -class InMemoryTester(Tester): - def init_store(self): - return InMemoryFeatureStore() - - @property - def supports_prefix(self): - return False - - -class RedisTester(Tester): - redis_host = 'localhost' - redis_port = 6379 - - def __init__(self, cache_config): - self._cache_config = cache_config - - def init_store(self, prefix=None): - self._clear_data() - return Redis.new_feature_store(caching=self._cache_config, prefix=prefix) - - @property - def supports_prefix(self): - return True - - def _clear_data(self): - r 
= redis.StrictRedis(host=self.redis_host, port=self.redis_port, db=0) - r.flushdb() - -class ConsulTester(Tester): - def __init__(self, cache_config): - self._cache_config = cache_config - - def init_store(self, prefix=None): - self._clear_data(prefix or "launchdarkly") - return Consul.new_feature_store(prefix=prefix, caching=self._cache_config) - - @property - def supports_prefix(self): - return True - - def _clear_data(self, prefix): - client = consul.Consul() - index, keys = client.kv.get(prefix + "/", recurse=True, keys=True) - for key in (keys or []): - client.kv.delete(key) - - -class DynamoDBTester(Tester): - table_name = 'LD_DYNAMODB_TEST_TABLE' - table_created = False - options = { - 'aws_access_key_id': 'key', # not used by local DynamoDB, but still required - 'aws_secret_access_key': 'secret', - 'endpoint_url': 'http://localhost:8000', - 'region_name': 'us-east-1' - } - - def __init__(self, cache_config): - self._cache_config = cache_config - - def init_store(self, prefix=None): - self._create_table() - self._clear_data() - return DynamoDB.new_feature_store(self.table_name, prefix=prefix, dynamodb_opts=self.options, - caching=self._cache_config) - - @property - def supports_prefix(self): - return True - - def _create_table(self): - if self.table_created: - return - client = boto3.client('dynamodb', **self.options) - try: - client.describe_table(TableName=self.table_name) - self.table_created = True - return - except client.exceptions.ResourceNotFoundException: - pass - req = { - 'TableName': self.table_name, - 'KeySchema': [ - { - 'AttributeName': _DynamoDBFeatureStoreCore.PARTITION_KEY, - 'KeyType': 'HASH', - }, - { - 'AttributeName': _DynamoDBFeatureStoreCore.SORT_KEY, - 'KeyType': 'RANGE' - } - ], - 'AttributeDefinitions': [ - { - 'AttributeName': _DynamoDBFeatureStoreCore.PARTITION_KEY, - 'AttributeType': 'S' - }, - { - 'AttributeName': _DynamoDBFeatureStoreCore.SORT_KEY, - 'AttributeType': 'S' - } - ], - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 1, - 'WriteCapacityUnits': 1 - } - } - client.create_table(**req) - while True: - try: - client.describe_table(TableName=self.table_name) - self.table_created = True - return - except client.exceptions.ResourceNotFoundException: - time.sleep(0.5) - - def _clear_data(self): - client = boto3.client('dynamodb', **self.options) - delete_requests = [] - req = { - 'TableName': self.table_name, - 'ConsistentRead': True, - 'ProjectionExpression': '#namespace, #key', - 'ExpressionAttributeNames': { - '#namespace': _DynamoDBFeatureStoreCore.PARTITION_KEY, - '#key': _DynamoDBFeatureStoreCore.SORT_KEY - } - } - for resp in client.get_paginator('scan').paginate(**req): - for item in resp['Items']: - delete_requests.append({ 'DeleteRequest': { 'Key': item } }) - _DynamoDBHelpers.batch_write_requests(client, self.table_name, delete_requests) - - -class TestFeatureStore: - params = [] # type: List[Tester] - if skip_db_tests: - params += [ - InMemoryTester() - ] - else: - params += [ - InMemoryTester(), - RedisTester(CacheConfig.default()), - RedisTester(CacheConfig.disabled()), - DynamoDBTester(CacheConfig.default()), - DynamoDBTester(CacheConfig.disabled()) - ] - if have_consul: - params.append(ConsulTester(CacheConfig.default())) - params.append(ConsulTester(CacheConfig.disabled())) - - @pytest.fixture(params=params) - def tester(self, request): - return request.param - - @pytest.fixture(params=params) - def store(self, request): - return request.param.init_store() - - @staticmethod - def make_feature(key, ver): - return { - u'key': key, 
- u'version': ver, - u'salt': u'abc', - u'on': True, - u'variations': [ - { - u'value': True, - u'weight': 100, - u'targets': [] - }, - { - u'value': False, - u'weight': 0, - u'targets': [] - } - ] - } - - def base_initialized_store(self, store): - store.init({ - FEATURES: { - 'foo': self.make_feature('foo', 10), - 'bar': self.make_feature('bar', 10), - } - }) - return store - - def test_not_initialized_before_init(self, store): - assert store.initialized is False - - def test_initialized(self, store): - store = self.base_initialized_store(store) - assert store.initialized is True - - def test_get_existing_feature(self, store): - store = self.base_initialized_store(store) - expected = self.make_feature('foo', 10) - assert store.get(FEATURES, 'foo', lambda x: x) == expected - - def test_get_nonexisting_feature(self, store): - store = self.base_initialized_store(store) - assert store.get(FEATURES, 'biz', lambda x: x) is None - - def test_get_all_versions(self, store): - store = self.base_initialized_store(store) - result = store.all(FEATURES, lambda x: x) - assert len(result) == 2 - assert result.get('foo') == self.make_feature('foo', 10) - assert result.get('bar') == self.make_feature('bar', 10) - - def test_upsert_with_newer_version(self, store): - store = self.base_initialized_store(store) - new_ver = self.make_feature('foo', 11) - store.upsert(FEATURES, new_ver) - assert store.get(FEATURES, 'foo', lambda x: x) == new_ver - - def test_upsert_with_older_version(self, store): - store = self.base_initialized_store(store) - new_ver = self.make_feature('foo', 9) - expected = self.make_feature('foo', 10) - store.upsert(FEATURES, new_ver) - assert store.get(FEATURES, 'foo', lambda x: x) == expected - - def test_upsert_with_new_feature(self, store): - store = self.base_initialized_store(store) - new_ver = self.make_feature('biz', 1) - store.upsert(FEATURES, new_ver) - assert store.get(FEATURES, 'biz', lambda x: x) == new_ver - - def test_delete_with_newer_version(self, store): - store = self.base_initialized_store(store) - store.delete(FEATURES, 'foo', 11) - assert store.get(FEATURES, 'foo', lambda x: x) is None - - def test_delete_unknown_feature(self, store): - store = self.base_initialized_store(store) - store.delete(FEATURES, 'biz', 11) - assert store.get(FEATURES, 'biz', lambda x: x) is None - - def test_delete_with_older_version(self, store): - store = self.base_initialized_store(store) - store.delete(FEATURES, 'foo', 9) - expected = self.make_feature('foo', 10) - assert store.get(FEATURES, 'foo', lambda x: x) == expected - - def test_upsert_older_version_after_delete(self, store): - store = self.base_initialized_store(store) - store.delete(FEATURES, 'foo', 11) - old_ver = self.make_feature('foo', 9) - store.upsert(FEATURES, old_ver) - assert store.get(FEATURES, 'foo', lambda x: x) is None - - def test_stores_with_different_prefixes_are_independent(self, tester): - # This verifies that init(), get(), all(), and upsert() are all correctly using the specified key prefix. - # The delete() method isn't tested separately because it's implemented as a variant of upsert(). 
- if not tester.supports_prefix: - return - - flag_a1 = { 'key': 'flagA1', 'version': 1 } - flag_a2 = { 'key': 'flagA2', 'version': 1 } - flag_b1 = { 'key': 'flagB1', 'version': 1 } - flag_b2 = { 'key': 'flagB2', 'version': 1 } - store_a = tester.init_store('a') - store_b = tester.init_store('b') - - store_a.init({ FEATURES: { 'flagA1': flag_a1 } }) - store_a.upsert(FEATURES, flag_a2) - - store_b.init({ FEATURES: { 'flagB1': flag_b1 } }) - store_b.upsert(FEATURES, flag_b2) - - item = store_a.get(FEATURES, 'flagA1', lambda x: x) - assert item == flag_a1 - item = store_a.get(FEATURES, 'flagB1', lambda x: x) - assert item is None - items = store_a.all(FEATURES, lambda x: x) - assert items == { 'flagA1': flag_a1, 'flagA2': flag_a2 } - - item = store_b.get(FEATURES, 'flagB1', lambda x: x) - assert item == flag_b1 - item = store_b.get(FEATURES, 'flagA1', lambda x: x) - assert item is None - items = store_b.all(FEATURES, lambda x: x) - assert items == { 'flagB1': flag_b1, 'flagB2': flag_b2 } - - -@pytest.mark.skipif(skip_db_tests, reason="skipping database tests") -class TestRedisFeatureStoreExtraTests: - def test_upsert_race_condition_against_external_client_with_higher_version(self): - other_client = redis.StrictRedis(host='localhost', port=6379, db=0) - store = Redis.new_feature_store() - store.init({ FEATURES: {} }) - - other_version = {u'key': u'flagkey', u'version': 2} - def hook(base_key, key): - if other_version['version'] <= 4: - other_client.hset(base_key, key, json.dumps(other_version)) - other_version['version'] = other_version['version'] + 1 - store._core.test_update_hook = hook - - feature = { u'key': 'flagkey', u'version': 1 } - - store.upsert(FEATURES, feature) - result = store.get(FEATURES, 'flagkey', lambda x: x) - assert result['version'] == 2 - - def test_upsert_race_condition_against_external_client_with_lower_version(self): - other_client = redis.StrictRedis(host='localhost', port=6379, db=0) - store = Redis.new_feature_store() - store.init({ FEATURES: {} }) - - other_version = {u'key': u'flagkey', u'version': 2} - def hook(base_key, key): - if other_version['version'] <= 4: - other_client.hset(base_key, key, json.dumps(other_version)) - other_version['version'] = other_version['version'] + 1 - store._core.test_update_hook = hook - - feature = { u'key': 'flagkey', u'version': 5 } - - store.upsert(FEATURES, feature) - result = store.get(FEATURES, 'flagkey', lambda x: x) - assert result['version'] == 5 diff --git a/testing/test_in_memory_feature_store.py b/testing/test_in_memory_feature_store.py new file mode 100644 index 00000000..5cd8ba8c --- /dev/null +++ b/testing/test_in_memory_feature_store.py @@ -0,0 +1,17 @@ +import pytest + +from ldclient.feature_store import InMemoryFeatureStore +from ldclient.interfaces import FeatureStore + +from testing.feature_store_test_base import FeatureStoreTestBase, FeatureStoreTester + + +class InMemoryFeatureStoreTester(FeatureStoreTester): + def create_feature_store(self) -> FeatureStore: + return InMemoryFeatureStore() + + +class TestInMemoryFeatureStore(FeatureStoreTestBase): + @pytest.fixture + def tester(self): + return InMemoryFeatureStoreTester() From c5677774aa7433ec63ffef05c164107889d6a7a8 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Mon, 6 Dec 2021 22:51:01 +0000 Subject: [PATCH 249/356] converted ldclient.integrations module from file to directory; started moving public classes out of ldclient.impl.integrations.test_data* and instead into ldclient.integrations.test_data*; started adding TestData documentation --- 
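A note on the boolean shorthand in the new module: the convenience methods map True/False
onto variation indexes 0 and 1 (TRUE_VARIATION_INDEX / FALSE_VARIATION_INDEX), so, using a
hypothetical flag key, the following two builder chains configure the same fallthrough:

    td.flag('example-key').fallthrough_variation(True)
    td.flag('example-key').variations(True, False).fallthrough_variation(TRUE_VARIATION_INDEX)
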
docs/api-testing.rst | 10 + docs/index.rst | 1 + .../test_data/test_data_source.py | 260 -------------- .../__init__.py} | 6 +- ldclient/integrations/test_data.py | 318 ++++++++++++++++++ testing/test_test_data_source.py | 6 +- 6 files changed, 334 insertions(+), 267 deletions(-) create mode 100644 docs/api-testing.rst rename ldclient/{integrations.py => integrations/__init__.py} (98%) create mode 100644 ldclient/integrations/test_data.py diff --git a/docs/api-testing.rst b/docs/api-testing.rst new file mode 100644 index 00000000..d42e9c68 --- /dev/null +++ b/docs/api-testing.rst @@ -0,0 +1,10 @@ +Testing Integrations +==================== + +ldclient.integrations.test_data module +---------------------------- + +.. automodule:: ldclient.integrations.test_data + :members: + :special-members: __init__ + :show-inheritance: diff --git a/docs/index.rst b/docs/index.rst index 12e66506..8c601890 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -19,3 +19,4 @@ For more information, see LaunchDarkly's `Quickstart object: """Provides a way to use local files as a source of feature flag state. This would typically be used in a test environment, to operate using a predetermined feature flag state without an diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py new file mode 100644 index 00000000..6fee3abc --- /dev/null +++ b/ldclient/integrations/test_data.py @@ -0,0 +1,318 @@ +import copy +from ldclient.versioned_data_kind import FEATURES +from ldclient.rwlock import ReadWriteLock +from ldclient.impl.integrations.test_data.test_data_source import _TestDataSource + +TRUE_VARIATION_INDEX = 0 +FALSE_VARIATION_INDEX = 1 + +def _variation_for_boolean(variation): + if variation: + return TRUE_VARIATION_INDEX + else: + return FALSE_VARIATION_INDEX + +class TestData(): + """A mechanism for providing dynamically updatable feature flag state in a + simplified form to an SDK client in test scenarios. + + Unlike ``Files``, this mechanism does not use any external resources. It provides only + the data that the application has put into it using the ``update`` method. + :: + + td = TestData.data_source() + td.update(td.flag('flag-key-1').variation_for_all_users(True)) + + client = LDClient(config=Config('SDK_KEY', update_processor_class = td)) + + # flags can be updated at any time: + td.update(td.flag('flag-key-1').variation_for_user('some-user-key', True) + .fallthrough_variation(False)) + + The above example uses a simple boolean flag, but more complex configurations are possible using + the methods of the ``FlagBuilder`` that is returned by ``flag``. ``FlagBuilder`` + supports many of the ways a flag can be configured on the LaunchDarkly dashboard, but does not + currently support 1. rule operators other than "in" and "not in", or 2. percentage rollouts. + + If the same `TestData` instance is used to configure multiple `LDClient` instances, + any changes made to the data will propagate to all of the `LDClient`s. + + """ + + def __init__(self): + self._flag_builders = {} + self._current_flags = {} + self._lock = ReadWriteLock() + self._instances = [] + + def __call__(self, config, store, ready): + data_source = _TestDataSource(store, self) + try: + self._lock.lock() + self._instances.append(data_source) + finally: + self._lock.unlock() + + return data_source + + + @staticmethod + def data_source(): + return TestData() + + + def flag(self, key: str): + """Creates or copies a ``FlagBuilder`` for building a test flag configuration. 
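As an illustrative aside (the flag key and variation values here are invented), the create-or-copy behavior just described means that successive calls build on one another::

    td = TestData.data_source()

    # First call: a fresh builder with the default boolean configuration.
    td.update(td.flag('my-flag').variations('red', 'green').fallthrough_variation(1))

    # A later call starts from the configuration last passed to update(),
    # so this changes only the fallthrough and keeps the custom variations.
    td.update(td.flag('my-flag').fallthrough_variation(0))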
+ + If this flag key has already been defined in this ``TestData`` instance, then the builder + starts with the same configuration that was last provided for this flag. + + Otherwise, it starts with a new default configuration in which the flag has ``True`` and + ``False`` variations, is ``True`` for all users when targeting is turned on and + ``False`` otherwise, and currently has targeting turned on. You can change any of those + properties, and provide more complex behavior, using the ``FlagBuilder`` methods. + + Once you have set the desired configuration, pass the builder to ``update``. + + :param key: the flag key + :return: the flag configuration builder object + """ + try: + self._lock.rlock() + if key in self._flag_builders and self._flag_builders[key]: + return self._flag_builders[key].copy() + else: + return FlagBuilder(key).boolean_flag() + finally: + self._lock.runlock() + + def update(self, flag_builder): + """Updates the test data with the specified flag configuration. + + This has the same effect as if a flag were added or modified on the LaunchDarkly dashboard. + It immediately propagates the flag change to any ``LDClient`` instance(s) that you have + already configured to use this ``TestData``. If no ``LDClient`` has been started yet, + it simply adds this flag to the test data which will be provided to any ``LDClient`` that + you subsequently configure. + + Any subsequent changes to this ``FlagBuilder`` instance do not affect the test data, + unless you call ``update`` again. + + :param flag_builder: a flag configuration builder + :return: self (the TestData object) + """ + try: + self._lock.lock() + + old_version = 0 + if flag_builder._key in self._current_flags: + old_flag = self._current_flags[flag_builder._key] + if old_flag: + old_version = old_flag.version + + new_flag = flag_builder.build(old_version + 1) + + self._current_flags[flag_builder._key] = new_flag + self._flag_builders[flag_builder._key] = flag_builder.copy() + finally: + self._lock.unlock() + + for instance in self._instances: + instance.upsert(new_flag) + + return self + + + def make_init_data(self): + return { FEATURES: copy.copy(self._current_flags) } + + def closed_instance(self, instance): + try: + self._lock.lock() + self._instances.remove(instance) + finally: + self._lock.unlock() + +class FlagBuilder(): + def __init__(self, key): + self._key = key + self._on = True + self._variations = [] + self._off_variation = None + self._fallthrough_variation = None + self._targets = {} + self._rules = [] + + + def copy(self): + to = FlagBuilder(self._key) + + to._on = self._on + to._variations = copy.copy(self._variations) + to._off_variation = self._off_variation + to._fallthrough_variation = self._fallthrough_variation + to._targets = copy.copy(self._targets) + to._rules = copy.copy(self._rules) + + return to + + + def on(self, aBool): + self._on = aBool + return self + + def fallthrough_variation(self, variation): + if isinstance(variation, bool): + self._boolean_flag(self)._fallthrough_variation = variation + return self + else: + self._fallthrough_variation = variation + return self + + def off_variation(self, variation) : + if isinstance(variation, bool): + self._boolean_flag(self)._off_variation = variation + return self + else: + self._off_variation = variation + return self + + def boolean_flag(self): + if self.is_boolean_flag(): + return self + else: + return (self.variations(True, False) + .fallthrough_variation(TRUE_VARIATION_INDEX) + .off_variation(FALSE_VARIATION_INDEX)) + + def 
is_boolean_flag(self): + return (len(self._variations) == 2 + and self._variations[TRUE_VARIATION_INDEX] == True + and self._variations[FALSE_VARIATION_INDEX] == False) + + def variations(self, *variations): + self._variations = list(variations) + + return self + + + def variation_for_all_users(self, variation): + if isinstance(variation, bool): + return self.boolean_flag().variation_for_all_users(_variation_for_boolean(variation)) + else: + return self.on(True).fallthrough_variation(variation) + + def variation_for_user(self, user_key, variation): + if isinstance(variation, bool): + # `variation` is True/False value + return self.boolean_flag().variation_for_user(user_key, _variation_for_boolean(variation)) + else: + # `variation` specifies the index of the variation to set + targets = self._targets + + for idx, var in enumerate(self._variations): + if (idx == variation): + # If there is no set at the current variation, set it to be empty + target_for_variation = [] + if idx in targets: + target_for_variation = targets[idx] + + # If user is not in the current variation set, add them + if user_key not in target_for_variation: + target_for_variation.append(user_key) + + self._targets[idx] = target_for_variation + + else: + # Remove user from the other variation set if necessary + if idx in targets: + target_for_variation = targets[idx] + if user_key in target_for_variation: + user_key_idx = target_for_variation.index(user_key) + del target_for_variation[user_key_idx] + + self._targets[idx] = target_for_variation + + return self + + def add_rule(self, flag_rule_builder): + self._rules.append(flag_rule_builder) + + def if_match(self, attribute, *values): + flag_rule_builder = FlagRuleBuilder(self) + return flag_rule_builder.and_match(attribute, *values) + + def if_not_match(self, attribute, *values): + flag_rule_builder = FlagRuleBuilder(self) + return flag_rule_builder.and_not_match(attribute, values) + + def clear_rules(self): + del self._rules + return self + + + def build(self, version): + base_flag_object = { + 'key': self._key, + 'version': version, + 'on': self._on, + 'variations': self._variations + } + + base_flag_object['off_variation'] = self._off_variation + base_flag_object['fallthrough_variation'] = self._fallthrough_variation + + targets = [] + for var_index, user_keys in self._targets.items(): + targets.append({ + 'variation': var_index, + 'values': user_keys + }) + base_flag_object['targets'] = targets + + base_flag_object['rules'] = [] + for idx, rule in enumerate(self._rules): + base_flag_object['rules'].append(rule.build(idx)) + + return base_flag_object + + +class FlagRuleBuilder(): + def __init__(self, flag_builder): + self._flag_builder = flag_builder + self._clauses = [] + self._variation = None + + def and_match(self, attribute, *values): + self._clauses.append({ + 'attribute': attribute, + 'operator': 'in', + 'values': list(values), + 'negate': False + }) + return self + + def and_not_match(self, attribute, *values): + self._clauses.append({ + 'attribute': attribute, + 'operator': 'in', + 'values': list(values), + 'negate': True + }) + return self + + def then_return(self, variation): + if isinstance(variation, bool): + self._flag_builder.boolean_flag() + return self.then_return(_variation_for_boolean(variation)) + else: + self._variation = variation + self._flag_builder.add_rule(self) + return self._flag_builder + + def build(self, id): + return { + 'id': 'rule' + str(id), + 'variation': self._variation, + 'clauses': self._clauses + } diff --git 
a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 26ae1981..4146c82d 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -7,12 +7,10 @@ from ldclient.versioned_data_kind import FEATURES, SEGMENTS #from ldclient.integrations import TestData -from ldclient.impl.integrations.test_data.test_data_source import TestData +from ldclient.integrations.test_data import TestData -# Filter warning arising from Pytest treating classes starting -# with the word 'Test' as part of the test suite -warnings.filterwarnings("ignore", message="cannot collect test class 'TestData'") +TestData.__test__ = False def setup_function(): print("Setup") From 31568015ca6a5f006a2e68ea1132648e481f3710 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Mon, 6 Dec 2021 22:53:18 +0000 Subject: [PATCH 250/356] removed setup/teardown functions leftover from test scaffold --- testing/test_test_data_source.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 45cf98e8..8e021a5a 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -10,11 +10,6 @@ -def setup_function(): - print("Setup") - -def teardown_function(): - print("Teardown") def test_makes_flag(): flag = TestData.flag('test-flag') From 8abe007bb3b35bec0af456bd77a4f686f2f81ea5 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Tue, 7 Dec 2021 21:02:45 +0000 Subject: [PATCH 251/356] added TestData, FlagBuilder, and FlagRuleBuilder documentation; minor adjustments to implementation details --- .../test_data/test_data_source.py | 4 +- ldclient/integrations/test_data.py | 217 +++++++++++++++++- testing/test_test_data_source.py | 2 - 3 files changed, 210 insertions(+), 13 deletions(-) diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index 3cf6fa7c..db3ac729 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -10,10 +10,10 @@ def __init__(self, feature_store, test_data): self._test_data = test_data def start(self): - self._feature_store.init(self._test_data.make_init_data()) + self._feature_store.init(self._test_data._make_init_data()) def stop(self): - self._test_data.closed_instance(self) + self._test_data._closed_instance(self) def initialized(self): return True diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 6fee3abc..856c570f 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -58,10 +58,14 @@ def __call__(self, config, store, ready): @staticmethod def data_source(): + """Creates a new instance of the test data source. + + :return: a new configurable test data source + """ return TestData() - def flag(self, key: str): + def flag(self, key): """Creates or copies a ``FlagBuilder`` for building a test flag configuration. If this flag key has already been defined in this ``TestData`` instance, then the builder @@ -74,7 +78,7 @@ def flag(self, key: str): Once you have set the desired configuration, pass the builder to ``update``. 
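A brief usage sketch of the wiring, mirroring the tests later in this series (the SDK key is a placeholder): a TestData instance is passed directly as update_processor_class, since __call__(config, store, ready) makes it act as its own factory::

    from ldclient.client import LDClient
    from ldclient.config import Config
    from ldclient.feature_store import InMemoryFeatureStore
    from ldclient.integrations.test_data import TestData

    td = TestData.data_source()
    store = InMemoryFeatureStore()
    client = LDClient(config=Config('SDK_KEY',
                                    update_processor_class=td,  # td itself acts as the factory
                                    send_events=False,
                                    feature_store=store))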
- :param key: the flag key + :param str key: the flag key :return: the flag configuration builder object """ try: self._lock.rlock() @@ -123,10 +127,10 @@ def update(self, flag_builder): return self - def make_init_data(self): + def _make_init_data(self): return { FEATURES: copy.copy(self._current_flags) } - def closed_instance(self, instance): + def _closed_instance(self, instance): try: self._lock.lock() self._instances.remove(instance) @@ -134,7 +138,14 @@ def closed_instance(self, instance): self._lock.unlock() class FlagBuilder(): + """A builder for feature flag configurations to be used with :class:`ldclient.integrations.test_data.TestData`. + + :see: :meth:`ldclient.integrations.test_data.TestData.flag()` + :see: :meth:`ldclient.integrations.test_data.TestData.update()` + """ def __init__(self, key): + """:param str key: The name of the flag + """ self._key = key self._on = True self._variations = [] @@ -145,6 +156,11 @@ def __init__(self, key): def copy(self): + """Creates a deep copy of the flag builder. Subsequent updates to the + original ``FlagBuilder`` object will not update the copy and vice versa. + + :return: a copy of the flag builder object + """ to = FlagBuilder(self._key) to._on = self._on @@ -157,11 +173,33 @@ def copy(self): return to - def on(self, aBool): - self._on = aBool + def on(self, on): + """Sets targeting to be on or off for this flag. + + The effect of this depends on the rest of the flag configuration, just as it does on the + real LaunchDarkly dashboard. In the default configuration that you get from calling + :meth:`ldclient.integrations.test_data.TestData.flag()` with a new flag key, + the flag will return ``False`` whenever targeting is off, and ``True`` when + targeting is on. + + :param bool on: ``True`` if targeting should be on + :return: the flag builder + """ + self._on = on return self def fallthrough_variation(self, variation): + """Specifies the fallthrough variation. The fallthrough is the value + that is returned if targeting is on and the user was not matched by a more specific + target or rule. + + If the flag was previously configured with other variations and the variation + specified is a boolean, this also changes it to a boolean flag. + + :param bool/int variation: ``True`` or ``False`` or the desired fallthrough variation index: + ``0`` for the first, ``1`` for the second, etc. + :return: the flag builder + """ if isinstance(variation, bool): self._boolean_flag(self)._fallthrough_variation = variation return self @@ -170,6 +208,16 @@ def fallthrough_variation(self, variation): return self def off_variation(self, variation) : + """Specifies the off variation. This is the variation that is returned + whenever targeting is off. + + If the flag was previously configured with other variations and the variation + specified is a boolean, this also changes it to a boolean flag. + + :param bool/int variation: ``True`` or ``False`` or the desired off variation index: + ``0`` for the first, ``1`` for the second, etc. + :return: the flag builder + """ if isinstance(variation, bool): self._boolean_flag(self)._off_variation = variation return self @@ -178,31 +226,86 @@ def off_variation(self, variation) : return self def boolean_flag(self): - if self.is_boolean_flag(): + """A shortcut for setting the flag to use the standard boolean configuration. + + This is the default for all new flags created with + :meth:`ldclient.integrations.test_data.TestData.flag()`.
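To make the boolean/index overloading of these two setters concrete, a short sketch (flag keys and values are invented; note that the boolean branch shown in this patch is corrected later in the series, in PATCH 265)::

    td = TestData.data_source()

    # With a multi-valued flag, the argument is an index into variations().
    td.update(td.flag('color')
                .variations('red', 'green', 'blue')
                .fallthrough_variation(2)   # 'blue' whenever targeting is on
                .off_variation(0))          # 'red' whenever targeting is off

    # With a boolean argument, the flag is first converted to the standard
    # True/False configuration and the matching index is stored.
    td.update(td.flag('beta').fallthrough_variation(True).off_variation(False))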
+ + The flag will have two variations, ``True`` and ``False`` (in that order); + it will return ``False`` whenever targeting is off, and ``True`` when targeting is on + if no other settings specify otherwise. + + :return: the flag builder + """ + if self._is_boolean_flag(): return self else: return (self.variations(True, False) .fallthrough_variation(TRUE_VARIATION_INDEX) .off_variation(FALSE_VARIATION_INDEX)) - def is_boolean_flag(self): + def _is_boolean_flag(self): return (len(self._variations) == 2 and self._variations[TRUE_VARIATION_INDEX] == True and self._variations[FALSE_VARIATION_INDEX] == False) def variations(self, *variations): + """Changes the allowable variation values for the flag. + + The value may be of any valid JSON type. For instance, a boolean flag + normally has ``True, False``; a string-valued flag might have + ``'red', 'green'``; etc. + + **Example:** A single variation + :: + td.flag('new-flag') + .variations(True) + + **Example:** Multiple variations + :: + td.flag('new-flag') + .variations('red', 'green', 'blue') + + :param variations: the desired variations + :return: the flag builder + """ self._variations = list(variations) return self def variation_for_all_users(self, variation): + """Sets the flag to always return the specified variation for all users. + + The variation is specified, targeting is switched on, and any existing targets or rules are removed. + The fallthrough variation is set to the specified value. The off variation is left unchanged. + + If the flag was previously configured with other variations and the variation specified is a boolean, + this also changes it to a boolean flag. + + :param bool/int variation: ``True`` or ``False`` or the desired variation index to return: + ``0`` for the first, ``1`` for the second, etc. + :return: the flag builder + """ if isinstance(variation, bool): return self.boolean_flag().variation_for_all_users(_variation_for_boolean(variation)) else: return self.on(True).fallthrough_variation(variation) def variation_for_user(self, user_key, variation): + """Sets the flag to return the specified variation for a specific user key when targeting + is on. + + This has no effect when targeting is turned off for the flag. + + If the flag was previously configured with other variations and the variation specified is a boolean, + this also changes it to a boolean flag. + + :param str user_key: a user key + :param bool/int variation: ``True`` or ``False`` or the desired variation index to return: + ``0`` for the first, ``1`` for the second, etc. + :return: the flag builder + """ if isinstance(variation, bool): # `variation` is True/False value return self.boolean_flag().variation_for_user(user_key, _variation_for_boolean(variation)) else: @@ -239,15 +342,59 @@ def variation_for_user(self, user_key, variation): return self def add_rule(self, flag_rule_builder): self._rules.append(flag_rule_builder) def if_match(self, attribute, *values): + """Starts defining a flag rule, using the "is one of" operator. + + **Example:** create a rule that returns ``True`` if the name is "Patsy" or "Edina" + :: + td.flag("flag") + .if_match('name', 'Patsy', 'Edina') + .then_return(True); + + + + :param str attribute: the user attribute to match against + :param values: values to compare to + :return: the flag rule builder + """ flag_rule_builder = FlagRuleBuilder(self) return flag_rule_builder.and_match(attribute, *values) def if_not_match(self, attribute, *values): + """Starts defining a flag rule, using the "is not one of" operator.
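Before the rule examples continue, a usage sketch of the user-targeting methods documented above (user keys and the flag key are arbitrary)::

    td.update(td.flag('beta-banner')
                .variation_for_user('user-key-1', True)    # always True for this user
                .variation_for_user('user-key-2', False)   # always False for this one
                .fallthrough_variation(1))                 # index 1 is False in the
                                                           # default boolean configuration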
+ + **Example:** create a rule that returns ``True`` if the name is neither "Saffron" nor "Bubble" + :: + td.flag("flag") + .if_not_match('name', 'Saffron', 'Bubble') + .then_return(True); + + + + :param str attribute: the user attribute to match against + :param values: values to compare to + :return: the flag rule builder + """ flag_rule_builder = FlagRuleBuilder(self) return flag_rule_builder.and_not_match(attribute, values) def clear_rules(self): - del self._rules + """Removes any existing rules from the flag. + This undoes the effect of methods like + :meth:`ldclient.integrations.test_data.FlagBuilder.if_match()` + + :return: the same flag builder + """ + self._rules = [] + return self + + def clear_targets(self): + """Removes any existing targets from the flag. + This undoes the effect of methods like + :meth:`ldclient.integrations.test_data.FlagBuilder.variation_for_user()` + + :return: the same flag builder + """ + self._targets = {} return self @@ -278,12 +425,41 @@ def build(self, version): class FlagRuleBuilder(): + """ + A builder for feature flag rules to be used with :class:`ldclient.integrations.test_data.FlagBuilder`. + + In the LaunchDarkly model, a flag can have any number of rules, and a rule can have any number of + clauses. A clause is an individual test such as "name is 'X'". A rule matches a user if all of the + rule's clauses match the user. + + To start defining a rule, use one of the flag builder's matching methods such as + :meth:`ldclient.integrations.test_data.FlagBuilder.if_match()`. + This defines the first clause for the rule. Optionally, you may add more + clauses with the rule builder's methods such as + :meth:`ldclient.integrations.test_data.FlagRuleBuilder.and_match()` or + :meth:`ldclient.integrations.test_data.FlagRuleBuilder.and_not_match()`. + Finally, call :meth:`ldclient.integrations.test_data.FlagRuleBuilder.then_return()` + to finish defining the rule. + """ def __init__(self, flag_builder): self._flag_builder = flag_builder self._clauses = [] self._variation = None def and_match(self, attribute, *values): + """Adds another clause, using the "is one of" operator. + + **Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is "gb" + :: + td.flag('flag') + .if_match('name', 'Patsy') + .and_match('country', 'gb') + .then_return(True) + + :param str attribute: the user attribute to match against + :param values: values to compare to + :return: the flag rule builder + """ self._clauses.append({ 'attribute': attribute, 'operator': 'in', @@ -293,6 +469,19 @@ def and_match(self, attribute, *values): return self def and_not_match(self, attribute, *values): + """Adds another clause, using the "is not one of" operator. + + **Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is not "gb" + :: + td.flag('flag') + .if_match('name', 'Patsy') + .and_not_match('country', 'gb') + .then_return(True) + + :param str attribute: the user attribute to match against + :param values: values to compare to + :return: the flag rule builder + """ self._clauses.append({ 'attribute': attribute, 'operator': 'in', @@ -302,6 +491,16 @@ def and_not_match(self, attribute, *values): return self def then_return(self, variation): + """Finishes defining the rule, specifying the result as either a boolean + or a variation index. + + If the flag was previously configured with other variations and the variation specified is a boolean, + this also changes it to a boolean flag. 
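To round out the rule-building walkthrough, a sketch of a complete chain and, approximately, the rule dictionary it builds to (based on the build methods in this patch; the attribute values are invented)::

    flag = (td.flag('flag')
              .if_match('name', 'Patsy')
              .and_not_match('country', 'gb')
              .then_return(True))   # then_return hands back the flag builder

    td.update(flag)

    # flag.build(1)['rules'] is then roughly:
    # [{'id': 'rule0', 'variation': 0,
    #   'clauses': [{'attribute': 'name', 'operator': 'in',
    #                'values': ['Patsy'], 'negate': False},
    #               {'attribute': 'country', 'operator': 'in',
    #                'values': ['gb'], 'negate': True}]}]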
+ + :param bool/int variation: ``True`` or ``False`` or the desired variation index: + ``0`` for the first, ``1`` for the second, etc. + :return: the flag builder with this rule added + """ if isinstance(variation, bool): self._flag_builder.boolean_flag() return self.then_return(_variation_for_boolean(variation)) diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 4146c82d..c4b10898 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -92,8 +92,6 @@ def test_flagbuilder_can_make_boolean_flag(): td = TestData.data_source() flag = td.flag('boolean-flag').boolean_flag() - assert flag.is_boolean_flag() == True - builtFlag = flag.build(0) assert builtFlag['fallthrough_variation'] == 0 assert builtFlag['off_variation'] == 1 From 9dd064eba56cab097834efe14379dddd038b6d99 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Tue, 7 Dec 2021 23:43:41 +0000 Subject: [PATCH 252/356] removed warning suppression from TestData tests --- testing/test_test_data_source.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index c4b10898..df35b358 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -6,12 +6,9 @@ from ldclient.feature_store import InMemoryFeatureStore from ldclient.versioned_data_kind import FEATURES, SEGMENTS -#from ldclient.integrations import TestData from ldclient.integrations.test_data import TestData -TestData.__test__ = False - def setup_function(): print("Setup") From 86e40386a84f0777db52fc70cb45160253df96f2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 7 Dec 2021 19:03:19 -0800 Subject: [PATCH 253/356] fix big segments user hash algorithm to use SHA256 --- ldclient/impl/big_segments.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index d06a0563..d8dbf674 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -7,7 +7,7 @@ import base64 from expiringdict import ExpiringDict -from hashlib import md5 +from hashlib import sha256 import time from typing import Callable, Optional, Tuple @@ -108,4 +108,4 @@ def is_stale(self, timestamp) -> bool: return (timestamp is None) or ((int(time.time() * 1000) - timestamp) >= self.__stale_after_millis) def _hash_for_user_key(user_key: str) -> str: - return base64.b64encode(md5(user_key.encode('utf-8')).digest()).decode('utf-8') + return base64.b64encode(sha256(user_key.encode('utf-8')).digest()).decode('utf-8') From 9a4981eadaf4f15b14840d0dfbc9dbbb47e5cae7 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 8 Dec 2021 13:53:50 -0800 Subject: [PATCH 254/356] update mypy version --- .circleci/config.yml | 1 + test-requirements.txt | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8aea6976..345713f5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -93,6 +93,7 @@ jobs: name: verify typehints command: | export PATH="/home/circleci/.local/bin:$PATH" + mypy --install-types --non-interactive ldclient testing mypy --config-file mypy.ini ldclient testing - unless: diff --git a/test-requirements.txt b/test-requirements.txt index 93da9126..25b8ab88 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -6,5 +6,5 @@ coverage>=4.4 jsonpickle==0.9.3 pytest-cov>=2.4.0 codeclimate-test-reporter>=0.2.1 -pytest-mypy==0.7 -mypy==0.800 \ No newline at end of file +pytest-mypy==0.8.1
+mypy==0.910 \ No newline at end of file From 8d56a51904c1d3e3cd5ad6915ad3f9fca8861f8d Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Wed, 8 Dec 2021 22:41:10 +0000 Subject: [PATCH 255/356] updates to tests and related bug fixes --- ldclient/integrations/test_data.py | 18 ++++- testing/test_test_data_source.py | 116 +++++++++++++++++++++++++++-- 2 files changed, 122 insertions(+), 12 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 856c570f..6e3274d9 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -112,7 +112,7 @@ def update(self, flag_builder): if flag_builder._key in self._current_flags: old_flag = self._current_flags[flag_builder._key] if old_flag: - old_version = old_flag.version + old_version = old_flag['version'] @@ -290,7 +290,7 @@ def variation_for_all_users(self, variation): if isinstance(variation, bool): return self.boolean_flag().variation_for_all_users(_variation_for_boolean(variation)) else: - return self.on(True).fallthrough_variation(variation) + return self.clear_rules().clear_targets().on(True).fallthrough_variation(variation) def variation_for_user(self, user_key, variation): @@ -338,7 +338,7 @@ def variation_for_user(self, user_key, variation): return self - def add_rule(self, flag_rule_builder): + def _add_rule(self, flag_rule_builder): self._rules.append(flag_rule_builder) def if_match(self, attribute, *values): @@ -399,6 +399,11 @@ def clear_targets(self): def build(self, version): + """Creates a dictionary representation of the flag + + :param int version: the version number of the flag + :return: the dictionary representation of the flag + """ base_flag_object = { 'key': self._key, 'version': version, @@ -506,10 +511,15 @@ def then_return(self, variation): return self.then_return(_variation_for_boolean(variation)) else: self._variation = variation - self._flag_builder.add_rule(self) + self._flag_builder._add_rule(self) return self._flag_builder def build(self, id): + """Creates a dictionary representation of the rule + + :param id: the rule id + :return: the dictionary representation of the rule + """ return { 'id': 'rule' + str(id), 'variation': self._variation, 'clauses': self._clauses } diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index df35b358..c33f1f53 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -15,7 +15,19 @@ def setup_function(): def teardown_function(): print("Teardown") + +## Test Data + Data Source + +def test_makes_valid_datasource(): + td = TestData.data_source() + store = InMemoryFeatureStore() + + client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) + + assert store.all(FEATURES, lambda x: x) == {} + + -def test_makes_flag(): +def test_makes_valid_datasource_with_flag(): td = TestData.data_source() flag = td.flag(key='test-flag') assert flag is not None @@ -26,7 +38,7 @@ def test_makes_valid_datasource_with_flag(): assert builtFlag['variations'] == [True, False] -def test_initializes_flag_with_client(): +def test_can_retrieve_flag_from_store(): td = TestData.data_source() td.update(td.flag('some-flag')) @@ -38,7 +50,20 @@ def test_can_retrieve_flag_from_store(): client.close() -def test_update_after_close(): +def test_updates_to_flags_are_reflected_in_store(): td = TestData.data_source() store = 
InMemoryFeatureStore() + + client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) + + td.update(td.flag('some-flag')) + + assert store.get(FEATURES, 'some-flag') == td.flag('some-flag').build(1) + + client.close() + +def test_updates_after_client_close_have_no_affect(): td = TestData.data_source() store = InMemoryFeatureStore() @@ -51,18 +76,77 @@ def test_update_after_close(): assert store.get(FEATURES, 'some-flag') == None -def test_update_after_client_initialization(): +def test_can_handle_multiple_clients(): td = TestData.data_source() + td.update(td.flag('flag')) store = InMemoryFeatureStore() + store2 = InMemoryFeatureStore() + + config = Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store) + client = LDClient(config=config) + + config2 = Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store2) + client2 = LDClient(config=config2) + + assert store.get(FEATURES, 'flag') == { + 'fallthrough_variation': 0, + 'key': 'flag', + 'off_variation': 1, + 'on': True, + 'rules': [], + 'targets': [], + 'variations': [True, False], + 'version': 1 + } - client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) + assert store2.get(FEATURES, 'flag') == { + 'fallthrough_variation': 0, + 'key': 'flag', + 'off_variation': 1, + 'on': True, + 'rules': [], + 'targets': [], + 'variations': [True, False], + 'version': 1 + } - td.update(td.flag('some-flag')) + td.update(td.flag('flag').variation_for_all_users(False)) + + assert store.get(FEATURES, 'flag') == { + 'fallthrough_variation': 1, + 'key': 'flag', + 'off_variation': 1, + 'on': True, + 'rules': [], + 'targets': [], + 'variations': [True, False], + 'version': 2 + } - assert store.get(FEATURES, 'some-flag') == td.flag('some-flag').build(1) + assert store2.get(FEATURES, 'flag') == { + 'fallthrough_variation': 1, + 'key': 'flag', + 'off_variation': 1, + 'on': True, + 'rules': [], + 'targets': [], + 'variations': [True, False], + 'version': 2 + } client.close() + client2.close() + + +## FlagBuilder + +def test_flagbuilder_defaults_to_boolean_flag(): + td = TestData.data_source() + flag = td.flag('empty-flag') + assert flag.build(0)['variations'] == [True, False] + assert flag.build(0)['fallthrough_variation'] == 0 + assert flag.build(0)['off_variation'] == 1 def test_flagbuilder_can_turn_flag_off(): td = TestData.data_source() @@ -93,19 +177,35 @@ def test_flagbuilder_can_make_boolean_flag(): assert builtFlag['fallthrough_variation'] == 0 assert builtFlag['off_variation'] == 1 +def test_flagbuilder_can_set_variation_when_targeting_is_off(): + td = TestData.data_source() + flag = td.flag('test-flag').on(False) + assert flag.build(0)['on'] == False + assert flag.build(0)['variations'] == [True,False] + flag.variations('dog', 'cat') + assert flag.build(0)['variations'] == ['dog','cat'] + def test_flagbuilder_can_set_variation_for_all_users(): td = TestData.data_source() flag = td.flag('test-flag') flag.variation_for_all_users(True) assert flag.build(0)['fallthrough_variation'] == 0 +def test_flagbuilder_clears_existing_rules_and_targets_when_setting_variation_for_all_users(): + td = TestData.data_source() + + flag = td.flag('test-flag').if_match('name', 'christian').then_return(False).variation_for_user('christian', False).variation_for_all_users(True).build(0) + + assert flag['rules'] == [] + assert 
flag['targets'] == [] + def test_flagbuilder_can_set_variations(): td = TestData.data_source() flag = td.flag('test-flag') flag.variations(2,3,4,5) assert flag.build(0)['variations'] == [2,3,4,5] -def test_flagbuilder_can_safely_copy(): +def test_flagbuilder_can_make_an_immutable_copy(): td = TestData.data_source() flag = td.flag('test-flag') flag.variations(1,2) From ecbe95de0c6eb3f964888ba8b226694040734cd5 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 8 Dec 2021 18:24:48 -0800 Subject: [PATCH 256/356] always cache Big Segment query result even if it's None --- ldclient/impl/big_segments.py | 6 ++++++ testing/impl/test_big_segments.py | 16 +++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index d8dbf674..cb11a74e 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -44,6 +44,10 @@ def _update_status(self, new_status: BigSegmentStoreStatus): self.__status_listeners.notify(new_status) class BigSegmentStoreManager: + # use EMPTY_MEMBERSHIP as a singleton whenever a membership query returns None; it's safe to reuse it + # because we will never modify the membership properties after they're queried + EMPTY_MEMBERSHIP = {} + """ Internal component that decorates the Big Segment store with caching behavior, and also polls the store to track its status. @@ -78,6 +82,8 @@ def get_user_membership(self, user_key: str) -> Tuple[Optional[dict], str]: if membership is None: try: membership = self.__store.get_membership(_hash_for_user_key(user_key)) + if membership is None: + membership = self.EMPTY_MEMBERSHIP self.__cache[user_key] = membership except Exception as e: log.exception("Big Segment store membership query returned error: %s" % e) diff --git a/testing/impl/test_big_segments.py b/testing/impl/test_big_segments.py index f433db56..67ccb1e8 100644 --- a/testing/impl/test_big_segments.py +++ b/testing/impl/test_big_segments.py @@ -32,9 +32,23 @@ def test_membership_query_cached_result_healthy_status(): try: expected_result = (expected_membership, BigSegmentsStatus.HEALTHY) assert manager.get_user_membership(user_key) == expected_result + assert manager.get_user_membership(user_key) == expected_result + finally: + manager.stop() + assert store.membership_queries == [ user_hash ] # only 1 query done rather than 2, due to caching + +def test_membership_query_can_cache_result_of_none(): + store = MockBigSegmentStore() + store.setup_metadata_always_up_to_date() + store.setup_membership(user_hash, None) + manager = BigSegmentStoreManager(BigSegmentsConfig(store=store)) + try: + expected_result = (None, BigSegmentsStatus.HEALTHY) + assert manager.get_user_membership(user_key) == expected_result + assert manager.get_user_membership(user_key) == expected_result finally: manager.stop() - assert store.membership_queries == [ user_hash ] + assert store.membership_queries == [ user_hash ] # only 1 query done rather than 2, due to caching def test_membership_query_stale_status(): expected_membership = { "key1": True, "key2": False } From 489f1e7af94cb64b6d1317aaaca23c6ad9a2bc0a Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 8 Dec 2021 18:28:24 -0800 Subject: [PATCH 257/356] fix test assertion --- testing/impl/test_big_segments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/impl/test_big_segments.py b/testing/impl/test_big_segments.py index 67ccb1e8..7e24c726 100644 --- a/testing/impl/test_big_segments.py +++ b/testing/impl/test_big_segments.py @@ 
-43,7 +43,7 @@ def test_membership_query_can_cache_result_of_none(): store.setup_membership(user_hash, None) manager = BigSegmentStoreManager(BigSegmentsConfig(store=store)) try: - expected_result = (None, BigSegmentsStatus.HEALTHY) + expected_result = ({}, BigSegmentsStatus.HEALTHY) assert manager.get_user_membership(user_key) == expected_result assert manager.get_user_membership(user_key) == expected_result finally: From c524e7efe3258c4f6da3c94fc87983acfeda3a58 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 8 Dec 2021 18:31:30 -0800 Subject: [PATCH 258/356] lint --- ldclient/impl/big_segments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index cb11a74e..553b33b5 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -46,7 +46,7 @@ def _update_status(self, new_status: BigSegmentStoreStatus): class BigSegmentStoreManager: # use EMPTY_MEMBERSHIP as a singleton whenever a membership query returns None; it's safe to reuse it # because we will never modify the membership properties after they're queried - EMPTY_MEMBERSHIP = {} + EMPTY_MEMBERSHIP = {} # type: dict """ Internal component that decorates the Big Segment store with caching behavior, and also polls the From f8e95bf1a6bdc195b1bbbe034bf6e09bdc9e37ad Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 8 Dec 2021 18:53:02 -0800 Subject: [PATCH 259/356] fix big segment ref format --- ldclient/impl/evaluator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 90b4ccfa..2bc7667b 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -306,7 +306,7 @@ def _make_big_segment_ref(segment: dict) -> str: # The format of Big Segment references is independent of what store implementation is being # used; the store implementation receives only this string and does not know the details of # the data model. The Relay Proxy will use the same format when writing to the store. 
- return "%s:%d" % (segment.get('key', ''), segment.get('generation', 0)) + return "%s.g%d" % (segment.get('key', ''), segment.get('generation', 0)) def error_reason(error_kind: str) -> dict: return {'kind': 'ERROR', 'errorKind': error_kind} From c1d6548dc79b54db603b7dd56ccd212be6b2e6f3 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 8 Dec 2021 20:37:13 -0800 Subject: [PATCH 260/356] fix big segments cache TTL being set to wrong value --- ldclient/impl/big_segments.py | 6 ++++-- testing/impl/test_big_segments.py | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index 553b33b5..b6a013d3 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -61,7 +61,7 @@ def __init__(self, config: BigSegmentsConfig): self.__poll_task = None # type: Optional[RepeatingTask] if self.__store: - self.__cache = ExpiringDict(max_len = config.user_cache_size, max_age_seconds=config.user_cache_size) + self.__cache = ExpiringDict(max_len = config.user_cache_size, max_age_seconds=config.user_cache_time) self.__poll_task = RepeatingTask(config.status_poll_interval, 0, self.poll_store_and_update_status) self.__poll_task.start() @@ -80,8 +80,10 @@ def get_user_membership(self, user_key: str) -> Tuple[Optional[dict], str]: return (None, BigSegmentsStatus.NOT_CONFIGURED) membership = self.__cache.get(user_key) if membership is None: + user_hash = _hash_for_user_key(user_key) + log.warn("*** querying Big Segments for user hash: %s" % user_hash) try: - membership = self.__store.get_membership(_hash_for_user_key(user_key)) + membership = self.__store.get_membership(user_hash) if membership is None: membership = self.EMPTY_MEMBERSHIP self.__cache[user_key] = membership diff --git a/testing/impl/test_big_segments.py b/testing/impl/test_big_segments.py index 7e24c726..9cb8e3fd 100644 --- a/testing/impl/test_big_segments.py +++ b/testing/impl/test_big_segments.py @@ -50,6 +50,21 @@ def test_membership_query_can_cache_result_of_none(): manager.stop() assert store.membership_queries == [ user_hash ] # only 1 query done rather than 2, due to caching +def test_membership_query_cache_can_expire(): + expected_membership = { "key1": True, "key2": False } + store = MockBigSegmentStore() + store.setup_metadata_always_up_to_date() + store.setup_membership(user_hash, expected_membership) + manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, user_cache_time=0.005)) + try: + expected_result = (expected_membership, BigSegmentsStatus.HEALTHY) + assert manager.get_user_membership(user_key) == expected_result + time.sleep(0.1) + assert manager.get_user_membership(user_key) == expected_result + finally: + manager.stop() + assert store.membership_queries == [ user_hash, user_hash ] # cache expired after 1st query + def test_membership_query_stale_status(): expected_membership = { "key1": True, "key2": False } store = MockBigSegmentStore() From d593adedd66b74c315296ee54c2e1d6e1fc4edc9 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Thu, 9 Dec 2021 20:45:34 +0000 Subject: [PATCH 261/356] fixed structure of fallthrough variation in result of FlagBuilder.build() --- ldclient/integrations/test_data.py | 6 ++-- testing/test_test_data_source.py | 46 +++++++++++++++++++----------- 2 files changed, 33 insertions(+), 19 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 6e3274d9..1f49841b 100644 --- a/ldclient/integrations/test_data.py +++ 
b/ldclient/integrations/test_data.py @@ -411,8 +411,10 @@ def build(self, version): 'variations': self._variations } - base_flag_object['off_variation'] = self._off_variation - base_flag_object['fallthrough_variation'] = self._fallthrough_variation + base_flag_object['offVariation'] = self._off_variation + base_flag_object['fallthrough'] = { + 'variation': self._fallthrough_variation + } targets = [] for var_index, user_keys in self._targets.items(): diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index c33f1f53..1c5681e6 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -16,6 +16,8 @@ def teardown_function(): print("Teardown") +TestData.__test__ = False + ## Test Data + Data Source def test_makes_valid_datasource(): @@ -90,9 +92,11 @@ def test_can_handle_multiple_clients(): client2 = LDClient(config=config2) assert store.get(FEATURES, 'flag') == { - 'fallthrough_variation': 0, + 'fallthrough': { + 'variation': 0, + }, 'key': 'flag', - 'off_variation': 1, + 'offVariation': 1, 'on': True, 'rules': [], 'targets': [], @@ -101,9 +105,11 @@ def test_can_handle_multiple_clients(): } assert store2.get(FEATURES, 'flag') == { - 'fallthrough_variation': 0, + 'fallthrough': { + 'variation': 0, + }, 'key': 'flag', - 'off_variation': 1, + 'offVariation': 1, 'on': True, 'rules': [], 'targets': [], @@ -114,9 +120,11 @@ def test_can_handle_multiple_clients(): td.update(td.flag('flag').variation_for_all_users(False)) assert store.get(FEATURES, 'flag') == { - 'fallthrough_variation': 1, + 'fallthrough': { + 'variation': 1, + }, 'key': 'flag', - 'off_variation': 1, + 'offVariation': 1, 'on': True, 'rules': [], 'targets': [], @@ -125,9 +133,11 @@ def test_can_handle_multiple_clients(): } assert store2.get(FEATURES, 'flag') == { - 'fallthrough_variation': 1, + 'fallthrough': { + 'variation': 1, + }, 'key': 'flag', - 'off_variation': 1, + 'offVariation': 1, 'on': True, 'rules': [], 'targets': [], @@ -145,8 +155,8 @@ def test_flagbuilder_defaults_to_boolean_flag(): td = TestData.data_source() flag = td.flag('empty-flag') assert flag.build(0)['variations'] == [True, False] - assert flag.build(0)['fallthrough_variation'] == 0 - assert flag.build(0)['off_variation'] == 1 + assert flag.build(0)['fallthrough'] == {'variation': 0} + assert flag.build(0)['offVariation'] == 1 def test_flagbuilder_can_turn_flag_off(): td = TestData.data_source() @@ -160,22 +170,22 @@ def test_flagbuilder_can_set_fallthrough_variation(): flag = td.flag('test-flag') flag.fallthrough_variation(2) - assert flag.build(0)['fallthrough_variation'] == 2 + assert flag.build(0)['fallthrough'] == {'variation': 2} def test_flagbuilder_can_set_off_variation(): td = TestData.data_source() flag = td.flag('test-flag') flag.off_variation(2) - assert flag.build(0)['off_variation'] == 2 + assert flag.build(0)['offVariation'] == 2 def test_flagbuilder_can_make_boolean_flag(): td = TestData.data_source() flag = td.flag('boolean-flag').boolean_flag() builtFlag = flag.build(0) - assert builtFlag['fallthrough_variation'] == 0 - assert builtFlag['off_variation'] == 1 + assert builtFlag['fallthrough'] == {'variation': 0} + assert builtFlag['offVariation'] == 1 def test_flagbuilder_can_set_variation_when_targeting_is_off(): td = TestData.data_source() @@ -189,7 +199,7 @@ def test_flagbuilder_can_set_variation_for_all_users(): td = TestData.data_source() flag = td.flag('test-flag') flag.variation_for_all_users(True) - assert flag.build(0)['fallthrough_variation'] == 0 + assert 
flag.build(0)['fallthrough'] == {'variation': 0} def test_flagbuilder_clears_existing_rules_and_targets_when_setting_variation_for_all_users(): td = TestData.data_source() @@ -246,9 +256,11 @@ def test_flagbuilder_can_build(): flag = td.flag('some-flag') flag.if_match('country', 'fr').then_return(True) expected_result = { - 'fallthrough_variation': 0, + 'fallthrough': { + 'variation': 0, + }, 'key': 'some-flag', - 'off_variation': 1, + 'offVariation': 1, 'on': True, 'targets': [], 'variations': [True, False], From 45f1e23189a936567f9ea437bce73fabcba1a39c Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Thu, 9 Dec 2021 21:19:25 +0000 Subject: [PATCH 262/356] moved __test__ attribute into TestData class definition to prevent mypy from complaining about a missing class attribute --- ldclient/integrations/test_data.py | 4 ++++ testing/test_test_data_source.py | 6 ------ 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 1f49841b..19c04a11 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -39,6 +39,10 @@ class TestData(): """ + + # Prevent pytest from treating this as a test class + __test__ = False + def __init__(self): self._flag_builders = {} self._current_flags = {} diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 57ff97e8..602776d1 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -9,12 +9,6 @@ from ldclient.integrations.test_data import TestData -# Filter warning arising from Pytest treating classes starting -# with the word 'Test' as part of the test suite -warnings.filterwarnings("ignore", message="cannot collect test class 'TestData'") - - -TestData.__test__ = False ## Test Data + Data Source From 5f127003dbc300f61f9c73fcc4b2c5f50991a3df Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 10 Dec 2021 14:26:44 -0800 Subject: [PATCH 263/356] minor doc comment fix --- ldclient/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/config.py b/ldclient/config.py index 8d5e8118..c18ef20f 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -29,7 +29,7 @@ class BigSegmentsConfig: from ldclient.config import Config, BigSegmentsConfig from ldclient.integrations import Redis - store = Redis.new_big_segment_store("my-table-name") + store = Redis.new_big_segment_store(url='redis://localhost:6379') config = Config(big_segments=BigSegmentsConfig(store = store)) """ def __init__(self, From 3b1d740f0e0ffc61652f0b6e0f5ad62965ca6b8d Mon Sep 17 00:00:00 2001 From: Christian Charukiewicz Date: Mon, 13 Dec 2021 14:45:02 -0600 Subject: [PATCH 264/356] Apply suggestions related to Sphinx docstring formatting from code review Co-authored-by: Eli Bishop --- docs/api-testing.rst | 2 +- ldclient/integrations/test_data.py | 49 +++++++++++++++--------------- 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/docs/api-testing.rst b/docs/api-testing.rst index d42e9c68..4e765228 100644 --- a/docs/api-testing.rst +++ b/docs/api-testing.rst @@ -2,7 +2,7 @@ Testing Integrations ==================== ldclient.integrations.test_data module ----------------------------- +-------------------------------------- .. 
automodule:: ldclient.integrations.test_data :members: :special-members: __init__ :show-inheritance: diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 19c04a11..f61f1f88 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -18,16 +18,16 @@ class TestData(): Unlike ``Files``, this mechanism does not use any external resources. It provides only the data that the application has put into it using the ``update`` method. - :: + :: td = TestData.data_source() td.update(td.flag('flag-key-1').variation_for_all_users(True)) client = LDClient(config=Config('SDK_KEY', update_processor_class = td)) # flags can be updated at any time: - td.update(td.flag('flag-key-1').variation_for_user('some-user-key', True) - .fallthrough_variation(False)) + td.update(td.flag('flag-key-1').variation_for_user('some-user-key', True) \ + .fallthrough_variation(False)) The above example uses a simple boolean flag, but more complex configurations are possible using the methods of the ``FlagBuilder`` that is returned by ``flag``. ``FlagBuilder`` supports many of the ways a flag can be configured on the LaunchDarkly dashboard, but does not currently support 1. rule operators other than "in" and "not in", or 2. percentage rollouts. If the same `TestData` instance is used to configure multiple `LDClient` instances, - any changes made to the data will propagate to all of the `LDClient`s. - + any changes made to the data will propagate to all of the `LDClient` instances. """ @@ -261,13 +260,15 @@ def variations(self, *variations): ``'red', 'green'``; etc. **Example:** A single variation + :: - td.flag('new-flag') + td.flag('new-flag') \ .variations(True) **Example:** Multiple variations - :: + + :: + td.flag('new-flag') \ .variations('red', 'green', 'blue') :param variations: the desired variations :return: the flag builder @@ -349,12 +350,11 @@ def if_match(self, attribute, *values): """Starts defining a flag rule, using the "is one of" operator. **Example:** create a rule that returns ``True`` if the name is "Patsy" or "Edina" - :: - td.flag("flag") - .if_match('name', 'Patsy', 'Edina') - .then_return(True); - + :: + td.flag("flag") \ + .if_match('name', 'Patsy', 'Edina') \ + .then_return(True) :param str attribute: the user attribute to match against :param values: values to compare to @@ -367,12 +367,11 @@ def if_not_match(self, attribute, *values): """Starts defining a flag rule, using the "is not one of" operator. **Example:** create a rule that returns ``True`` if the name is neither "Saffron" nor "Bubble" - :: - td.flag("flag") - .if_not_match('name', 'Saffron', 'Bubble') - .then_return(True); - + :: + td.flag("flag") \ + .if_not_match('name', 'Saffron', 'Bubble') \ + .then_return(True) :param str attribute: the user attribute to match against :param values: values to compare to @@ -461,10 +460,11 @@ def and_match(self, attribute, *values): """Adds another clause, using the "is one of" operator. **Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is "gb" + :: - td.flag('flag') - .if_match('name', 'Patsy') - .and_match('country', 'gb') + td.flag('flag') \ + .if_match('name', 'Patsy') \ + .and_match('country', 'gb') \ .then_return(True) :param str attribute: the user attribute to match against @@ -483,10 +483,11 @@ def and_not_match(self, attribute, *values): """Adds another clause, using the "is not one of" operator.
**Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is not "gb" + :: - td.flag('flag') - .if_match('name', 'Patsy') - .and_not_match('country', 'gb') + td.flag('flag') \ + .if_match('name', 'Patsy') \ + .and_not_match('country', 'gb') \ .then_return(True) :param str attribute: the user attribute to match against From 3baa8431181f35ce91c5670ad4ddf9150667f5ea Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Fri, 14 Jan 2022 17:32:04 +0000 Subject: [PATCH 265/356] fixed errors in the implementation of FlagBuilder's fallthrough_variation and off_variation when passing boolean variation values; updated tests to assert the expected behavior --- ldclient/integrations/test_data.py | 6 +++--- testing/test_test_data_source.py | 8 ++++++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index f61f1f88..f0e13b94 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -204,7 +204,7 @@ def fallthrough_variation(self, variation): :return: the flag builder """ if isinstance(variation, bool): - self._boolean_flag(self)._fallthrough_variation = variation + self.boolean_flag()._fallthrough_variation = _variation_for_boolean(variation) return self else: self._fallthrough_variation = variation @@ -222,7 +222,7 @@ def off_variation(self, variation) : :return: the flag builder """ if isinstance(variation, bool): - self._boolean_flag(self)._off_variation = variation + self.boolean_flag()._off_variation = _variation_for_boolean(variation) return self else: self._off_variation = variation @@ -267,7 +267,7 @@ def variations(self, *variations): **Example:** Multiple variations - :: + :: td.flag('new-flag') \ .variations('red', 'green', 'blue') diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 602776d1..a97a3bb3 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -164,6 +164,10 @@ def test_flagbuilder_can_set_fallthrough_variation(): assert flag.build(0)['fallthrough'] == {'variation': 2} + flag.fallthrough_variation(True) + + assert flag.build(0)['fallthrough'] == {'variation': 0} + def test_flagbuilder_can_set_off_variation(): td = TestData.data_source() flag = td.flag('test-flag') @@ -171,6 +175,10 @@ def test_flagbuilder_can_set_off_variation(): assert flag.build(0)['offVariation'] == 2 + flag.off_variation(True) + + assert flag.build(0)['offVariation'] == 0 + def test_flagbuilder_can_make_boolean_flag(): td = TestData.data_source() flag = td.flag('boolean-flag').boolean_flag() From 9d49fefec30113c6ae10af1dea1eaae4785dd859 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Mon, 24 Jan 2022 21:52:56 +0000 Subject: [PATCH 266/356] added missing value_for_all_users() method to FlagBuilder class --- ldclient/integrations/test_data.py | 14 ++++++++++++++ testing/test_test_data_source.py | 16 ++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index f0e13b94..3073c67d 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -297,6 +297,20 @@ def variation_for_all_users(self, variation): else: return self.clear_rules().clear_targets().on(True).fallthrough_variation(variation) + def value_for_all_users(self, value): + """ + Sets the flag to always return the specified variation value for all users. + + The value may be of any JSON type. 
This method changes the flag to have only + a single variation, which is this value, and to return the same variation + regardless of whether targeting is on or off. Any existing targets or rules + are removed. + + :param value: the desired value to be returned for all users + :return: the flag builder + """ + return self.variations(value).variation_for_all_users(0) + def variation_for_user(self, user_key, variation): """Sets the flag to return the specified variation for a specific user key when targeting is on. diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index a97a3bb3..9f0a2e37 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -251,6 +251,22 @@ def test_flagbuilder_can_set_numerical_variation_for_user(): ] assert flag.build(1)['targets'] == expected_targets +def test_flagbuilder_can_set_value_for_all_users(): + td = TestData.data_source() + flag = td.flag('user-value-flag') + flag.variation_for_user('john', 1) + + built_flag = flag.build(0) + assert built_flag['targets'] == [{'values': ['john'], 'variation': 1}] + assert built_flag['variations'] == [True, False] + + flag.value_for_all_users('yes') + + built_flag2 = flag.build(0) + assert built_flag2['targets'] == [] + assert built_flag2['variations'] == ['yes'] + + def test_flagbuilder_can_build(): td = TestData.data_source() flag = td.flag('some-flag') From d235047306230d1e0744cd39eed15ed656d23067 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Thu, 27 Jan 2022 10:02:04 -0500 Subject: [PATCH 267/356] Fix operator parsing errors (#169) --- ldclient/operators.py | 4 +++- testing/test_operators.py | 6 ++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/ldclient/operators.py b/ldclient/operators.py index 8bf95f86..0fb45c68 100644 --- a/ldclient/operators.py +++ b/ldclient/operators.py @@ -54,7 +54,7 @@ def _parse_time(input): log.warning("Couldn't parse timestamp:" + str(input) + " with message: " + str(e)) return None - log.warning("Got unexpected type: " + type(input) + " with value: " + str(input) + " when attempting to parse time") + log.warning("Got unexpected type: " + str(type(input)) + " with value: " + str(input) + " when attempting to parse time") return None def _time_operator(u, c, fn): @@ -69,6 +69,8 @@ def _parse_semver(input): try: VersionInfo.parse(input) return input + except TypeError: + return None except ValueError as e: try: input = _add_zero_version_component(input) diff --git a/testing/test_operators.py b/testing/test_operators.py index e2b3dc22..bfd72162 100644 --- a/testing/test_operators.py +++ b/testing/test_operators.py @@ -59,7 +59,11 @@ [ "before", True, 1000, False ], # wrong type [ "after", "1970-01-01T00:00:02.500Z", 1000, True ], [ "after", "1970-01-01 00:00:02.500Z", 1000, False ], # malformed timestamp + [ "after", "1970-01-01T00:00:02+01:00", None, False ], + [ "after", None, "1970-01-01T00:00:02+01:00", False ], [ "before", "1970-01-01T00:00:02+01:00", 1000, True ], + [ "before", "1970-01-01T00:00:02+01:00", None, False ], + [ "before", None, "1970-01-01T00:00:02+01:00", False ], [ "before", -1000, 1000, True ], [ "after", "1970-01-01T00:00:01.001Z", 1000, True ], [ "after", "1970-01-01T00:00:00-01:00", 1000, True ], @@ -68,6 +72,8 @@ [ "semVerEqual", "2.0.1", "2.0.1", True ], [ "semVerEqual", "2.0", "2.0.0", True ], [ "semVerEqual", "2", "2.0.0", True ], + [ "semVerEqual", 2, "2.0.0", False ], + [ "semVerEqual", "2.0.0", 2, False ], [ "semVerEqual", "2.0-rc1", "2.0.0-rc1", True ], [ "semVerLessThan", 
"2.0.0", "2.0.1", True ], [ "semVerLessThan", "2.0", "2.0.1", True ], From 81711366fb8c2a6be755c228c023302a90e55ee5 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Thu, 27 Jan 2022 10:40:10 -0500 Subject: [PATCH 268/356] identify should not emit event if user key is empty (#164) --- ldclient/client.py | 2 +- testing/test_ldclient.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ldclient/client.py b/ldclient/client.py index 2bb98aaa..b42ea7cb 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -226,7 +226,7 @@ def identify(self, user: dict): :param user: attributes of the user to register """ - if user is None or user.get('key') is None: + if user is None or user.get('key') is None or len(str(user.get('key'))) == 0: log.warning("Missing user or user key when calling identify().") else: self._send_event(self._event_factory_default.new_identify_event(user)) diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 86cc319e..e9a19c9a 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -143,6 +143,12 @@ def test_identify_no_user_key(): assert count_events(client) == 0 +def test_identify_blank_user_key(): + with make_client() as client: + client.identify({ 'key': '' }) + assert count_events(client) == 0 + + def test_track(): with make_client() as client: client.track('my_event', user) From 5c1ce47e48eb3afe79d85d1018ab7b6b5e51c8ca Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Thu, 27 Jan 2022 10:41:43 -0500 Subject: [PATCH 269/356] secondary should be treated as built-in attribute (#168) --- ldclient/impl/evaluator.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 2bc7667b..d019f10d 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -14,7 +14,7 @@ __LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF) -__BUILTINS__ = ["key", "ip", "country", "email", +__BUILTINS__ = ["key", "secondary", "ip", "country", "email", "firstName", "lastName", "avatar", "name", "anonymous"] __USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__ = [ "key", "secondary" ] @@ -182,8 +182,6 @@ def _get_value_for_variation_or_rollout(flag, vr, user, reason): return _get_variation(flag, index, reason) def _get_user_attribute(user, attr): - if attr == 'secondary': - return None, True if attr in __BUILTINS__: return user.get(attr), False else: # custom attribute From 926f94fc11084cde4e2f6e0e1568cd77a23a2377 Mon Sep 17 00:00:00 2001 From: "Matthew M. 
Keeler" Date: Thu, 27 Jan 2022 10:44:33 -0500 Subject: [PATCH 270/356] URIs should have trailing slashes trimmed (#165) --- ldclient/config.py | 6 +++--- testing/test_config.py | 11 +++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/ldclient/config.py b/ldclient/config.py index c18ef20f..9a19c264 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -240,9 +240,9 @@ def __init__(self, """ self.__sdk_key = sdk_key - self.__base_uri = base_uri.rstrip('\\') - self.__events_uri = events_uri.rstrip('\\') - self.__stream_uri = stream_uri.rstrip('\\') + self.__base_uri = base_uri.rstrip('/') + self.__events_uri = events_uri.rstrip('/') + self.__stream_uri = stream_uri.rstrip('/') self.__update_processor_class = update_processor_class self.__stream = stream self.__initial_reconnect_delay = initial_reconnect_delay diff --git a/testing/test_config.py b/testing/test_config.py index 88add31c..701e70e5 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -29,3 +29,14 @@ def test_can_set_valid_diagnostic_interval(): def test_minimum_diagnostic_interval_is_enforced(): config = Config(sdk_key = "SDK_KEY", diagnostic_recording_interval=59) assert config.diagnostic_recording_interval == 60 + +def test_trims_trailing_slashes_on_uris(): + config = Config( + sdk_key = "SDK_KEY", + base_uri = "https://launchdarkly.com/", + events_uri = "https://docs.launchdarkly.com/", + stream_uri = "https://blog.launchdarkly.com/") + + assert config.base_uri == "https://launchdarkly.com" + assert config.events_uri == "https://docs.launchdarkly.com/bulk" + assert config.stream_base_uri == "https://blog.launchdarkly.com" From 387d7ba8651600e330d6ec771e9cb8ddb7ac1256 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Thu, 27 Jan 2022 10:45:03 -0500 Subject: [PATCH 271/356] all_flags_state should always include flag version (#166) --- ldclient/evaluation.py | 4 ++-- testing/test_ldclient_evaluation.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ldclient/evaluation.py b/ldclient/evaluation.py index 77e409fa..5dd811d1 100644 --- a/ldclient/evaluation.py +++ b/ldclient/evaluation.py @@ -122,14 +122,14 @@ def __init__(self, valid: bool): def add_flag(self, flag, value, variation, reason, details_only_if_tracked): key = flag['key'] self.__flag_values[key] = value - meta = {} + meta = {'version': flag.get('version')} with_details = (not details_only_if_tracked) or flag.get('trackEvents') if not with_details: if flag.get('debugEventsUntilDate'): now = int(time.time() * 1000) with_details = (flag.get('debugEventsUntilDate') > now) + if with_details: - meta['version'] = flag.get('version') if reason is not None: meta['reason'] = reason if variation is not None: diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 346e1aad..3ce87e11 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -350,7 +350,8 @@ def test_all_flags_state_can_omit_details_for_untracked_flags(): 'key3': 'value3', '$flagsState': { 'key1': { - 'variation': 0 + 'variation': 0, + 'version': 100 }, 'key2': { 'variation': 1, From 515a05e1e709be3b252bb48c6aa3e6b6e536c710 Mon Sep 17 00:00:00 2001 From: "Matthew M. 
Keeler" Date: Thu, 27 Jan 2022 10:45:31 -0500 Subject: [PATCH 272/356] output event should not include a null prereqOf key (#167) --- ldclient/event_processor.py | 5 +++-- testing/test_event_processor.py | 37 +++++++++++++++++++-------------- 2 files changed, 24 insertions(+), 18 deletions(-) diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index 7d39078f..e13a0d2d 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -55,9 +55,10 @@ def make_output_event(self, e): 'version': e.get('version'), 'variation': e.get('variation'), 'value': e.get('value'), - 'default': e.get('default'), - 'prereqOf': e.get('prereqOf') + 'default': e.get('default') } + if 'prereqOf' in e: + out['prereqOf'] = e.get('prereqOf') if self._inline_users or is_debug: out['user'] = self._process_user(e) else: diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 0946b583..363d980e 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -128,21 +128,22 @@ def test_individual_feature_event_is_queued_with_index_event(): output = flush_and_get_events(ep) assert len(output) == 3 check_index_event(output[0], e, user) - check_feature_event(output[1], e, False, None) + check_feature_event(output[1], e, False, None, None) check_summary_event(output[2]) def test_user_is_filtered_in_index_event(): with DefaultTestProcessor(all_attributes_private = True) as ep: e = { 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True + 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True, + 'prereqOf': 'prereqFlagKey' } ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 3 check_index_event(output[0], e, filtered_user) - check_feature_event(output[1], e, False, None) + check_feature_event(output[1], e, False, None, 'prereqFlagKey') check_summary_event(output[2]) def test_user_attrs_are_stringified_in_index_event(): @@ -156,7 +157,7 @@ def test_user_attrs_are_stringified_in_index_event(): output = flush_and_get_events(ep) assert len(output) == 3 check_index_event(output[0], e, stringified_numeric_user) - check_feature_event(output[1], e, False, None) + check_feature_event(output[1], e, False, None, None) check_summary_event(output[2]) def test_feature_event_can_contain_inline_user(): @@ -169,7 +170,7 @@ def test_feature_event_can_contain_inline_user(): output = flush_and_get_events(ep) assert len(output) == 2 - check_feature_event(output[0], e, False, user) + check_feature_event(output[0], e, False, user, None) check_summary_event(output[1]) def test_user_is_filtered_in_feature_event(): @@ -182,7 +183,7 @@ def test_user_is_filtered_in_feature_event(): output = flush_and_get_events(ep) assert len(output) == 2 - check_feature_event(output[0], e, False, filtered_user) + check_feature_event(output[0], e, False, filtered_user, None) check_summary_event(output[1]) def test_user_attrs_are_stringified_in_feature_event(): @@ -195,7 +196,7 @@ def test_user_attrs_are_stringified_in_feature_event(): output = flush_and_get_events(ep) assert len(output) == 2 - check_feature_event(output[0], e, False, stringified_numeric_user) + check_feature_event(output[0], e, False, stringified_numeric_user, None) check_summary_event(output[1]) def test_index_event_is_still_generated_if_inline_users_is_true_but_feature_event_is_not_tracked(): @@ -224,8 +225,8 @@ def test_two_events_for_same_user_only_produce_one_index_event(): output = 
flush_and_get_events(ep) assert len(output) == 4 check_index_event(output[0], e0, user) - check_feature_event(output[1], e0, False, None) - check_feature_event(output[2], e1, False, None) + check_feature_event(output[1], e0, False, None, None) + check_feature_event(output[2], e1, False, None, None) check_summary_event(output[3]) def test_new_index_event_is_added_if_user_cache_has_been_cleared(): @@ -242,9 +243,9 @@ def test_new_index_event_is_added_if_user_cache_has_been_cleared(): output = flush_and_get_events(ep) assert len(output) == 5 check_index_event(output[0], e0, user) - check_feature_event(output[1], e0, False, None) + check_feature_event(output[1], e0, False, None, None) check_index_event(output[2], e1, user) - check_feature_event(output[3], e1, False, None) + check_feature_event(output[3], e1, False, None, None) check_summary_event(output[4]) def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode(): @@ -260,7 +261,7 @@ def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode(): output = flush_and_get_events(ep) assert len(output) == 3 check_index_event(output[0], e, user) - check_feature_event(output[1], e, True, user) + check_feature_event(output[1], e, True, user, None) check_summary_event(output[2]) def test_event_can_be_both_tracked_and_debugged(): @@ -276,8 +277,8 @@ def test_event_can_be_both_tracked_and_debugged(): output = flush_and_get_events(ep) assert len(output) == 4 check_index_event(output[0], e, user) - check_feature_event(output[1], e, False, None) - check_feature_event(output[2], e, True, user) + check_feature_event(output[1], e, False, None, None) + check_feature_event(output[2], e, True, user, None) check_summary_event(output[3]) def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_before_expiration_time(): @@ -304,7 +305,7 @@ def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_befo output = flush_and_get_events(ep) assert len(output) == 3 check_index_event(output[0], e, user) - check_feature_event(output[1], e, True, user) # debug event + check_feature_event(output[1], e, True, user, None) # debug event check_summary_event(output[2]) def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time(): @@ -660,7 +661,7 @@ def check_index_event(data, source, user): assert data['creationDate'] == source['creationDate'] assert data['user'] == user -def check_feature_event(data, source, debug, inline_user): +def check_feature_event(data, source, debug, inline_user, prereq_of): assert data['kind'] == ('debug' if debug else 'feature') assert data['creationDate'] == source['creationDate'] assert data['key'] == source['key'] @@ -672,6 +673,10 @@ def check_feature_event(data, source, debug, inline_user): assert data['userKey'] == str(source['user']['key']) else: assert data['user'] == inline_user + if prereq_of is None: + assert "prereqOf" not in data + else: + assert data['prereqOf'] == prereq_of def check_custom_event(data, source, inline_user): assert data['kind'] == 'custom' From e22d5eec0aa37988d2e38bb17f1a49cc92c1244e Mon Sep 17 00:00:00 2001 From: "Matthew M. 
Keeler" Date: Thu, 3 Feb 2022 12:36:48 -0500 Subject: [PATCH 273/356] Account for traffic allocation on all flags (#171) --- ldclient/client.py | 18 +++++++++-- ldclient/evaluation.py | 47 ++++++++++++++++++----------- ldclient/impl/event_factory.py | 5 +-- testing/test_flags_state.py | 36 +++++++++++----------- testing/test_ldclient_evaluation.py | 19 +++++++++--- 5 files changed, 79 insertions(+), 46 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index b42ea7cb..86a45e06 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -395,13 +395,25 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: continue try: detail = self._evaluator.evaluate(flag, user, self._event_factory_default).detail - state.add_flag(flag, detail.value, detail.variation_index, - detail.reason if with_reasons else None, details_only_if_tracked) except Exception as e: log.error("Error evaluating flag \"%s\" in all_flags_state: %s" % (key, repr(e))) log.debug(traceback.format_exc()) reason = {'kind': 'ERROR', 'errorKind': 'EXCEPTION'} - state.add_flag(flag, None, None, reason if with_reasons else None, details_only_if_tracked) + detail = EvaluationDetail(None, None, reason) + + requires_experiment_data = _EventFactory.is_experiment(flag, detail.reason) + flag_state = { + 'key': flag['key'], + 'value': detail.value, + 'variation': detail.variation_index, + 'reason': detail.reason, + 'version': flag['version'], + 'trackEvents': flag['trackEvents'] or requires_experiment_data, + 'trackReason': requires_experiment_data, + 'debugEventsUntilDate': flag.get('debugEventsUntilDate', None), + } + + state.add_flag(flag_state, with_reasons, details_only_if_tracked) return state diff --git a/ldclient/evaluation.py b/ldclient/evaluation.py index 5dd811d1..6bc786cf 100644 --- a/ldclient/evaluation.py +++ b/ldclient/evaluation.py @@ -119,25 +119,36 @@ def __init__(self, valid: bool): self.__valid = valid # Used internally to build the state map - def add_flag(self, flag, value, variation, reason, details_only_if_tracked): - key = flag['key'] - self.__flag_values[key] = value - meta = {'version': flag.get('version')} - with_details = (not details_only_if_tracked) or flag.get('trackEvents') - if not with_details: - if flag.get('debugEventsUntilDate'): - now = int(time.time() * 1000) - with_details = (flag.get('debugEventsUntilDate') > now) - - if with_details: - if reason is not None: - meta['reason'] = reason - if variation is not None: - meta['variation'] = variation - if flag.get('trackEvents'): + def add_flag(self, flag_state, with_reasons, details_only_if_tracked): + key = flag_state['key'] + self.__flag_values[key] = flag_state['value'] + meta = {} + + trackEvents = flag_state.get('trackEvents', False) + trackReason = flag_state.get('trackReason', False) + + omit_details = False + if details_only_if_tracked: + now = int(time.time() * 1000) + if not trackEvents and not trackReason and not (flag_state.get('debugEventsUntilDate') is not None and flag_state['debugEventsUntilDate'] > now): + omit_details = True + + reason = None if not with_reasons and not trackReason else flag_state['reason'] + + if reason is not None and not omit_details: + meta['reason'] = reason + + if not omit_details: + meta['version'] = flag_state['version'] + + if flag_state['variation'] is not None: + meta['variation'] = flag_state['variation'] + if trackEvents: meta['trackEvents'] = True - if flag.get('debugEventsUntilDate') is not None: - meta['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') + if 
trackReason: + meta['trackReason'] = True + if flag_state.get('debugEventsUntilDate') is not None: + meta['debugEventsUntilDate'] = flag_state.get('debugEventsUntilDate') self.__flag_metadata[key] = meta @property diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index 062c9d02..12823bed 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -11,7 +11,7 @@ def __init__(self, with_reasons): self._with_reasons = with_reasons def new_eval_event(self, flag, user, detail, default_value, prereq_of_flag = None): - add_experiment_data = self._is_experiment(flag, detail.reason) + add_experiment_data = self.is_experiment(flag, detail.reason) e = { 'kind': 'feature', 'key': flag.get('key'), @@ -104,7 +104,8 @@ def _user_to_context_kind(self, user): else: return "user" - def _is_experiment(self, flag, reason): + @staticmethod + def is_experiment(flag, reason): if reason is not None: if reason.get('inExperiment'): return True diff --git a/testing/test_flags_state.py b/testing/test_flags_state.py index f8e6d464..1acdbaf8 100644 --- a/testing/test_flags_state.py +++ b/testing/test_flags_state.py @@ -5,8 +5,8 @@ def test_can_get_flag_value(): state = FeatureFlagsState(True) - flag = { 'key': 'key' } - state.add_flag(flag, 'value', 1, None, False) + flag_state = { 'key': 'key', 'version': 100, 'value': 'value', 'variation': 1, 'reason': None } + state.add_flag(flag_state, False, False) assert state.get_flag_value('key') == 'value' def test_returns_none_for_unknown_flag(): @@ -15,18 +15,18 @@ def test_returns_none_for_unknown_flag(): def test_can_convert_to_values_map(): state = FeatureFlagsState(True) - flag1 = { 'key': 'key1' } - flag2 = { 'key': 'key2' } - state.add_flag(flag1, 'value1', 0, None, False) - state.add_flag(flag2, 'value2', 1, None, False) + flag_state1 = { 'key': 'key1', 'version': 100, 'value': 'value1', 'variation': 0, 'reason': None } + flag_state2 = { 'key': 'key2', 'version': 200, 'value': 'value2', 'variation': 1, 'reason': None } + state.add_flag(flag_state1, False, False) + state.add_flag(flag_state2, False, False) assert state.to_values_map() == { 'key1': 'value1', 'key2': 'value2' } def test_can_convert_to_json_dict(): state = FeatureFlagsState(True) - flag1 = { 'key': 'key1', 'version': 100, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } - flag2 = { 'key': 'key2', 'version': 200, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } - state.add_flag(flag1, 'value1', 0, None, False) - state.add_flag(flag2, 'value2', 1, None, False) + flag_state1 = { 'key': 'key1', 'version': 100, 'trackEvents': False, 'value': 'value1', 'variation': 0, 'reason': None } + flag_state2 = { 'key': 'key2', 'version': 200, 'trackEvents': True, 'debugEventsUntilDate': 1000, 'value': 'value2', 'variation': 1, 'reason': None } + state.add_flag(flag_state1, False, False) + state.add_flag(flag_state2, False, False) result = state.to_json_dict() assert result == { @@ -49,10 +49,10 @@ def test_can_convert_to_json_dict(): def test_can_convert_to_json_string(): state = FeatureFlagsState(True) - flag1 = { 'key': 'key1', 'version': 100, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } - flag2 = { 'key': 'key2', 'version': 200, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } - state.add_flag(flag1, 'value1', 0, None, False) - state.add_flag(flag2, 'value2', 1, None, False) + flag_state1 = { 'key': 'key1', 
'version': 100, 'trackEvents': False, 'value': 'value1', 'variation': 0, 'reason': None } + flag_state2 = { 'key': 'key2', 'version': 200, 'trackEvents': True, 'debugEventsUntilDate': 1000, 'value': 'value2', 'variation': 1, 'reason': None } + state.add_flag(flag_state1, False, False) + state.add_flag(flag_state2, False, False) obj = state.to_json_dict() str = state.to_json_string() @@ -62,10 +62,10 @@ def test_can_convert_to_json_string(): # behave correctly in case the application uses jsonpickle to serialize it. def test_can_serialize_with_jsonpickle(): state = FeatureFlagsState(True) - flag1 = { 'key': 'key1', 'version': 100, 'offVariation': 0, 'variations': [ 'value1' ], 'trackEvents': False } - flag2 = { 'key': 'key2', 'version': 200, 'offVariation': 1, 'variations': [ 'x', 'value2' ], 'trackEvents': True, 'debugEventsUntilDate': 1000 } - state.add_flag(flag1, 'value1', 0, None, False) - state.add_flag(flag2, 'value2', 1, None, False) + flag_state1 = { 'key': 'key1', 'version': 100, 'trackEvents': False, 'value': 'value1', 'variation': 0, 'reason': None } + flag_state2 = { 'key': 'key2', 'version': 200, 'trackEvents': True, 'debugEventsUntilDate': 1000, 'value': 'value2', 'variation': 1, 'reason': None } + state.add_flag(flag_state1, False, False) + state.add_flag(flag_state2, False, False) obj = state.to_json_dict() str = jsonpickle.encode(state, unpicklable=False) diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 3ce87e11..5ab4ed76 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -279,28 +279,37 @@ def test_all_flags_state_can_be_filtered_for_client_side_flags(): 'on': False, 'offVariation': 0, 'variations': [ 'a' ], - 'clientSide': False + 'clientSide': False, + 'version': 100, + 'trackEvents': False } flag2 = { 'key': 'server-side-2', 'on': False, 'offVariation': 0, 'variations': [ 'b' ], - 'clientSide': False + 'clientSide': False, + 'version': 200, + 'trackEvents': False } flag3 = { 'key': 'client-side-1', 'on': False, 'offVariation': 0, 'variations': [ 'value1' ], - 'clientSide': True + 'trackEvents': False, + 'clientSide': True, + 'version': 300, + 'trackEvents': False } flag4 = { 'key': 'client-side-2', 'on': False, 'offVariation': 0, 'variations': [ 'value2' ], - 'clientSide': True + 'clientSide': True, + 'version': 400, + 'trackEvents': False } store = InMemoryFeatureStore() @@ -336,6 +345,7 @@ def test_all_flags_state_can_omit_details_for_untracked_flags(): 'on': False, 'offVariation': 1, 'variations': [ 'x', 'value3' ], + 'trackEvents': False, 'debugEventsUntilDate': future_time } store = InMemoryFeatureStore() @@ -351,7 +361,6 @@ def test_all_flags_state_can_omit_details_for_untracked_flags(): '$flagsState': { 'key1': { 'variation': 0, - 'version': 100 }, 'key2': { 'variation': 1, From 3e1613ea91f44fcaffeb53162c8d6d992d6e4b49 Mon Sep 17 00:00:00 2001 From: "Matthew M. 
Keeler" Date: Thu, 3 Feb 2022 14:09:59 -0500 Subject: [PATCH 274/356] Add SDK contract tests (#170) --- .circleci/config.yml | 13 ++- Makefile | 23 +++++ contract-tests/README.md | 7 ++ contract-tests/client_entity.py | 86 +++++++++++++++++ contract-tests/requirements.txt | 2 + contract-tests/service.py | 142 ++++++++++++++++++++++++++++ testing/test_ldclient_evaluation.py | 12 +-- 7 files changed, 277 insertions(+), 8 deletions(-) create mode 100644 Makefile create mode 100644 contract-tests/README.md create mode 100644 contract-tests/client_entity.py create mode 100644 contract-tests/requirements.txt create mode 100644 contract-tests/service.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 345713f5..f4cf6591 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -95,7 +95,7 @@ jobs: export PATH="/home/circleci/.local/bin:$PATH" mypy --install-types --non-interactive ldclient testing mypy --config-file mypy.ini ldclient testing - + - unless: condition: <> steps: @@ -109,12 +109,21 @@ jobs: - run: name: run SSE contract tests command: cd sse-contract-tests && make run-contract-tests - + + - run: make build-contract-tests + - run: + command: make start-contract-test-service + background: true + - run: + name: run contract tests + command: TEST_HARNESS_PARAMS="-junit test-reports/contract-tests-junit.xml" make run-contract-tests + - store_test_results: path: test-reports - store_artifacts: path: test-reports + test-windows: executor: name: win/vs2019 diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..730218e3 --- /dev/null +++ b/Makefile @@ -0,0 +1,23 @@ +TEMP_TEST_OUTPUT=/tmp/contract-test-service.log + +# port 8000 and 9000 is already used in the CI environment because we're +# running a DynamoDB container and an SSE contract test +PORT=10000 + +build-contract-tests: + @cd contract-tests && pip install -r requirements.txt + +start-contract-test-service: + @cd contract-tests && python service.py $(PORT) + +start-contract-test-service-bg: + @echo "Test service output will be captured in $(TEMP_TEST_OUTPUT)" + @make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & + +run-contract-tests: + @curl -s https://raw.githubusercontent.com/launchdarkly/sdk-test-harness/v1.0.0/downloader/run.sh \ + | VERSION=v1 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end $(TEST_HARNESS_PARAMS)" sh + +contract-tests: build-contract-tests start-contract-test-service-bg run-contract-tests + +.PHONY: build-contract-tests start-contract-test-service run-contract-tests contract-tests diff --git a/contract-tests/README.md b/contract-tests/README.md new file mode 100644 index 00000000..aa3942b8 --- /dev/null +++ b/contract-tests/README.md @@ -0,0 +1,7 @@ +# SDK contract test service + +This directory contains an implementation of the cross-platform SDK testing protocol defined by https://github.com/launchdarkly/sdk-test-harness. See that project's `README` for details of this protocol, and the kinds of SDK capabilities that are relevant to the contract tests. This code should not need to be updated unless the SDK has added or removed such capabilities. + +To run these tests locally, run `make contract-tests` from the SDK project root directory. This downloads the correct version of the test harness tool automatically. 
+ +Or, to test against an in-progress local version of the test harness, run `make start-contract-test-service` from the SDK project root directory; then, in the root directory of the `sdk-test-harness` project, build the test harness and run it from the command line. diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py new file mode 100644 index 00000000..f3bf22fc --- /dev/null +++ b/contract-tests/client_entity.py @@ -0,0 +1,86 @@ +import logging +import os +import sys + +# Import ldclient from parent directory +sys.path.insert(1, os.path.join(sys.path[0], '..')) +from ldclient import * + +def millis_to_seconds(t): + return None if t is None else t / 1000 + + +class ClientEntity: + def __init__(self, tag, config): + self.log = logging.getLogger(tag) + opts = {"sdk_key": config["credential"]} + + if "streaming" in config: + streaming = config["streaming"] + if "baseUri" in streaming: + opts["stream_uri"] = streaming["baseUri"] + if streaming.get("initialRetryDelayMs") is not None: + opts["initial_reconnect_delay"] = streaming["initialRetryDelayMs"] / 1000.0 + + if "events" in config: + events = config["events"] + if "baseUri" in events: + opts["events_uri"] = events["baseUri"] + if events.get("capacity", None) is not None: + opts["events_max_pending"] = events["capacity"] + opts["diagnostic_opt_out"] = not events.get("enableDiagnostics", False) + opts["all_attributes_private"] = events.get("allAttributesPrivate", False) + opts["private_attribute_names"] = events.get("globalPrivateAttributes", {}) + if "flushIntervalMs" in events: + opts["flush_interval"] = events["flushIntervalMs"] / 1000.0 + if "inlineUsers" in events: + opts["inline_users_in_events"] = events["inlineUsers"] + else: + opts["send_events"] = False + + start_wait = config.get("startWaitTimeMs", 5000) + config = Config(**opts) + + self.client = client.LDClient(config, start_wait / 1000.0) + + def is_initializing(self) -> bool: + return self.client.is_initialized() + + def evaluate(self, params) -> dict: + response = {} + + if params.get("detail", False): + detail = self.client.variation_detail(params["flagKey"], params["user"], params["defaultValue"]) + response["value"] = detail.value + response["variationIndex"] = detail.variation_index + response["reason"] = detail.reason + else: + response["value"] = self.client.variation(params["flagKey"], params["user"], params["defaultValue"]) + + return response + + def evaluate_all(self, params): + opts = {} + opts["client_side_only"] = params.get("clientSideOnly", False) + opts["with_reasons"] = params.get("withReasons", False) + opts["details_only_for_tracked_flags"] = params.get("detailsOnlyForTrackedFlags", False) + + state = self.client.all_flags_state(params["user"], **opts) + + return {"state": state.to_json_dict()} + + def track(self, params): + self.client.track(params["eventKey"], params["user"], params["data"], params.get("metricValue", None)) + + def identify(self, params): + self.client.identify(params["user"]) + + def alias(self, params): + self.client.alias(params["user"], params["previousUser"]) + + def flush(self): + self.client.flush() + + def close(self): + self.client.close() + self.log.info('Test ended') diff --git a/contract-tests/requirements.txt b/contract-tests/requirements.txt new file mode 100644 index 00000000..f55a4204 --- /dev/null +++ b/contract-tests/requirements.txt @@ -0,0 +1,2 @@ +Flask==1.1.4 +urllib3>=1.22.0 diff --git a/contract-tests/service.py b/contract-tests/service.py new file mode 100644 index 00000000..b4728867 
--- /dev/null +++ b/contract-tests/service.py @@ -0,0 +1,142 @@ +from client_entity import ClientEntity + +import json +import logging +import os +import sys +from flask import Flask, request, jsonify +from flask.logging import default_handler +from logging.config import dictConfig +from werkzeug.exceptions import HTTPException + + +default_port = 8000 + +# logging configuration +dictConfig({ + 'version': 1, + 'formatters': { + 'default': { + 'format': '[%(asctime)s] [%(name)s] %(levelname)s: %(message)s', + } + }, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'default' + } + }, + 'root': { + 'level': 'INFO', + 'handlers': ['console'] + }, + 'ldclient.util': { + 'level': 'INFO', + 'handlers': ['console'] + }, + 'loggers': { + 'werkzeug': { 'level': 'ERROR' } # disable irrelevant Flask app logging + } +}) + +app = Flask(__name__) +app.logger.removeHandler(default_handler) + +client_counter = 0 +clients = {} +global_log = logging.getLogger('testservice') + + +@app.errorhandler(Exception) +def handle_exception(e): + # pass through HTTP errors + if isinstance(e, HTTPException): + return e + + return str(e), 500 + +@app.route('/', methods=['GET']) +def status(): + body = { + 'capabilities': [ + 'server-side', + 'all-flags-with-reasons', + 'all-flags-client-side-only', + 'all-flags-details-only-for-tracked-flags', + ] + } + return (json.dumps(body), 200, {'Content-type': 'application/json'}) + +@app.route('/', methods=['DELETE']) +def delete_stop_service(): + global_log.info("Test service has told us to exit") + os._exit(0) + +@app.route('/', methods=['POST']) +def post_create_client(): + global client_counter, clients + + options = request.get_json() + + client_counter += 1 + client_id = str(client_counter) + resource_url = '/clients/%s' % client_id + + client = ClientEntity(options['tag'], options['configuration']) + + if client.is_initializing() is False and options['configuration'].get('initCanFail', False) is False: + client.close() + return ("Failed to initialize", 500) + + clients[client_id] = client + return ('', 201, {'Location': resource_url}) + + +@app.route('/clients/', methods=['POST']) +def post_client_command(id): + global clients + + params = request.get_json() + + client = clients[id] + if client is None: + return ('', 404) + + if params.get('command') == "evaluate": + response = client.evaluate(params.get("evaluate")) + return (json.dumps(response), 200) + elif params.get("command") == "evaluateAll": + response = client.evaluate_all(params.get("evaluateAll")) + return (json.dumps(response), 200) + elif params.get("command") == "customEvent": + client.track(params.get("customEvent")) + return ('', 201) + elif params.get("command") == "identifyEvent": + client.identify(params.get("identifyEvent")) + return ('', 201) + elif params.get("command") == "aliasEvent": + client.alias(params.get("aliasEvent")) + return ('', 201) + elif params.get('command') == "flushEvents": + client.flush() + return ('', 201) + + return ('', 400) + +@app.route('/clients/', methods=['DELETE']) +def delete_client(id): + global clients + + client = clients[id] + if client is None: + return ('', 404) + + client.close() + return ('', 204) + +if __name__ == "__main__": + port = default_port + if sys.argv[len(sys.argv) - 1] != 'service.py': + port = int(sys.argv[len(sys.argv) - 1]) + global_log.info('Listening on port %d', port) + app.run(host='0.0.0.0', port=port) diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 5ab4ed76..faa3f5b6 
100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -38,19 +38,19 @@ class ErroringFeatureStore(FeatureStore): def get(self, kind, key, callback=lambda x: x): raise NotImplementedError() - + def all(self, kind, callback=lambda x: x): raise NotImplementedError() - + def upsert(self, kind, item): pass - + def delete(self, key, version): pass - + def init(self, data): pass - + @property def initialized(self): return True @@ -360,7 +360,7 @@ def test_all_flags_state_can_omit_details_for_untracked_flags(): 'key3': 'value3', '$flagsState': { 'key1': { - 'variation': 0, + 'variation': 0 }, 'key2': { 'variation': 1, From 175697b929bad00ddfe16bd29789d8d7939ef111 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 14 Feb 2022 10:29:00 -0800 Subject: [PATCH 275/356] misc fixes to test data docs + add type hints --- docs/api-testing.rst | 6 +- ldclient/integrations/test_data.py | 136 ++++++++++++++--------------- testing/test_test_data_source.py | 3 +- 3 files changed, 70 insertions(+), 75 deletions(-) diff --git a/docs/api-testing.rst b/docs/api-testing.rst index 4e765228..c9faee05 100644 --- a/docs/api-testing.rst +++ b/docs/api-testing.rst @@ -1,9 +1,11 @@ -Testing Integrations -==================== +Test fixtures +============= ldclient.integrations.test_data module -------------------------------------- +The entry point for this feature is :class:`ldclient.integrations.test_data.TestData`. + .. automodule:: ldclient.integrations.test_data :members: :special-members: __init__ diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 3073c67d..2c1fa06a 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -18,16 +18,17 @@ class TestData(): Unlike ``Files``, this mechanism does not use any external resources. It provides only the data that the application has put into it using the ``update`` method. - :: + td = TestData.data_source() td.update(td.flag('flag-key-1').variation_for_all_users(True)) client = LDClient(config=Config('SDK_KEY', update_processor_class = td)) # flags can be updated at any time: - td.update(td.flag('flag-key-1').variation_for_user('some-user-key', True) \ - .fallthrough_variation(False)) + td.update(td.flag('flag-key-1'). \\ + variation_for_user('some-user-key', True). \\ + fallthrough_variation(False)) The above example uses a simple boolean flag, but more complex configurations are possible using the methods of the ``FlagBuilder`` that is returned by ``flag``. ``FlagBuilder`` @@ -38,7 +39,6 @@ class TestData(): any changes made to the data will propagate to all of the `LDClient` instances. """ - # Prevent pytest from treating this as a test class __test__ = False @@ -58,17 +58,15 @@ def __call__(self, config, store, ready): return data_source - @staticmethod - def data_source(): + def data_source() -> 'TestData': """Creates a new instance of the test data source. :return: a new configurable test data source """ return TestData() - - def flag(self, key): + def flag(self, key: str) -> 'FlagBuilder': """Creates or copies a ``FlagBuilder`` for building a test flag configuration. If this flag key has already been defined in this ``TestData`` instance, then the builder @@ -93,7 +91,7 @@ def flag(self, key): finally: self._lock.runlock() - def update(self, flag_builder): + def update(self, flag_builder: 'FlagBuilder') -> 'TestData': """Updates the test data with the specified flag configuration. 
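As a minimal illustration of ``update`` (the flag key is hypothetical, and the data source must already be attached to a client as in the class docstring above):

    td = TestData.data_source()
    td.update(td.flag('flag-key-1').on(False))  # any client configured with td now sees the flag turned off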
This has the same effect as if a flag were added or modified on the LaunchDarkly dashboard. @@ -129,8 +127,7 @@ def update(self, flag_builder): return self - - def _make_init_data(self): + def _make_init_data(self) -> dict: return { FEATURES: copy.copy(self._current_flags) } def _closed_instance(self, instance): @@ -157,10 +154,9 @@ def __init__(self, key): self._targets = {} self._rules = [] - - def copy(self): + def copy(self) -> 'FlagBuilder': """Creates a deep copy of the flag builder. Subsequent updates to the - original ``FlagBuilder`` object will not update the copy and vice versa. + original ``FlagBuilder`` object will not update the copy and vice versa. :return: a copy of the flag builder object """ @@ -176,7 +172,7 @@ def copy(self): return to - def on(self, on): + def on(self, on: bool) -> 'FlagBuilder': """Sets targeting to be on or off for this flag. The effect of this depends on the rest of the flag configuration, just as it does on the @@ -185,13 +181,13 @@ def on(self, on): the flag will return ``False`` whenever targeting is off, and ``True`` when targeting is on. - :param bool on: ``True`` if targeting should be on + :param on: ``True`` if targeting should be on :return: the flag builder """ self._on = on return self - def fallthrough_variation(self, variation): + def fallthrough_variation(self, variation: bool|int) -> 'FlagBuilder': """Specifies the fallthrough variation. The fallthrough is the value that is returned if targeting is on and the user was not matched by a more specific target or rule. @@ -199,8 +195,8 @@ def fallthrough_variation(self, variation): If the flag was previously configured with other variations and the variation specified is a boolean, this also changes it to a boolean flag. - :param bool/int variation: ``True`` or ``False`` or the desired fallthrough variation index: - ``0`` for the first, ``1`` for the second, etc. + :param bool|int variation: ``True`` or ``False`` or the desired fallthrough variation index: + ``0`` for the first, ``1`` for the second, etc. :return: the flag builder """ if isinstance(variation, bool): @@ -210,15 +206,15 @@ def fallthrough_variation(self, variation): self._fallthrough_variation = variation return self - def off_variation(self, variation) : + def off_variation(self, variation: bool|int) -> 'FlagBuilder' : """Specifies the off variation. This is the variation that is returned whenever targeting is off. If the flag was previously configured with other variations and the variation specified is a boolean, this also changes it to a boolean flag. - :param bool/int variation: ``True`` or ``False`` or the desired off variation index: - ``0`` for the first, ``1`` for the second, etc. + :param bool|int variation: ``True`` or ``False`` or the desired off variation index: + ``0`` for the first, ``1`` for the second, etc. :return: the flag builder """ if isinstance(variation, bool): @@ -228,7 +224,7 @@ def off_variation(self, variation) : self._off_variation = variation return self - def boolean_flag(self): + def boolean_flag(self) -> 'FlagBuilder': """A shortcut for setting the flag to use the standard boolean configuration. This is the default for all new flags created with @@ -252,7 +248,7 @@ def _is_boolean_flag(self): and self._variations[TRUE_VARIATION_INDEX] == True and self._variations[FALSE_VARIATION_INDEX] == False) - def variations(self, *variations): + def variations(self, *variations) -> 'FlagBuilder': """Changes the allowable variation values for the flag. The value may be of any valid JSON type. For instance, a boolean flag 
For instance, a boolean flag @@ -260,16 +256,14 @@ def variations(self, *variations): ``'red', 'green'``; etc. **Example:** A single variation - :: - td.flag('new-flag') \ - .variations(True) - **Example:** Multiple variations + td.flag('new-flag').variations(True) + **Example:** Multiple variations :: - td.flag('new-flag') \ - .variations('red', 'green', 'blue') + + td.flag('new-flag').variations('red', 'green', 'blue') :param variations: the the desired variations :return: the flag builder @@ -278,8 +272,7 @@ def variations(self, *variations): return self - - def variation_for_all_users(self, variation): + def variation_for_all_users(self, variation: bool|int) -> 'FlagBuilder': """Sets the flag to always return the specified variation for all users. The variation is specified, Targeting is switched on, and any existing targets or rules are removed. @@ -288,8 +281,8 @@ def variation_for_all_users(self, variation): If the flag was previously configured with other variations and the variation specified is a boolean, this also changes it to a boolean flag. - :param bool/int variation: ``True`` or ``False`` or the desired variation index to return: - ``0`` for the first, ``1`` for the second, etc. + :param bool|int variation: ``True`` or ``False`` or the desired variation index to return: + ``0`` for the first, ``1`` for the second, etc. :return: the flag builder """ if isinstance(variation, bool): @@ -297,7 +290,7 @@ def variation_for_all_users(self, variation): else: return self.clear_rules().clear_targets().on(True).fallthrough_variation(variation) - def value_for_all_users(self, value): + def value_for_all_users(self, value) -> 'FlagBuilder': """ Sets the flag to always return the specified variation value for all users. @@ -311,7 +304,7 @@ def value_for_all_users(self, value): """ return self.variations(value).variation_for_all_users(0) - def variation_for_user(self, user_key, variation): + def variation_for_user(self, user_key: str, variation: bool|int) -> 'FlagBuilder': """Sets the flag to return the specified variation for a specific user key when targeting is on. @@ -320,9 +313,9 @@ def variation_for_user(self, user_key, variation): If the flag was previously configured with other variations and the variation specified is a boolean, this also changes it to a boolean flag. - :param str user_key: a user key - :param bool/int variation: ``True`` or ``False`` or the desired variation index to return: - ``0`` for the first, ``1`` for the second, etc. + :param user_key: a user key + :param bool|int variation: ``True`` or ``False`` or the desired variation index to return: + ``0`` for the first, ``1`` for the second, etc. :return: the flag builder """ if isinstance(variation, bool): @@ -360,41 +353,41 @@ def variation_for_user(self, user_key, variation): def _add_rule(self, flag_rule_builder): self._rules.append(flag_rule_builder) - def if_match(self, attribute, *values): + def if_match(self, attribute: str, *values) -> 'FlagBuilder': """Starts defining a flag rule, using the "is one of" operator. 
**Example:** create a rule that returns ``True`` if the name is "Patsy" or "Edina" - :: - td.flag("flag") \ - .if_match('name', 'Patsy', 'Edina') \ - .then_return(True) - :param str attribute: the user attribute to match against + td.flag("flag") \\ + .if_match('name', 'Patsy', 'Edina') \\ + .then_return(True) + + :param attribute: the user attribute to match against :param values: values to compare to :return: the flag rule builder """ flag_rule_builder = FlagRuleBuilder(self) return flag_rule_builder.and_match(attribute, *values) - def if_not_match(self, attribute, *values): + def if_not_match(self, attribute: str, *values) -> 'FlagBuilder': """Starts defining a flag rule, using the "is not one of" operator. **Example:** create a rule that returns ``True`` if the name is neither "Saffron" nor "Bubble" - :: - td.flag("flag") \ - .if_not_match('name', 'Saffron', 'Bubble') \ - .then_return(True) - :param str attribute: the user attribute to match against + td.flag("flag") \\ + .if_not_match('name', 'Saffron', 'Bubble') \\ + .then_return(True) + + :param attribute: the user attribute to match against :param values: values to compare to :return: the flag rule builder """ flag_rule_builder = FlagRuleBuilder(self) return flag_rule_builder.and_not_match(attribute, values) - def clear_rules(self): + def clear_rules(self) -> 'FlagBuilder': """Removes any existing rules from the flag. This undoes the effect of methods like :meth:`ldclient.integrations.test_data.FlagBuilder.if_match()` @@ -404,7 +397,7 @@ def clear_rules(self): self._rules = [] return self - def clear_targets(self): + def clear_targets(self) -> 'FlagBuilder': """Removes any existing targets from the flag. This undoes the effect of methods like :meth:`ldclient.integrations.test_data.FlagBuilder.variation_for_user()` @@ -414,11 +407,10 @@ def clear_targets(self): self._targets = {} return self - - def build(self, version): + def build(self, version: int) -> dict: """Creates a dictionary representation of the flag - :param int version: the version number of the rule + :param version: the version number of the rule :return: the dictionary representation of the flag """ base_flag_object = { @@ -470,18 +462,18 @@ def __init__(self, flag_builder): self._clauses = [] self._variation = None - def and_match(self, attribute, *values): + def and_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Adds another clause, using the "is one of" operator. **Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is "gb" - :: - td.flag('flag') \ - .if_match('name', 'Patsy') \ - .and_match('country', 'gb') \ + + td.flag('flag') \\ + .if_match('name', 'Patsy') \\ + .and_match('country', 'gb') \\ .then_return(True) - :param str attribute: the user attribute to match against + :param attribute: the user attribute to match against :param values: values to compare to :return: the flag rule builder """ @@ -493,18 +485,18 @@ def and_match(self, attribute, *values): }) return self - def and_not_match(self, attribute, *values): + def and_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Adds another clause, using the "is not one of" operator. 
**Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is not "gb" - :: - td.flag('flag') \ - .if_match('name', 'Patsy') \ - .and_not_match('country', 'gb') \ + + td.flag('flag') \\ + .if_match('name', 'Patsy') \\ + .and_not_match('country', 'gb') \\ .then_return(True) - :param str attribute: the user attribute to match against + :param attribute: the user attribute to match against :param values: values to compare to :return: the flag rule builder """ @@ -516,15 +508,15 @@ def and_not_match(self, attribute, *values): }) return self - def then_return(self, variation): + def then_return(self, variation: bool|int) -> 'FlagRuleBuilder': """Finishes defining the rule, specifying the result as either a boolean or a variation index. If the flag was previously configured with other variations and the variation specified is a boolean, this also changes it to a boolean flag. - :param bool/int variation: ``True`` or ``False`` or the desired variation index: - ``0`` for the first, ``1`` for the second, etc. + :param bool|int variation: ``True`` or ``False`` or the desired variation index: + ``0`` for the first, ``1`` for the second, etc. :return: the flag builder with this rule added """ if isinstance(variation, bool): @@ -535,7 +527,7 @@ def then_return(self, variation): self._flag_builder._add_rule(self) return self._flag_builder - def build(self, id): + def build(self, id: str) -> dict: """Creates a dictionary representation of the rule :param id: the rule id diff --git a/testing/test_test_data_source.py b/testing/test_test_data_source.py index 9f0a2e37..e2e426f7 100644 --- a/testing/test_test_data_source.py +++ b/testing/test_test_data_source.py @@ -189,7 +189,8 @@ def test_flagbuilder_can_make_boolean_flag(): def test_flagbuilder_can_set_variation_when_targeting_is_off(): td = TestData.data_source() - flag = td.flag('test-flag').on(False) + flag = td.flag('test-flag') \ + .on(False) assert flag.build(0)['on'] == False assert flag.build(0)['variations'] == [True,False] flag.variations('dog', 'cat') From 787c715883b20e9b0fd3939090a6f627bb5a134d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 14 Feb 2022 10:33:44 -0800 Subject: [PATCH 276/356] more type hints --- ldclient/integrations/test_data.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 2c1fa06a..7bd532d4 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -1,4 +1,6 @@ import copy +from typing import Any + from ldclient.versioned_data_kind import FEATURES from ldclient.rwlock import ReadWriteLock from ldclient.impl.integrations.test_data.test_data_source import _TestDataSource @@ -143,7 +145,7 @@ class FlagBuilder(): :see: :meth:`ldclient.integrations.test_data.TestData.flag()` :see: :meth:`ldclient.integrations.test_data.TestData.update()` """ - def __init__(self, key): + def __init__(self, key: str): """:param str key: The name of the flag """ self._key = key @@ -290,7 +292,7 @@ def variation_for_all_users(self, variation: bool|int) -> 'FlagBuilder': else: return self.clear_rules().clear_targets().on(True).fallthrough_variation(variation) - def value_for_all_users(self, value) -> 'FlagBuilder': + def value_for_all_users(self, value: Any) -> 'FlagBuilder': """ Sets the flag to always return the specified variation value for all users. 
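To make the behavior of ``value_for_all_users`` concrete, here is a short sketch with an illustrative flag key and value; the second form spells out the equivalence from the method body:

    td = TestData.data_source()
    td.update(td.flag('greeting').value_for_all_users('bonjour'))
    # equivalent to:
    td.update(td.flag('greeting').variations('bonjour').variation_for_all_users(0))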
@@ -350,7 +352,7 @@ def variation_for_user(self, user_key: str, variation: bool|int) -> 'FlagBuilder return self - def _add_rule(self, flag_rule_builder): + def _add_rule(self, flag_rule_builder: 'FlagRuleBuilder'): self._rules.append(flag_rule_builder) def if_match(self, attribute: str, *values) -> 'FlagBuilder': @@ -457,7 +459,7 @@ class FlagRuleBuilder(): Finally, call :meth:`ldclient.integrations.test_data.FlagRuleBuilder.then_return()` to finish defining the rule. """ - def __init__(self, flag_builder): + def __init__(self, flag_builder: FlagBuilder): self._flag_builder = flag_builder self._clauses = [] self._variation = None From 65e733da0a853576141a75a39e52094bd972676c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 14 Feb 2022 10:42:43 -0800 Subject: [PATCH 277/356] remove some methods from the public test_data API --- ldclient/integrations/test_data.py | 20 ++++--- .../test_test_data_source.py | 53 +++++++++---------- 2 files changed, 39 insertions(+), 34 deletions(-) rename testing/{ => integrations}/test_test_data_source.py (86%) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 7bd532d4..08a4c793 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -87,7 +87,7 @@ def flag(self, key: str) -> 'FlagBuilder': try: self._lock.rlock() if key in self._flag_builders and self._flag_builders[key]: - return self._flag_builders[key].copy() + return self._flag_builders[key]._copy() else: return FlagBuilder(key).boolean_flag() finally: @@ -117,10 +117,10 @@ def update(self, flag_builder: 'FlagBuilder') -> 'TestData': if old_flag: old_version = old_flag['version'] - new_flag = flag_builder.build(old_version + 1) + new_flag = flag_builder._build(old_version + 1) self._current_flags[flag_builder._key] = new_flag - self._flag_builders[flag_builder._key] = flag_builder.copy() + self._flag_builders[flag_builder._key] = flag_builder._copy() finally: self._lock.unlock() @@ -156,7 +156,9 @@ def __init__(self, key: str): self._targets = {} self._rules = [] - def copy(self) -> 'FlagBuilder': + # Note that _copy is private by convention, because we don't want developers to + # consider it part of the public API, but it is still called from TestData. + def _copy(self) -> 'FlagBuilder': """Creates a deep copy of the flag builder. Subsequent updates to the original ``FlagBuilder`` object will not update the copy and vice versa. @@ -409,7 +411,9 @@ def clear_targets(self) -> 'FlagBuilder': self._targets = {} return self - def build(self, version: int) -> dict: + # Note that _build is private by convention, because we don't want developers to + # consider it part of the public API, but it is still called from TestData. + def _build(self, version: int) -> dict: """Creates a dictionary representation of the flag :param version: the version number of the rule @@ -437,7 +441,7 @@ def build(self, version: int) -> dict: base_flag_object['rules'] = [] for idx, rule in enumerate(self._rules): - base_flag_object['rules'].append(rule.build(idx)) + base_flag_object['rules'].append(rule._build(idx)) return base_flag_object @@ -529,7 +533,9 @@ def then_return(self, variation: bool|int) -> 'FlagRuleBuilder': self._flag_builder._add_rule(self) return self._flag_builder - def build(self, id: str) -> dict: + # Note that _build is private by convention, because we don't want developers to + # consider it part of the public API, but it is still called from FlagBuilder. 
+ def _build(self, id: str) -> dict: """Creates a dictionary representation of the rule :param id: the rule id diff --git a/testing/test_test_data_source.py b/testing/integrations/test_test_data_source.py similarity index 86% rename from testing/test_test_data_source.py rename to testing/integrations/test_test_data_source.py index e2e426f7..e0db1208 100644 --- a/testing/test_test_data_source.py +++ b/testing/integrations/test_test_data_source.py @@ -9,7 +9,6 @@ from ldclient.integrations.test_data import TestData - ## Test Data + Data Source def test_makes_valid_datasource(): @@ -26,7 +25,7 @@ def test_makes_valid_datasource_with_flag(): flag = td.flag(key='test-flag') assert flag is not None - builtFlag = flag.build(0) + builtFlag = flag._build(0) assert builtFlag['key'] is 'test-flag' assert builtFlag['on'] is True assert builtFlag['variations'] == [True, False] @@ -40,7 +39,7 @@ def test_can_retrieve_flag_from_store(): client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) - assert store.get(FEATURES, 'some-flag') == td.flag('some-flag').build(1) + assert store.get(FEATURES, 'some-flag') == td.flag('some-flag')._build(1) client.close() @@ -53,7 +52,7 @@ def test_updates_to_flags_are_reflected_in_store(): td.update(td.flag('some-flag')) - assert store.get(FEATURES, 'some-flag') == td.flag('some-flag').build(1) + assert store.get(FEATURES, 'some-flag') == td.flag('some-flag')._build(1) client.close() @@ -146,44 +145,44 @@ def test_can_handle_multiple_clients(): def test_flagbuilder_defaults_to_boolean_flag(): td = TestData.data_source() flag = td.flag('empty-flag') - assert flag.build(0)['variations'] == [True, False] - assert flag.build(0)['fallthrough'] == {'variation': 0} - assert flag.build(0)['offVariation'] == 1 + assert flag._build(0)['variations'] == [True, False] + assert flag._build(0)['fallthrough'] == {'variation': 0} + assert flag._build(0)['offVariation'] == 1 def test_flagbuilder_can_turn_flag_off(): td = TestData.data_source() flag = td.flag('test-flag') flag.on(False) - assert flag.build(0)['on'] is False + assert flag._build(0)['on'] is False def test_flagbuilder_can_set_fallthrough_variation(): td = TestData.data_source() flag = td.flag('test-flag') flag.fallthrough_variation(2) - assert flag.build(0)['fallthrough'] == {'variation': 2} + assert flag._build(0)['fallthrough'] == {'variation': 2} flag.fallthrough_variation(True) - assert flag.build(0)['fallthrough'] == {'variation': 0} + assert flag._build(0)['fallthrough'] == {'variation': 0} def test_flagbuilder_can_set_off_variation(): td = TestData.data_source() flag = td.flag('test-flag') flag.off_variation(2) - assert flag.build(0)['offVariation'] == 2 + assert flag._build(0)['offVariation'] == 2 flag.off_variation(True) - assert flag.build(0)['offVariation'] == 0 + assert flag._build(0)['offVariation'] == 0 def test_flagbuilder_can_make_boolean_flag(): td = TestData.data_source() flag = td.flag('boolean-flag').boolean_flag() - builtFlag = flag.build(0) + builtFlag = flag._build(0) assert builtFlag['fallthrough'] == {'variation': 0} assert builtFlag['offVariation'] == 1 @@ -191,21 +190,21 @@ def test_flagbuilder_can_set_variation_when_targeting_is_off(): td = TestData.data_source() flag = td.flag('test-flag') \ .on(False) - assert flag.build(0)['on'] == False - assert flag.build(0)['variations'] == [True,False] + assert flag._build(0)['on'] == False + assert flag._build(0)['variations'] == [True,False] flag.variations('dog', 'cat') - assert 
flag.build(0)['variations'] == ['dog','cat'] + assert flag._build(0)['variations'] == ['dog','cat'] def test_flagbuilder_can_set_variation_for_all_users(): td = TestData.data_source() flag = td.flag('test-flag') flag.variation_for_all_users(True) - assert flag.build(0)['fallthrough'] == {'variation': 0} + assert flag._build(0)['fallthrough'] == {'variation': 0} def test_flagbuilder_clears_existing_rules_and_targets_when_setting_variation_for_all_users(): td = TestData.data_source() - flag = td.flag('test-flag').if_match('name', 'christian').then_return(False).variation_for_user('christian', False).variation_for_all_users(True).build(0) + flag = td.flag('test-flag').if_match('name', 'christian').then_return(False).variation_for_user('christian', False).variation_for_all_users(True)._build(0) assert flag['rules'] == [] assert flag['targets'] == [] @@ -214,18 +213,18 @@ def test_flagbuilder_can_set_variations(): td = TestData.data_source() flag = td.flag('test-flag') flag.variations(2,3,4,5) - assert flag.build(0)['variations'] == [2,3,4,5] + assert flag._build(0)['variations'] == [2,3,4,5] def test_flagbuilder_can_make_an_immutable_copy(): td = TestData.data_source() flag = td.flag('test-flag') flag.variations(1,2) - copy_of_flag = flag.copy() + copy_of_flag = flag._copy() flag.variations(3,4) - assert copy_of_flag.build(0)['variations'] == [1,2] + assert copy_of_flag._build(0)['variations'] == [1,2] copy_of_flag.variations(5,6) - assert flag.build(0)['variations'] == [3,4] + assert flag._build(0)['variations'] == [3,4] def test_flagbuilder_can_set_boolean_variation_for_user(): td = TestData.data_source() @@ -237,7 +236,7 @@ def test_flagbuilder_can_set_boolean_variation_for_user(): 'values': ['christian'] } ] - assert flag.build(0)['targets'] == expected_targets + assert flag._build(0)['targets'] == expected_targets def test_flagbuilder_can_set_numerical_variation_for_user(): td = TestData.data_source() @@ -250,20 +249,20 @@ def test_flagbuilder_can_set_numerical_variation_for_user(): 'values': ['christian'] } ] - assert flag.build(1)['targets'] == expected_targets + assert flag._build(1)['targets'] == expected_targets def test_flagbuilder_can_set_value_for_all_users(): td = TestData.data_source() flag = td.flag('user-value-flag') flag.variation_for_user('john', 1) - built_flag = flag.build(0) + built_flag = flag._build(0) assert built_flag['targets'] == [{'values': ['john'], 'variation': 1}] assert built_flag['variations'] == [True, False] flag.value_for_all_users('yes') - built_flag2 = flag.build(0) + built_flag2 = flag._build(0) assert built_flag2['targets'] == [] assert built_flag2['variations'] == ['yes'] @@ -297,4 +296,4 @@ def test_flagbuilder_can_build(): 'version': 1, } - assert flag.build(1) == expected_result + assert flag._build(1) == expected_result From 23f066c1ce739045e16992ffff9d897992dcbcaf Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 14 Feb 2022 10:48:39 -0800 Subject: [PATCH 278/356] can't use "x|y" shortcut in typehints in older Pythons; use Union --- ldclient/integrations/test_data.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 7bd532d4..895c24f0 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -1,5 +1,5 @@ import copy -from typing import Any +from typing import Any, Union from ldclient.versioned_data_kind import FEATURES from ldclient.rwlock import ReadWriteLock @@ -189,7 +189,7 @@ def on(self, on: bool) -> 
'FlagBuilder': self._on = on return self - def fallthrough_variation(self, variation: bool|int) -> 'FlagBuilder': + def fallthrough_variation(self, variation: Union[bool, int]) -> 'FlagBuilder': """Specifies the fallthrough variation. The fallthrough is the value that is returned if targeting is on and the user was not matched by a more specific target or rule. @@ -208,7 +208,7 @@ def fallthrough_variation(self, variation: bool|int) -> 'FlagBuilder': self._fallthrough_variation = variation return self - def off_variation(self, variation: bool|int) -> 'FlagBuilder' : + def off_variation(self, variation: Union[bool, int]) -> 'FlagBuilder' : """Specifies the fallthrough variation. This is the variation that is returned whenever targeting is off. @@ -274,7 +274,7 @@ def variations(self, *variations) -> 'FlagBuilder': return self - def variation_for_all_users(self, variation: bool|int) -> 'FlagBuilder': + def variation_for_all_users(self, variation: Union[bool, int]) -> 'FlagBuilder': """Sets the flag to always return the specified variation for all users. The variation is specified, Targeting is switched on, and any existing targets or rules are removed. @@ -306,7 +306,7 @@ def value_for_all_users(self, value: Any) -> 'FlagBuilder': """ return self.variations(value).variation_for_all_users(0) - def variation_for_user(self, user_key: str, variation: bool|int) -> 'FlagBuilder': + def variation_for_user(self, user_key: str, variation: Union[bool, int]) -> 'FlagBuilder': """Sets the flag to return the specified variation for a specific user key when targeting is on. @@ -510,7 +510,7 @@ def and_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder': }) return self - def then_return(self, variation: bool|int) -> 'FlagRuleBuilder': + def then_return(self, variation: Union[bool, int]) -> 'FlagRuleBuilder': """Finishes defining the rule, specifying the result as either a boolean or a variation index. From 063752a5006ed3839dc1268e048aa55a3ab42260 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 14 Feb 2022 10:55:14 -0800 Subject: [PATCH 279/356] fix misc type mistakes because I forgot to run the linter --- ldclient/integrations/test_data.py | 32 +++++++++++++++--------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 895c24f0..daf4e0f6 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -1,5 +1,5 @@ import copy -from typing import Any, Union +from typing import Any, Dict, List, Optional, Union from ldclient.versioned_data_kind import FEATURES from ldclient.rwlock import ReadWriteLock @@ -150,11 +150,11 @@ def __init__(self, key: str): """ self._key = key self._on = True - self._variations = [] - self._off_variation = None - self._fallthrough_variation = None - self._targets = {} - self._rules = [] + self._variations = [] # type: List[Any] + self._off_variation = None # type: Optional[int] + self._fallthrough_variation = None # type: Optional[int] + self._targets = {} # type: Dict[int, List[str]] + self._rules = [] # type: List[FlagRuleBuilder] def copy(self) -> 'FlagBuilder': """Creates a deep copy of the flag builder. Subsequent updates to the @@ -173,7 +173,6 @@ def copy(self) -> 'FlagBuilder': return to - def on(self, on: bool) -> 'FlagBuilder': """Sets targeting to be on or off for this flag. 
@@ -330,7 +329,7 @@ def variation_for_user(self, user_key: str, variation: Union[bool, int]) -> 'Fla for idx, var in enumerate(self._variations): if (idx == variation): # If there is no set at the current variation, set it to be empty - target_for_variation = [] + target_for_variation = [] # type: List[str] if idx in targets: target_for_variation = targets[idx] @@ -355,7 +354,7 @@ def variation_for_user(self, user_key: str, variation: Union[bool, int]) -> 'Fla def _add_rule(self, flag_rule_builder: 'FlagRuleBuilder'): self._rules.append(flag_rule_builder) - def if_match(self, attribute: str, *values) -> 'FlagBuilder': + def if_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Starts defining a flag rule, using the "is one of" operator. **Example:** create a rule that returns ``True`` if the name is "Patsy" or "Edina" @@ -372,7 +371,7 @@ def if_match(self, attribute: str, *values) -> 'FlagBuilder': flag_rule_builder = FlagRuleBuilder(self) return flag_rule_builder.and_match(attribute, *values) - def if_not_match(self, attribute: str, *values) -> 'FlagBuilder': + def if_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Starts defining a flag rule, using the "is not one of" operator. **Example:** create a rule that returns ``True`` if the name is neither "Saffron" nor "Bubble" @@ -435,9 +434,10 @@ def build(self, version: int) -> dict: }) base_flag_object['targets'] = targets - base_flag_object['rules'] = [] + rules = [] for idx, rule in enumerate(self._rules): - base_flag_object['rules'].append(rule.build(idx)) + rules.append(rule.build(str(idx))) + base_flag_object['rules'] = rules return base_flag_object @@ -461,8 +461,8 @@ class FlagRuleBuilder(): """ def __init__(self, flag_builder: FlagBuilder): self._flag_builder = flag_builder - self._clauses = [] - self._variation = None + self._clauses = [] # type: List[dict] + self._variation = None # type: Optional[int] def and_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Adds another clause, using the "is one of" operator. @@ -510,7 +510,7 @@ def and_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder': }) return self - def then_return(self, variation: Union[bool, int]) -> 'FlagRuleBuilder': + def then_return(self, variation: Union[bool, int]) -> 'FlagBuilder': """Finishes defining the rule, specifying the result as either a boolean or a variation index. 
@@ -536,7 +536,7 @@ def build(self, id: str) -> dict: :return: the dictionary representation of the rule """ return { - 'id': 'rule' + str(id), + 'id': 'rule' + id, 'variation': self._variation, 'clauses': self._clauses } From 4f6f6cf87b3a1ba4ce0bd2b3ee714e5aa31689a2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 14 Feb 2022 14:05:07 -0800 Subject: [PATCH 280/356] update CONTRIBUTING.md and provide make targets --- .circleci/config.yml | 3 +-- CONTRIBUTING.md | 60 ++++++++++++++++++++++++++++++++++++++------ Makefile | 18 +++++++++++++ 3 files changed, 72 insertions(+), 9 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f4cf6591..e3d5b29c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -93,8 +93,7 @@ jobs: name: verify typehints command: | export PATH="/home/circleci/.local/bin:$PATH" - mypy --install-types --non-interactive ldclient testing - mypy --config-file mypy.ini ldclient testing + make lint - unless: condition: <> diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 32425905..8b956b9e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,18 +32,64 @@ The additional requirements files `consul-requirements.txt`, `dynamodb-requireme ### Testing -To run all unit tests: +To run all unit tests except for the database integrations: +```shell +make test ``` -pytest -``` -By default, the full unit test suite includes live tests of the integrations for Consul, DynamoDB, and Redis. Those tests expect you to have instances of all of those databases running locally. To skip them, set the environment variable `LD_SKIP_DATABASE_TESTS=1` before running the tests. +To run all unit tests including the database integrations (this requires you to have instances of Consul, DynamoDB, and Redis running locally): + +```shell +make test-all +``` There are also integration tests that can be run against the LaunchDarkly service. To enable them, set the environment variable `LD_SDK_KEY` to a valid production SDK Key. -### Portability +It is preferable to run tests against all supported minor versions of Python (as described in `README.md` under Requirements), or at least the lowest and highest versions, prior to submitting a pull request. However, LaunchDarkly's CI tests will run automatically against all supported versions. -Most portability issues are addressed by using the `six` package. We are avoiding the use of `__future__` imports, since they can easily be omitted by mistake causing code in one file to behave differently from another; instead, whenever possible, use an explicit approach that makes it clear what the desired behavior is in all Python versions (e.g. if you want to do floor division, use `//`; if you want to divide as floats, explicitly cast to floats). +### Building documentation -It is preferable to run tests against all supported minor versions of Python (as described in `README.md` under Requirements), or at least the lowest and highest versions, prior to submitting a pull request. However, LaunchDarkly's CI tests will run automatically against all supported versions. +See "Documenting types and methods" below. To build the documentation locally, so you can see the effects of any changes before a release: + +```shell +make docs +``` + +The output will appear in `docs/build/html`. Its formatting will be somewhat different since it does not have the same stylesheets used on readthedocs.io. + +### Running the linter + +The `mypy` tool is used in CI to verify type hints and warn of potential code problems. 
To run it locally: + +```shell +make lint +``` + +## Code organization + +The SDK's module structure is as follows: + +* `ldclient`: This module exports the most commonly used classes and methods in the SDK, such as `LDClient`. The implementations may live in other modules, but applications should not need to import a more specific module such as `ldclient.client` to get those symbols. +* `ldclient.integrations`: This module contains entry points for optional features that are related to how the SDK communicates with other systems, such as `Redis`. +* `ldclient.interfaces`: This namespace contains types that do not do anything by themselves, but may need to be referenced if you are using optional features or implementing a custom component. + +A special case is the module `ldclient.impl`, and any modules within it. Everything under `impl` is considered a private implementation detail: all files there are excluded from the generated documentation, and are considered subject to change at any time and not supported for direct use by application developers. Alternately, class names can be prefixed with an underscore to be "private by convention"; that will at least prevent them from being included in wildcard imports like `from ldclient import *`, but it is still preferable to avoid a proliferation of implementation-only modules within the main `ldclient` module, since developers may wrongly decide to reference such modules in imports. + +So, if there is a class whose existence is entirely an implementation detail, it should be in `impl`. Similarly, classes that are _not_ in `impl` must not expose any public members (i.e. symbols that do not have an underscore prefix) that are not meant to be part of the supported public API. This is important because of our guarantee of backward compatibility for all public APIs within a major version: we want to be able to change our implementation details to suit the needs of the code, without worrying about breaking a customer's code. Due to how the language works, we can't actually prevent an application developer from referencing those classes in their code, but this convention makes it clear that such use is discouraged and unsupported. + +### Type hints + +Python does not require the use of type hints, but they can be extremely helpful for spotting mistakes and for improving the IDE experience, so we should always use them in the SDK. Every method in the public API is expected to have type hints for all non-`self` parameters, and for its return value if any. + +It's also desirable to use type hints for private attributes, to catch possible mistakes in their use. Until all versions of Python that we support allow the PEP 526 syntax for doing this, we must do it via a comment in the format that `mypy` understands, for instance: + +```python + self._some_attribute = None # type: Optional[int] +``` + +## Documenting types and methods + +All classes and public methods outside of `ldclient.impl` should have docstrings in Sphinx format. These are used to build the documentation that is published on [readthedocs.io](https://launchdarkly-python-sdk.readthedocs.io/). See the [Sphinx documentation](https://www.sphinx-doc.org/en/master/) for details of the docstring format. + +Please try to make the style and terminology in documentation comments consistent with other documentation comments in the SDK. 
Also, if a class or method is being added that has an equivalent in other SDKs, and if we have described it in a consistent way in those other SDKs, please reuse the text whenever possible (with adjustments for anything language-specific) rather than writing new text. diff --git a/Makefile b/Makefile index 730218e3..ca4fa068 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,21 @@ + +PYTEST_FLAGS=-W error::SyntaxWarning + +test: + LD_SKIP_DATABASE_TESTS=1 pytest $(PYTEST_FLAGS) + +test-all: + pytest $(PYTEST_FLAGS) + +lint: + mypy --install-types --non-interactive --config-file mypy.ini ldclient testing + +docs: + cd docs && make html + +.PHONY: test test-all lint docs + + TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # port 8000 and 9000 is already used in the CI environment because we're From 9c152c309c6833fc65b9bafa5981baadb138d90c Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Tue, 15 Mar 2022 00:10:02 +0000 Subject: [PATCH 281/356] fixed a bug with flag rule clause builder internals; added unit test to verify rule evaluation --- ldclient/integrations/test_data.py | 4 +-- testing/integrations/test_test_data_source.py | 34 ++++++++++++++++++- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index a159eb12..752ed765 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -485,7 +485,7 @@ def and_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """ self._clauses.append({ 'attribute': attribute, - 'operator': 'in', + 'op': 'in', 'values': list(values), 'negate': False }) @@ -508,7 +508,7 @@ def and_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """ self._clauses.append({ 'attribute': attribute, - 'operator': 'in', + 'op': 'in', 'values': list(values), 'negate': True }) return self diff --git a/testing/integrations/test_test_data_source.py b/testing/integrations/test_test_data_source.py index e0db1208..47f0d025 100644 --- a/testing/integrations/test_test_data_source.py +++ b/testing/integrations/test_test_data_source.py @@ -285,7 +285,7 @@ def test_flagbuilder_can_build(): 'clauses': [ {'attribute': 'country', 'negate': False, - 'operator': 'in', + 'op': 'in', 'values': ['fr'] } ], @@ -297,3 +297,35 @@ assert flag._build(1) == expected_result + +def test_flag_can_evaluate_rules(): + td = TestData.data_source() + store = InMemoryFeatureStore() + + client = LDClient(config=Config('SDK_KEY', + update_processor_class = td, + send_events = False, + feature_store = store)) + + td.update(td.flag(key='test-flag') + .fallthrough_variation(False) + .if_match('firstName', 'Mike') + .and_not_match('country', 'gb') + .then_return(True)) + + # user1 should satisfy the rule (matching firstname, not matching country) + user1 = { 'key': 'user1', 'firstName': 'Mike', 'country': 'us' } + eval1 = client.variation_detail('test-flag', user1, default='default') + + assert eval1.value == True + assert eval1.variation_index == 0 + assert eval1.reason['kind'] == 'RULE_MATCH' + + # user2 should NOT satisfy the rule (not matching firstname despite not matching country) + user2 = { 'key': 'user2', 'firstName': 'Joe', 'country': 'us' } + eval2 = client.variation_detail('test-flag', user2, default='default') + + assert eval2.value == False + assert eval2.variation_index == 1 + assert eval2.reason['kind'] == 'FALLTHROUGH' + From 2d5744b423fe8f0e11586e27ff249a1f479d04e3 Mon Sep 17 00:00:00 2001 From: charukiewicz Date: Tue, 15 Mar 2022 15:59:19 +0000
Subject: [PATCH 282/356] added ready argument to _TestDataSource class and indicated ready upon start to avoid delays in TestData initialization --- ldclient/impl/integrations/test_data/test_data_source.py | 4 +++- ldclient/integrations/test_data.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index db3ac729..e6272925 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -5,11 +5,13 @@ class _TestDataSource(): - def __init__(self, feature_store, test_data): + def __init__(self, feature_store, test_data, ready): self._feature_store = feature_store self._test_data = test_data + self._ready = ready def start(self): + self._ready.set() self._feature_store.init(self._test_data._make_init_data()) def stop(self): diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 752ed765..0030cde6 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -51,7 +51,7 @@ def __init__(self): self._instances = [] def __call__(self, config, store, ready): - data_source = _TestDataSource(store, self) + data_source = _TestDataSource(store, self, ready) try: self._lock.lock() self._instances.append(data_source) From 1f21ca5b39129377e7d342ecbc85eff14995bf08 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Wed, 16 Mar 2022 14:30:17 -0400 Subject: [PATCH 283/356] Update contract tests to latest flask version (#176) Our contract tests depend on flask v1, which in turn depends on Jinja 2. Both of these are terribly dated and no longer supported. Jinja depends on markupsafe. markupsafe recently updated its code to no longer provide soft_unicode which in turn broke Jinja. Updating to the latest flask keeps all transitive dependencies better aligned and addresses this mismatch. 
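The `ready` argument that patch 282 above threads into `_TestDataSource` is the `threading.Event` that the client waits on (up to its start-wait timeout) during initialization, so a data source that initializes synchronously should set it as soon as `start()` runs. A minimal sketch of the pattern, assuming a hypothetical data source class and any feature store with an `init()` method (only the `start`/`stop` shape and the `ready.set()` call come from the diff above):

```python
import threading

class SynchronousDataSource:
    """Hypothetical data source that becomes ready the moment start() runs."""

    def __init__(self, store, ready: threading.Event):
        self._store = store
        self._ready = ready

    def start(self):
        # Signal readiness first so a client blocking on the event is not
        # delayed by a source that initializes synchronously anyway.
        self._ready.set()
        self._store.init({})  # then push whatever initial data this source has

    def stop(self):
        pass
```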
--- .circleci/config.yml | 21 ++++++++++++------- contract-tests/requirements.txt | 2 +- .../redis/redis_big_segment_store.py | 5 ++++- sse-contract-tests/requirements.txt | 2 +- 4 files changed, 20 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e3d5b29c..007b5fb2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,6 +10,7 @@ workflows: name: Python 3.5 docker-image: cimg/python:3.5 skip-sse-contract-tests: true # the test service app has dependencies that aren't available in 3.5, which is EOL anyway + skip-contract-tests: true # the test service app has dependencies that aren't available in 3.5, which is EOL anyway - test-linux: name: Python 3.6 docker-image: cimg/python:3.6 @@ -46,6 +47,9 @@ jobs: skip-sse-contract-tests: type: boolean default: false + skip-contract-tests: + type: boolean + default: false docker: - image: <> - image: redis @@ -109,13 +113,16 @@ jobs: name: run SSE contract tests command: cd sse-contract-tests && make run-contract-tests - - run: make build-contract-tests - - run: - command: make start-contract-test-service - background: true - - run: - name: run contract tests - command: TEST_HARNESS_PARAMS="-junit test-reports/contract-tests-junit.xml" make run-contract-tests + - unless: + condition: <> + steps: + - run: make build-contract-tests + - run: + command: make start-contract-test-service + background: true + - run: + name: run contract tests + command: TEST_HARNESS_PARAMS="-junit test-reports/contract-tests-junit.xml" make run-contract-tests - store_test_results: path: test-reports diff --git a/contract-tests/requirements.txt b/contract-tests/requirements.txt index f55a4204..0018e4c8 100644 --- a/contract-tests/requirements.txt +++ b/contract-tests/requirements.txt @@ -1,2 +1,2 @@ -Flask==1.1.4 +Flask==2.0.3 urllib3>=1.22.0 diff --git a/ldclient/impl/integrations/redis/redis_big_segment_store.py b/ldclient/impl/integrations/redis/redis_big_segment_store.py index 35b42b71..d3b4b767 100644 --- a/ldclient/impl/integrations/redis/redis_big_segment_store.py +++ b/ldclient/impl/integrations/redis/redis_big_segment_store.py @@ -26,7 +26,10 @@ def __init__(self, url: str, prefix: Optional[str], max_connections: int): def get_metadata(self) -> BigSegmentStoreMetadata: r = redis.Redis(connection_pool=self._pool) value = r.get(self._prefix + self.KEY_LAST_UP_TO_DATE) - return BigSegmentStoreMetadata(None if value is None else int(value)) + if value is None: + return BigSegmentStoreMetadata(None) + + return BigSegmentStoreMetadata(int(value)) def get_membership(self, user_hash: str) -> Optional[dict]: r = redis.Redis(connection_pool=self._pool) diff --git a/sse-contract-tests/requirements.txt b/sse-contract-tests/requirements.txt index 2d1d2a7b..0018e4c8 100644 --- a/sse-contract-tests/requirements.txt +++ b/sse-contract-tests/requirements.txt @@ -1,2 +1,2 @@ -Flask==2.0.2 +Flask==2.0.3 urllib3>=1.22.0 From 58d8af73af7575875fb71407035d5d35e1e7f5bd Mon Sep 17 00:00:00 2001 From: Ember Stevens Date: Tue, 19 Apr 2022 16:36:38 -0700 Subject: [PATCH 284/356] Adds link to Relay Proxy docs --- ldclient/config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ldclient/config.py b/ldclient/config.py index 9a19c264..dfe1a29a 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -201,10 +201,12 @@ def __init__(self, default values are returned for all flags and no remote network requests are made. By default, this is false. 
:param poll_interval: The number of seconds between polls for flag updates if streaming is off. - :param use_ldd: Whether you are using the LaunchDarkly relay proxy in daemon mode. In this + :param use_ldd: Whether you are using the LaunchDarkly Relay Proxy in daemon mode. In this configuration, the client will not use a streaming connection to listen for updates, but instead will get feature state from a Redis instance. The `stream` and `poll_interval` options will be ignored if this option is set to true. By default, this is false. + For more information, read the LaunchDarkly + documentation: https://docs.launchdarkly.com/home/relay-proxy/using#using-daemon-mode :param array private_attribute_names: Marks a set of attribute names private. Any users sent to LaunchDarkly with this configuration active will have attributes with these names removed. :param all_attributes_private: If true, all user attributes (other than the key) will be From 5c5a3e85b6033dd5204cbb2785af57464e61cf0a Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Wed, 20 Apr 2022 14:52:06 -0400 Subject: [PATCH 285/356] Handle explicit None values in test payload (#179) The test harness may send explicit None values which should be treated the same as if the value was omitted entirely. --- contract-tests/client_entity.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index f3bf22fc..5d2d5220 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -6,39 +6,36 @@ sys.path.insert(1, os.path.join(sys.path[0], '..')) from ldclient import * -def millis_to_seconds(t): - return None if t is None else t / 1000 - class ClientEntity: def __init__(self, tag, config): self.log = logging.getLogger(tag) opts = {"sdk_key": config["credential"]} - if "streaming" in config: + if config.get("streaming") is not None: streaming = config["streaming"] - if "baseUri" in streaming: + if streaming.get("baseUri") is not None: opts["stream_uri"] = streaming["baseUri"] if streaming.get("initialRetryDelayMs") is not None: opts["initial_reconnect_delay"] = streaming["initialRetryDelayMs"] / 1000.0 - if "events" in config: + if config.get("events") is not None: events = config["events"] - if "baseUri" in events: + if events.get("baseUri") is not None: opts["events_uri"] = events["baseUri"] - if events.get("capacity", None) is not None: + if events.get("capacity") is not None: opts["events_max_pending"] = events["capacity"] opts["diagnostic_opt_out"] = not events.get("enableDiagnostics", False) opts["all_attributes_private"] = events.get("allAttributesPrivate", False) opts["private_attribute_names"] = events.get("globalPrivateAttributes", {}) - if "flushIntervalMs" in events: + if events.get("flushIntervalMs") is not None: opts["flush_interval"] = events["flushIntervalMs"] / 1000.0 - if "inlineUsers" in events: + if events.get("inlineUsers") is not None: opts["inline_users_in_events"] = events["inlineUsers"] else: opts["send_events"] = False - start_wait = config.get("startWaitTimeMs", 5000) + start_wait = config.get("startWaitTimeMs") or 5000 config = Config(**opts) self.client = client.LDClient(config, start_wait / 1000.0) From c984111428997412f964ec7e9806651726b6761c Mon Sep 17 00:00:00 2001 From: "Matthew M. 
Keeler" Date: Wed, 20 Apr 2022 15:23:28 -0400 Subject: [PATCH 286/356] Fix "unhandled response" error in test harness (#180) When we return a `('', 204)` response from the flask handler, [Werkzeug intentionally removes the 'Content-Type' header][1], which causes the response to be created as a chunked response. The test harness is likely seeing a 204 response and isn't trying to read anything more from the stream. But since we are re-using connections, the next time it reads from the stream, it sees the `0\r\n\r\n` chunk and outputs an error: > 2022/04/20 14:23:39 Unsolicited response received on idle HTTP channel starting with "0\r\n\r\n"; err= Changing this response to 202 causes Werkzeug to return an empty response and silences the error. [1]: https://github.com/pallets/werkzeug/blob/560dd5f320bff318175f209595d42f5a80045417/src/werkzeug/wrappers/response.py#L540 --- contract-tests/service.py | 4 ++-- sse-contract-tests/service.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/contract-tests/service.py b/contract-tests/service.py index b4728867..d9f8e0a5 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -4,7 +4,7 @@ import logging import os import sys -from flask import Flask, request, jsonify +from flask import Flask, request from flask.logging import default_handler from logging.config import dictConfig from werkzeug.exceptions import HTTPException @@ -132,7 +132,7 @@ def delete_client(id): return ('', 404) client.close() - return ('', 204) + return ('', 202) if __name__ == "__main__": port = default_port diff --git a/sse-contract-tests/service.py b/sse-contract-tests/service.py index 6d07fc59..389b1a1f 100644 --- a/sse-contract-tests/service.py +++ b/sse-contract-tests/service.py @@ -81,7 +81,7 @@ def delete_stream(id): if stream is None: return ('', 404) stream.close() - return ('', 204) + return ('', 202) if __name__ == "__main__": port = default_port From 0ce915358334cb2d2fc2230641254b32e356253f Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Wed, 20 Apr 2022 15:55:38 -0400 Subject: [PATCH 287/356] Exclude booleans when getting bucketable value (#181) When calculating a bucket, we get the bucketable value from the specified bucket by attribute. If this value is a string or an int, we can use it. Otherwise, we return None. Python considers a bool an instance of an int, which isn't what we want. So we need to add an explicit exclusion for this. --- ldclient/impl/evaluator.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index d019f10d..0fa9f088 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -243,7 +243,12 @@ def _bucket_user(seed, user, key, salt, bucket_by): return result def _bucketable_string_value(u_value): - return str(u_value) if isinstance(u_value, (str, int)) else None + if isinstance(u_value, bool): + return None + elif isinstance(u_value, (str, int)): + return str(u_value) + + return None def _clause_matches_user_no_segments(clause, user): u_value, should_pass = _get_user_attribute(user, clause.get('attribute')) From 443e0a6c9dcbf65d859ab257fd011e465651055e Mon Sep 17 00:00:00 2001 From: "Matthew M. 
Keeler" Date: Fri, 6 May 2022 10:42:09 -0400 Subject: [PATCH 288/356] master -> main (#182) --- .github/pull_request_template.md | 2 +- .ldrelease/config.yml | 2 +- sse-contract-tests/Makefile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 19806760..fc89ce0f 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,7 +1,7 @@ **Requirements** - [ ] I have added test coverage for new or changed functionality -- [ ] I have followed the repository's [pull request submission guidelines](../blob/master/CONTRIBUTING.md#submitting-pull-requests) +- [ ] I have followed the repository's [pull request submission guidelines](../blob/main/CONTRIBUTING.md#submitting-pull-requests) - [ ] I have validated my changes against all supported platform versions **Related issues** diff --git a/.ldrelease/config.yml b/.ldrelease/config.yml index b7db59ad..9021210c 100644 --- a/.ldrelease/config.yml +++ b/.ldrelease/config.yml @@ -11,7 +11,7 @@ publications: description: documentation (readthedocs.io) branches: - - name: master + - name: main description: 7.x - name: 6.x diff --git a/sse-contract-tests/Makefile b/sse-contract-tests/Makefile index 37f69644..1d4da244 100644 --- a/sse-contract-tests/Makefile +++ b/sse-contract-tests/Makefile @@ -19,7 +19,7 @@ start-test-service-bg: @make start-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & run-contract-tests: - @curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/master/downloader/run.sh \ + @curl -s https://raw.githubusercontent.com/launchdarkly/sse-contract-tests/v2.0.0/downloader/run.sh \ | VERSION=v1 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end $(EXTRA_TEST_PARAMS)" sh contract-tests: build-test-service start-test-service-bg run-contract-tests From d3eb286258c4d24e3eec5a5962512306879a9e99 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Wed, 15 Jun 2022 16:04:29 -0400 Subject: [PATCH 289/356] Loosen restriction on expiringdict (#183) Originally this was pinned to a max version to deal with the incompatibility of Python 3.3 and the `typing` package. See [this PR][1]. Now that we now only support >=3.5, we can safely relax this restriction again. [1]: https://github.com/launchdarkly/python-server-sdk-private/pull/120 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 449e3467..078df9e0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ certifi>=2018.4.16 -expiringdict>=1.1.4,<1.2.0 +expiringdict>=1.1.4 pyRFC3339>=1.0 semver>=2.10.2,<3.0.0 urllib3>=1.22.0 From 0b70f678acf0de4353a539e27eb8de200ed89de2 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Wed, 22 Jun 2022 15:58:52 -0400 Subject: [PATCH 290/356] Fix mypy type checking (#184) A [customer requested][original-pr] that we start including a py.typed file in our repository. This would enable mypy to take advantage of our typehints. Unfortunately, this didn't completely solve the customers issue. A [second pr][second-pr] was opened to address the missing step of including the py.typed file in the `Manifest.in` file. However, this change alone is not sufficient. According to the [documentation][include_package_data], you must also include the `include_package_data=True` directive so that files specified in the `Manifest.in` file are included in distribution. 
[original-pr]: https://github.com/launchdarkly/python-server-sdk/pull/166 [second-pr]: https://github.com/launchdarkly/python-server-sdk/pull/172 [include_package_data]: https://setuptools.pypa.io/en/latest/userguide/datafiles.html#include-package-data --- MANIFEST.in | 1 + setup.py | 1 + 2 files changed, 2 insertions(+) diff --git a/MANIFEST.in b/MANIFEST.in index 35367703..f376dd16 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,3 +4,4 @@ include test-requirements.txt include consul-requirements.txt include dynamodb-requirements.txt include redis-requirements.txt +include ldclient/py.typed diff --git a/setup.py b/setup.py index cf3312f8..6c2cbe43 100644 --- a/setup.py +++ b/setup.py @@ -54,6 +54,7 @@ def run(self): author='LaunchDarkly', author_email='team@launchdarkly.com', packages=find_packages(), + include_package_data=True, url='https://github.com/launchdarkly/python-server-sdk', description='LaunchDarkly SDK for Python', long_description='LaunchDarkly SDK for Python', From d42e60083718ae2039ff71fdcaab38c81b61f3c9 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Wed, 29 Jun 2022 11:38:50 -0400 Subject: [PATCH 291/356] Add support for extra Redis connection parameters (#185) --- .../redis/redis_big_segment_store.py | 6 +-- .../integrations/redis/redis_feature_store.py | 6 ++- ldclient/integrations/__init__.py | 42 +++++++++++++++---- 3 files changed, 42 insertions(+), 12 deletions(-) diff --git a/ldclient/impl/integrations/redis/redis_big_segment_store.py b/ldclient/impl/integrations/redis/redis_big_segment_store.py index d3b4b767..a831bc32 100644 --- a/ldclient/impl/integrations/redis/redis_big_segment_store.py +++ b/ldclient/impl/integrations/redis/redis_big_segment_store.py @@ -1,7 +1,7 @@ from ldclient import log from ldclient.interfaces import BigSegmentStore, BigSegmentStoreMetadata -from typing import Optional, Set, cast +from typing import Any, Optional, Dict, Set, cast have_redis = False try: @@ -16,11 +16,11 @@ class _RedisBigSegmentStore(BigSegmentStore): KEY_USER_INCLUDE = ':big_segment_include:' KEY_USER_EXCLUDE = ':big_segment_exclude:' - def __init__(self, url: str, prefix: Optional[str], max_connections: int): + def __init__(self, url: str, prefix: Optional[str], redis_opts: Dict[str, Any]): if not have_redis: raise NotImplementedError("Cannot use Redis Big Segment store because redis package is not installed") self._prefix = prefix or 'launchdarkly' - self._pool = redis.ConnectionPool.from_url(url=url, max_connections=max_connections) + self._pool = redis.ConnectionPool.from_url(url=url, **redis_opts) log.info("Started RedisBigSegmentStore connected to URL: " + url + " using prefix: " + self._prefix) def get_metadata(self) -> BigSegmentStoreMetadata: diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py index eebe205d..9bc5d13b 100644 --- a/ldclient/impl/integrations/redis/redis_feature_store.py +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -11,13 +11,15 @@ from ldclient.interfaces import DiagnosticDescription, FeatureStoreCore from ldclient.versioned_data_kind import FEATURES +from typing import Any, Dict + class _RedisFeatureStoreCore(DiagnosticDescription, FeatureStoreCore): - def __init__(self, url, prefix, max_connections): + def __init__(self, url, prefix, redis_opts: Dict[str, Any]): if not have_redis: raise NotImplementedError("Cannot use Redis feature store because redis package is not installed") self._prefix = prefix or 'launchdarkly' - self._pool = 
redis.ConnectionPool.from_url(url=url, max_connections=max_connections) + self._pool = redis.ConnectionPool.from_url(url=url, **redis_opts) self.test_update_hook = None # exposed for testing log.info("Started RedisFeatureStore connected to URL: " + url + " using prefix: " + self._prefix) diff --git a/ldclient/integrations/__init__.py b/ldclient/integrations/__init__.py index b2c8c6ad..b45b2cb3 100644 --- a/ldclient/integrations/__init__.py +++ b/ldclient/integrations/__init__.py @@ -13,7 +13,7 @@ from ldclient.impl.integrations.redis.redis_feature_store import _RedisFeatureStoreCore from ldclient.interfaces import BigSegmentStore -from typing import Any, List, Mapping, Optional +from typing import Any, Dict, List, Mapping, Optional class Consul: """Provides factory methods for integrations between the LaunchDarkly SDK and Consul. @@ -144,7 +144,8 @@ class Redis: def new_feature_store(url: str='redis://localhost:6379/0', prefix: str='launchdarkly', max_connections: int=16, - caching: CacheConfig=CacheConfig.default()) -> CachingStoreWrapper: + caching: CacheConfig=CacheConfig.default(), + redis_opts: Dict[str, Any] = {}) -> CachingStoreWrapper: """ Creates a Redis-backed implementation of :class:`~ldclient.interfaces.FeatureStore`. For more details about how and why you can use a persistent feature store, see the @@ -164,11 +165,24 @@ def new_feature_store(url: str='redis://localhost:6379/0', :param prefix: a namespace prefix to be prepended to all Redis keys; defaults to ``DEFAULT_PREFIX`` :param max_connections: the maximum number of Redis connections to keep in the - connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS`` + connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS``. This + parameter will later be dropped in favor of setting + redis_opts['max_connections'] :param caching: specifies whether local caching should be enabled and if so, sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` + :param redis_opts: extra options for initializing Redis connection from the url, + see `redis.connection.ConnectionPool.from_url` for more details. Note that + if you set max_connections, this will take precedence over the + deprecated max_connections parameter. """ - core = _RedisFeatureStoreCore(url, prefix, max_connections) + + # WARN(deprecated): Remove the max_connection parameter from + # this signature and clean up this bit of code. + if 'max_connections' not in redis_opts: + redis_opts = redis_opts.copy() + redis_opts['max_connections'] = max_connections + + core = _RedisFeatureStoreCore(url, prefix, redis_opts) wrapper = CachingStoreWrapper(core, caching) wrapper._core = core # exposed for testing return wrapper @@ -176,7 +190,8 @@ def new_feature_store(url: str='redis://localhost:6379/0', @staticmethod def new_big_segment_store(url: str='redis://localhost:6379/0', prefix: str='launchdarkly', - max_connections: int=16) -> BigSegmentStore: + max_connections: int=16, + redis_opts: Dict[str, Any] = {}) -> BigSegmentStore: """ Creates a Redis-backed Big Segment store. @@ -197,9 +212,22 @@ def new_big_segment_store(url: str='redis://localhost:6379/0', :param prefix: a namespace prefix to be prepended to all Redis keys; defaults to ``DEFAULT_PREFIX`` :param max_connections: the maximum number of Redis connections to keep in the - connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS`` + connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS``. 
This + parameter will later be dropped in favor of setting + redis_opts['max_connections'] + :param redis_opts: extra options for initializing Redis connection from the url, + see `redis.connection.ConnectionPool.from_url` for more details. Note that + if you set max_connections, this will take precedence over the + deprecated max_connections parameter. """ - return _RedisBigSegmentStore(url, prefix, max_connections) + + # WARN(deprecated): Remove the max_connection parameter from + # this signature and clean up this bit of code. + if 'max_connections' not in redis_opts: + redis_opts = redis_opts.copy() + redis_opts['max_connections'] = max_connections + + return _RedisBigSegmentStore(url, prefix, redis_opts) class Files: """Provides factory methods for integrations with filesystem data. From 707dd6f61c8635ad2a56558f1f0d02774dc645b4 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Thu, 22 Sep 2022 22:46:23 -0400 Subject: [PATCH 292/356] Include wheel artifact when publishing package (#186) --- .ldrelease/build.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100755 .ldrelease/build.sh diff --git a/.ldrelease/build.sh b/.ldrelease/build.sh new file mode 100755 index 00000000..c826c9f1 --- /dev/null +++ b/.ldrelease/build.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ue + +echo "Installing requirements" +pip install -r requirements.txt || { echo "installing requirements.txt failed" >&2; exit 1; } +pip install wheel || { echo "installing wheel failed" >&2; exit 1; } + +echo "Running setup.py sdist bdist_wheel" +python setup.py sdist bdist_wheel || { echo "setup.py sdist bdist_wheel failed" >&2; exit 1; } From 435e5a0551a8937c4494511602e2ff817ef02f45 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 8 Dec 2022 17:29:45 -0800 Subject: [PATCH 293/356] remove warn-level logging done for every Big Segments query --- ldclient/impl/big_segments.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index b6a013d3..bcd6e2b8 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -81,7 +81,6 @@ def get_user_membership(self, user_key: str) -> Tuple[Optional[dict], str]: membership = self.__cache.get(user_key) if membership is None: user_hash = _hash_for_user_key(user_key) - log.warn("*** querying Big Segments for user hash: %s" % user_hash) try: membership = self.__store.get_membership(user_hash) if membership is None: From 228123a6b7973456f0db7801e242064c21093a11 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 8 Dec 2022 17:32:01 -0800 Subject: [PATCH 294/356] skip tests that use a self-signed TLS cert in Python 3.7 --- testing/test_ldclient_end_to_end.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/testing/test_ldclient_end_to_end.py b/testing/test_ldclient_end_to_end.py index 7003805a..3f550d0f 100644 --- a/testing/test_ldclient_end_to_end.py +++ b/testing/test_ldclient_end_to_end.py @@ -102,12 +102,12 @@ def test_client_sends_diagnostics(): data = json.loads(r.body) assert data['kind'] == 'diagnostic-init' -# The TLS tests are skipped in Python 3.3 because the embedded HTTPS server does not work correctly, causing +# The TLS tests are skipped in Python 3.7 because the embedded HTTPS server does not work correctly, causing # a TLS handshake failure on the client side. It's unclear whether this is a problem with the self-signed # certificate we are using or with some other server settings, but it does not appear to be a client-side -# problem. 
+# problem since we know that the SDK is able to connect to secure LD endpoints. -@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3") +@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7") def test_cannot_connect_with_selfsigned_cert_by_default(): with start_secure_server() as server: server.for_path('/sdk/latest-all', poll_content()) @@ -120,7 +120,7 @@ def test_cannot_connect_with_selfsigned_cert_by_default(): with LDClient(config = config, start_wait = 1.5) as client: assert not client.is_initialized() -@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3") +@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7") def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false(): with start_secure_server() as server: server.for_path('/sdk/latest-all', poll_content()) @@ -134,7 +134,7 @@ def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false(): with LDClient(config = config) as client: assert client.is_initialized() -@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3") +@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7") def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true(): with start_secure_server() as server: server.for_path('/sdk/latest-all', poll_content()) @@ -148,7 +148,7 @@ def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true(): with LDClient(config = config) as client: assert client.is_initialized() -@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3") +@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7") def test_can_connect_with_selfsigned_cert_by_setting_ca_certs(): with start_secure_server() as server: server.for_path('/sdk/latest-all', poll_content()) From 09d8b9629d1fec40d1cc6912969216266a865f2c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 09:31:06 -0800 Subject: [PATCH 295/356] (U2C 1) drop EOL Python versions (#189) * drop EOL Python versions * misc cleanup, show Python version in CI * add Python 3.11 CI job * add Python 3.11 to package metadata --- .circleci/config.yml | 13 +++++-------- .readthedocs.yml | 2 +- README.md | 2 +- ldclient/integrations/__init__.py | 3 --- setup.py | 3 +-- 5 files changed, 8 insertions(+), 15 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 007b5fb2..7b9bd2db 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,14 +6,6 @@ orbs: workflows: test: jobs: - - test-linux: - name: Python 3.5 - docker-image: cimg/python:3.5 - skip-sse-contract-tests: true # the test service app has dependencies that aren't available in 3.5, which is EOL anyway - skip-contract-tests: true # the test service app has dependencies that aren't available in 3.5, which is EOL anyway - - test-linux: - name: Python 3.6 - docker-image: cimg/python:3.6 - test-linux: name: Python 3.7 docker-image: cimg/python:3.7 @@ -26,6 +18,9 @@ workflows: - test-linux: name: Python 3.10 docker-image: cimg/python:3.10 + - test-linux: + name: Python 3.11 + docker-image: cimg/python:3.11 - test-windows: name: Windows Python 3 
py3: true @@ -57,6 +52,7 @@ jobs: - image: consul steps: - checkout + - run: python --version - run: name: install requirements command: | @@ -142,6 +138,7 @@ jobs: - run: name: install Python 3 command: choco install python --no-progress + - run: python --version - run: name: set up DynamoDB command: | diff --git a/.readthedocs.yml b/.readthedocs.yml index 2739d0f6..56781a23 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -1,7 +1,7 @@ version: 2 python: - version: 3.5 + version: 3.7 install: - requirements: docs/requirements.txt - requirements: requirements.txt diff --git a/README.md b/README.md index 8ea3a283..d5edcbad 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ ## Supported Python versions -This version of the LaunchDarkly SDK is compatible with Python 3.5 through 3.10. It is tested with the most recent patch releases of those versions. Python versions 2.7 to 3.4 are no longer supported. +This version of the LaunchDarkly SDK is compatible with Python 3.7 through 3.11. It is tested with the most recent patch releases of those versions. Python versions 2.7 to 3.6 are no longer supported. ## Getting started diff --git a/ldclient/integrations/__init__.py b/ldclient/integrations/__init__.py index b45b2cb3..93d9a3eb 100644 --- a/ldclient/integrations/__init__.py +++ b/ldclient/integrations/__init__.py @@ -41,9 +41,6 @@ def new_feature_store(host: str=None, store = Consul.new_feature_store() config = Config(feature_store=store) - Note that ``python-consul`` is not available for Python 3.3 or 3.4, so this feature cannot be - used in those Python versions. - :param host: hostname of the Consul server (uses ``localhost`` if omitted) :param port: port of the Consul server (uses 8500 if omitted) :param prefix: a namespace prefix to be prepended to all Consul keys diff --git a/setup.py b/setup.py index 6c2cbe43..c96ef429 100644 --- a/setup.py +++ b/setup.py @@ -64,12 +64,11 @@ def run(self): 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', ], From d068fdbaaf1fa1deb444110eb6213c7f24f4be6f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 09:32:15 -0800 Subject: [PATCH 296/356] (U2C 2) remove alias event functionality (#187) --- Makefile | 8 +++++++- contract-tests/client_entity.py | 3 --- contract-tests/service.py | 3 --- ldclient/client.py | 19 ------------------- ldclient/impl/event_factory.py | 9 --------- testing/test_ldclient.py | 13 ------------- 6 files changed, 7 insertions(+), 48 deletions(-) diff --git a/Makefile b/Makefile index ca4fa068..7c5f235b 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,12 @@ docs: TEMP_TEST_OUTPUT=/tmp/contract-test-service.log +# TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass +# Explanation of current skips: +# - "events/alias": preliminary removal of alias functionality before starting U2C implementation +TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ + -skip 'events/alias' + # port 8000 and 9000 is already used in the CI environment because we're # running a DynamoDB container and an SSE contract test PORT=10000 @@ -33,7 
+39,7 @@ start-contract-test-service-bg: @make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & run-contract-tests: - @curl -s https://raw.githubusercontent.com/launchdarkly/sdk-test-harness/v1.0.0/downloader/run.sh \ + curl -s https://raw.githubusercontent.com/launchdarkly/sdk-test-harness/main/downloader/run.sh \ | VERSION=v1 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end $(TEST_HARNESS_PARAMS)" sh contract-tests: build-contract-tests start-contract-test-service-bg run-contract-tests diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index 5d2d5220..ea4f34a7 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -72,9 +72,6 @@ def track(self, params): def identify(self, params): self.client.identify(params["user"]) - def alias(self, params): - self.client.alias(params["user"], params["previousUser"]) - def flush(self): self.client.flush() diff --git a/contract-tests/service.py b/contract-tests/service.py index d9f8e0a5..48340671 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -114,9 +114,6 @@ def post_client_command(id): elif params.get("command") == "identifyEvent": client.identify(params.get("identifyEvent")) return ('', 201) - elif params.get("command") == "aliasEvent": - client.alias(params.get("aliasEvent")) - return ('', 201) elif params.get('command') == "flushEvents": client.flush() return ('', 201) diff --git a/ldclient/client.py b/ldclient/client.py index 86a45e06..58ab766f 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -198,25 +198,6 @@ def track(self, event_name: str, user: dict, data: Optional[Any]=None, metric_va else: self._send_event(self._event_factory_default.new_custom_event(event_name, user, data, metric_value)) - def alias(self, current_user: dict, previous_user: dict): - """Associates two users for analytics purposes. - - This can be helpful in the situation where a person is represented by multiple - LaunchDarkly users. This may happen, for example, when a person initially logs into - an application, the person might be represented by an anonymous user prior to logging - in and a different user after logging in, as denoted by a different user key. - - :param current_user: The new version of a user. - :param previous_user: The old version of a user. - """ - if current_user is None or current_user.get('key') is None: - log.warning("Missing current_user or current_user key when calling alias().") - return None - if previous_user is None or previous_user.get('key') is None: - log.warning("Missing previous_user or previous_user key when calling alias().") - return None - self._send_event(self._event_factory_default.new_alias_event(current_user, previous_user)) - def identify(self, user: dict): """Registers the user. 
diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index 12823bed..d9ba5925 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -89,15 +89,6 @@ def new_custom_event(self, event_name, user, data, metric_value): e['contextKind'] = self._user_to_context_kind(user) return e - def new_alias_event(self, current_user, previous_user): - return { - 'kind': 'alias', - 'key': current_user.get('key'), - 'contextKind': self._user_to_context_kind(current_user), - 'previousKey': previous_user.get('key'), - 'previousContextKind': self._user_to_context_kind(previous_user) - } - def _user_to_context_kind(self, user): if user.get('anonymous'): return "anonymousUser" diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index e9a19c9a..4a708e4e 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -189,19 +189,6 @@ def test_track_anonymous_user(): assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == anonymous_user and e.get('data') is None and e.get('metricValue') is None and e.get('contextKind') == 'anonymousUser' -def test_alias(): - with make_client() as client: - client.alias(user, anonymous_user) - e = get_first_event(client) - assert e['kind'] == 'alias' and e['key'] == 'xyz' and e['contextKind'] == 'user' and e['previousKey'] == 'abc' and e['previousContextKind'] == 'anonymousUser' - - -def test_alias_no_user(): - with make_client() as client: - client.alias(None, None) - assert count_events(client) == 0 - - def test_defaults(): config=Config("SDK_KEY", base_uri="http://localhost:3000", defaults={"foo": "bar"}, offline=True) with LDClient(config=config) as client: From 8f9c22affc268dde280422ed6fa3b2b566e9f5ee Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 09:35:33 -0800 Subject: [PATCH 297/356] (U2C 3) remove inline users in events (#188) --- Makefile | 11 ++++- contract-tests/client_entity.py | 2 - ldclient/config.py | 10 ----- ldclient/diagnostics.py | 1 - ldclient/event_processor.py | 23 ++++------ testing/test_diagnostics.py | 8 ++-- testing/test_event_processor.py | 79 --------------------------------- 7 files changed, 21 insertions(+), 113 deletions(-) diff --git a/Makefile b/Makefile index 7c5f235b..f09ea3e5 100644 --- a/Makefile +++ b/Makefile @@ -20,9 +20,16 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass # Explanation of current skips: -# - "events/alias": preliminary removal of alias functionality before starting U2C implementation +# - We're preparing to migrate the SDK to U2C behavior, but so far we're still using the non-U2C contract +# tests (v1). +# - The non-U2C tests include alias events, which we have removed, so those tests are disabled. +# - Same for inline users in events. +# - Some custom event tests are disabled because in the v1 test suite, those require inline users. 
TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ - -skip 'events/alias' + -skip 'events/alias' \ + -skip 'events/user properties/inlineUsers=true' \ + -skip 'events/custom events/data and metricValue' \ + -skip 'events/custom events/basic properties/inline user' # port 8000 and 9000 is already used in the CI environment because we're # running a DynamoDB container and an SSE contract test diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index ea4f34a7..a100c245 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -30,8 +30,6 @@ def __init__(self, tag, config): opts["private_attribute_names"] = events.get("globalPrivateAttributes", {}) if events.get("flushIntervalMs") is not None: opts["flush_interval"] = events["flushIntervalMs"] / 1000.0 - if events.get("inlineUsers") is not None: - opts["inline_users_in_events"] = events["inlineUsers"] else: opts["send_events"] = False diff --git a/ldclient/config.py b/ldclient/config.py index dfe1a29a..feb4006f 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -167,7 +167,6 @@ def __init__(self, offline: bool=False, user_keys_capacity: int=1000, user_keys_flush_interval: float=300, - inline_users_in_events: bool=False, diagnostic_opt_out: bool=False, diagnostic_recording_interval: int=900, wrapper_name: Optional[str]=None, @@ -216,9 +215,6 @@ def __init__(self, one time, so that duplicate user details will not be sent in analytics events. :param user_keys_flush_interval: The interval in seconds at which the event processor will reset its set of known user keys. - :param inline_users_in_events: Whether to include full user details in every analytics event. - By default, events will only include the user key, except for one "index" event that provides the - full details for the user. 
:param feature_requester_class: A factory for a FeatureRequester implementation taking the sdk key and config :param event_processor_class: A factory for an EventProcessor implementation taking the config :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, @@ -264,7 +260,6 @@ def __init__(self, self.__offline = offline self.__user_keys_capacity = user_keys_capacity self.__user_keys_flush_interval = user_keys_flush_interval - self.__inline_users_in_events = inline_users_in_events self.__diagnostic_opt_out = diagnostic_opt_out self.__diagnostic_recording_interval = max(diagnostic_recording_interval, 60) self.__wrapper_name = wrapper_name @@ -298,7 +293,6 @@ def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': offline=self.__offline, user_keys_capacity=self.__user_keys_capacity, user_keys_flush_interval=self.__user_keys_flush_interval, - inline_users_in_events=self.__inline_users_in_events, diagnostic_opt_out=self.__diagnostic_opt_out, diagnostic_recording_interval=self.__diagnostic_recording_interval, wrapper_name=self.__wrapper_name, @@ -410,10 +404,6 @@ def user_keys_capacity(self) -> int: def user_keys_flush_interval(self) -> float: return self.__user_keys_flush_interval - @property - def inline_users_in_events(self) -> bool: - return self.__inline_users_in_events - @property def diagnostic_opt_out(self) -> bool: return self.__diagnostic_opt_out diff --git a/ldclient/diagnostics.py b/ldclient/diagnostics.py index e40b4ff0..d1c66ead 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/diagnostics.py @@ -78,7 +78,6 @@ def _create_diagnostic_config_object(config): 'pollingIntervalMillis': config.poll_interval * 1000, 'userKeysCapacity': config.user_keys_capacity, 'userKeysFlushIntervalMillis': config.user_keys_flush_interval * 1000, - 'inlineUsersInEvents': config.inline_users_in_events, 'diagnosticRecordingIntervalMillis': config.diagnostic_recording_interval * 1000, 'dataStoreType': _get_component_type_name(config.feature_store, config, 'memory')} diff --git a/ldclient/event_processor.py b/ldclient/event_processor.py index e13a0d2d..1f90f805 100644 --- a/ldclient/event_processor.py +++ b/ldclient/event_processor.py @@ -35,7 +35,6 @@ class EventOutputFormatter: def __init__(self, config): - self._inline_users = config.inline_users_in_events self._user_filter = UserFilter(config) def make_output_events(self, events, summary): @@ -59,7 +58,7 @@ def make_output_event(self, e): } if 'prereqOf' in e: out['prereqOf'] = e.get('prereqOf') - if self._inline_users or is_debug: + if is_debug: out['user'] = self._process_user(e) else: out['userKey'] = self._get_userkey(e) @@ -81,10 +80,7 @@ def make_output_event(self, e): 'creationDate': e['creationDate'], 'key': e['key'] } - if self._inline_users: - out['user'] = self._process_user(e) - else: - out['userKey'] = self._get_userkey(e) + out['userKey'] = self._get_userkey(e) if e.get('data') is not None: out['data'] = e['data'] if e.get('metricValue') is not None: @@ -310,14 +306,13 @@ def _process_event(self, event): # For each user we haven't seen before, we add an index event - unless this is already # an identify event for that user. 
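        # (Editor's sketch, not part of the patch: the decision implemented in the hunk
        # below, using names from the surrounding code, reduces to:
        #
        #     already_seen = self.notice_user(user)  # also marks the key as seen
        #     if event['kind'] == 'identify':
        #         pass  # an identify event already carries the full user
        #     elif already_seen:
        #         self._deduplicated_users += 1  # duplicate key: suppress the index event
        #     else:
        #         add_index_event = True  # first sighting: queue an index event
        # )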
- if not (add_full_event and self._config.inline_users_in_events): - user = event.get('user') - if user and 'key' in user: - is_index_event = event['kind'] == 'identify' - already_seen = self.notice_user(user) - add_index_event = not is_index_event and not already_seen - if not is_index_event and already_seen: - self._deduplicated_users += 1 + user = event.get('user') + if user and 'key' in user: + is_identify_event = event['kind'] == 'identify' + already_seen = self.notice_user(user) + add_index_event = not is_identify_event and not already_seen + if not is_identify_event and already_seen: + self._deduplicated_users += 1 if add_index_event: ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user } diff --git a/testing/test_diagnostics.py b/testing/test_diagnostics.py index c725e8d9..4423584e 100644 --- a/testing/test_diagnostics.py +++ b/testing/test_diagnostics.py @@ -42,7 +42,7 @@ def test_create_diagnostic_config_defaults(): test_config = Config("SDK_KEY") diag_config = _create_diagnostic_config_object(test_config) - assert len(diag_config) == 17 + assert len(diag_config) == 16 assert diag_config['customBaseURI'] is False assert diag_config['customEventsURI'] is False assert diag_config['customStreamURI'] is False @@ -57,7 +57,6 @@ def test_create_diagnostic_config_defaults(): assert diag_config['pollingIntervalMillis'] == 30000 assert diag_config['userKeysCapacity'] == 1000 assert diag_config['userKeysFlushIntervalMillis'] == 300000 - assert diag_config['inlineUsersInEvents'] is False assert diag_config['diagnosticRecordingIntervalMillis'] == 900000 assert diag_config['dataStoreType'] == 'memory' @@ -67,10 +66,10 @@ def test_create_diagnostic_config_custom(): events_max_pending=10, flush_interval=1, stream_uri='https://test.com', stream=False, poll_interval=60, use_ldd=True, feature_store=test_store, all_attributes_private=True, user_keys_capacity=10, user_keys_flush_interval=60, - inline_users_in_events=True, http=HTTPConfig(http_proxy = 'proxy', read_timeout=1, connect_timeout=1), diagnostic_recording_interval=60) + http=HTTPConfig(http_proxy = 'proxy', read_timeout=1, connect_timeout=1), diagnostic_recording_interval=60) diag_config = _create_diagnostic_config_object(test_config) - assert len(diag_config) == 17 + assert len(diag_config) == 16 assert diag_config['customBaseURI'] is True assert diag_config['customEventsURI'] is True assert diag_config['customStreamURI'] is True @@ -85,7 +84,6 @@ def test_create_diagnostic_config_custom(): assert diag_config['pollingIntervalMillis'] == 60000 assert diag_config['userKeysCapacity'] == 10 assert diag_config['userKeysFlushIntervalMillis'] == 60000 - assert diag_config['inlineUsersInEvents'] is True assert diag_config['diagnosticRecordingIntervalMillis'] == 60000 assert diag_config['dataStoreType'] == 'MyFavoriteStore' diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 363d980e..758f694f 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -160,58 +160,6 @@ def test_user_attrs_are_stringified_in_index_event(): check_feature_event(output[1], e, False, None, None) check_summary_event(output[2]) -def test_feature_event_can_contain_inline_user(): - with DefaultTestProcessor(inline_users_in_events = True) as ep: - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 2 - 
check_feature_event(output[0], e, False, user, None) - check_summary_event(output[1]) - -def test_user_is_filtered_in_feature_event(): - with DefaultTestProcessor(inline_users_in_events = True, all_attributes_private = True) as ep: - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 2 - check_feature_event(output[0], e, False, filtered_user, None) - check_summary_event(output[1]) - -def test_user_attrs_are_stringified_in_feature_event(): - with DefaultTestProcessor(inline_users_in_events = True) as ep: - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 2 - check_feature_event(output[0], e, False, stringified_numeric_user, None) - check_summary_event(output[1]) - -def test_index_event_is_still_generated_if_inline_users_is_true_but_feature_event_is_not_tracked(): - with DefaultTestProcessor(inline_users_in_events = True) as ep: - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': False - } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 2 - check_index_event(output[0], e, user) - check_summary_event(output[1]) - def test_two_events_for_same_user_only_produce_one_index_event(): with DefaultTestProcessor(user_keys_flush_interval = 300) as ep: e0 = { @@ -419,33 +367,6 @@ def test_custom_event_is_queued_with_user(): check_index_event(output[0], e, user) check_custom_event(output[1], e, None) -def test_custom_event_can_contain_inline_user(): - with DefaultTestProcessor(inline_users_in_events = True) as ep: - e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 1 - check_custom_event(output[0], e, user) - -def test_user_is_filtered_in_custom_event(): - with DefaultTestProcessor(inline_users_in_events = True, all_attributes_private = True) as ep: - e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '} } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 1 - check_custom_event(output[0], e, filtered_user) - -def test_user_attrs_are_stringified_in_custom_event(): - with DefaultTestProcessor(inline_users_in_events = True) as ep: - e = { 'kind': 'custom', 'key': 'eventkey', 'user': numeric_user, 'data': { 'thing': 'stuff '} } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 1 - check_custom_event(output[0], e, stringified_numeric_user) - def test_nothing_is_sent_if_there_are_no_events(): with DefaultTestProcessor() as ep: ep.flush() From fb544cd7281e634b5b34a6a80a144fa700b9711a Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 09:36:20 -0800 Subject: [PATCH 298/356] (U2C 4) remove deprecated things (#192) --- docs/api-deprecated.rst | 12 -- docs/index.rst | 1 - ldclient/client.py | 2 +- ldclient/flag.py | 27 ---- ldclient/flags_state.py | 8 -- ldclient/impl/evaluator.py | 2 +- ldclient/integrations/__init__.py | 28 +--- ldclient/interfaces.py | 4 +- ldclient/repeating_timer.py | 16 --- ldclient/sse_client.py | 213 ---------------------------- testing/test_flags_state.py | 2 +- 
testing/test_ldclient_evaluation.py | 3 +- 12 files changed, 8 insertions(+), 310 deletions(-) delete mode 100644 docs/api-deprecated.rst delete mode 100644 ldclient/flag.py delete mode 100644 ldclient/flags_state.py delete mode 100644 ldclient/repeating_timer.py delete mode 100644 ldclient/sse_client.py diff --git a/docs/api-deprecated.rst b/docs/api-deprecated.rst deleted file mode 100644 index 4b24254b..00000000 --- a/docs/api-deprecated.rst +++ /dev/null @@ -1,12 +0,0 @@ -Deprecated modules -=============================== - -ldclient.flag module --------------------- - -This module is deprecated. For the :class:`~ldclient.evaluation.EvaluationDetail` type, please use :mod:`ldclient.evaluation`. - -ldclient.flags_state module ---------------------------- - -This module is deprecated. For the :class:`~ldclient.evaluation.FeatureFlagsState` type, please use :mod:`ldclient.evaluation`. diff --git a/docs/index.rst b/docs/index.rst index aa03075e..8c601890 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -20,4 +20,3 @@ For more information, see LaunchDarkly's `Quickstart Any: def variation_detail(self, key: str, user: dict, default: Any) -> EvaluationDetail: """Determines the variation of a feature flag for a user, like :func:`variation()`, but also provides additional information about how this value was calculated, in the form of an - :class:`ldclient.flag.EvaluationDetail` object. + :class:`ldclient.evaluation.EvaluationDetail` object. Calling this method also causes the "reason" data to be included in analytics events, if you are capturing detailed event data for this flag. diff --git a/ldclient/flag.py b/ldclient/flag.py deleted file mode 100644 index 67dfa838..00000000 --- a/ldclient/flag.py +++ /dev/null @@ -1,27 +0,0 @@ - -# This module exists only for historical reasons. Previously, ldclient.flag contained a -# combination of public API types (EvaluationDetail) and implementation details (the evaluate() -# function, etc.). Our new convention is to keep all such implementation details within -# ldclient.impl and its submodules, to make it clear that applications should never try to -# reference them directly. Since some application code may have done so in the past, and since -# we do not want to move anything in the public API yet, we are retaining this module as a -# deprecated entry point and re-exporting some symbols. -# -# In the future, ldclient.evaluation will be the preferred entry point for the public types and -# ldclient.flag will be removed. - -from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail -from ldclient.impl.evaluator import Evaluator, EvalResult, error_reason -from ldclient.versioned_data_kind import FEATURES, SEGMENTS - -# Deprecated internal function for evaluating flags. -def evaluate(flag, user, store, event_factory) -> EvalResult: - evaluator = Evaluator( - lambda key: store.get(FEATURES, key), - lambda key: store.get(SEGMENTS, key), - lambda key: (None, BigSegmentsStatus.NOT_CONFIGURED) - ) - return evaluator.evaluate(flag, user, event_factory) - - -__all__ = ['EvaluationDetail', 'evaluate', 'error_reason', 'EvalResult'] diff --git a/ldclient/flags_state.py b/ldclient/flags_state.py deleted file mode 100644 index 4701031e..00000000 --- a/ldclient/flags_state.py +++ /dev/null @@ -1,8 +0,0 @@ - -# This module exists only for historical reasons. It only contained the FeatureFlagsState class, -# which is now in the ldclient.evaluation module. 
We are retaining this module as a deprecated -# entry point and re-exporting the class from ldclient.evaluation. -# -# In the future, ldclient.evaluation will be the preferred entry point and ldclient.flags_state -# will be removed. -from ldclient.evaluation import FeatureFlagsState diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 0fa9f088..4e4cc46f 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -9,7 +9,7 @@ from typing import Callable, Optional, Tuple # For consistency with past logging behavior, we are pretending that the evaluation logic still lives in -# the ldclient.flag module. +# the ldclient.evaluation module. log = logging.getLogger('ldclient.flag') __LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF) diff --git a/ldclient/integrations/__init__.py b/ldclient/integrations/__init__.py index 93d9a3eb..de2b10f8 100644 --- a/ldclient/integrations/__init__.py +++ b/ldclient/integrations/__init__.py @@ -161,24 +161,12 @@ def new_feature_store(url: str='redis://localhost:6379/0', :param url: the URL of the Redis host; defaults to ``DEFAULT_URL`` :param prefix: a namespace prefix to be prepended to all Redis keys; defaults to ``DEFAULT_PREFIX`` - :param max_connections: the maximum number of Redis connections to keep in the - connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS``. This - parameter will later be dropped in favor of setting - redis_opts['max_connections'] :param caching: specifies whether local caching should be enabled and if so, sets the cache properties; defaults to :func:`ldclient.feature_store.CacheConfig.default()` :param redis_opts: extra options for initializing Redis connection from the url, - see `redis.connection.ConnectionPool.from_url` for more details. Note that - if you set max_connections, this will take precedence over the - deprecated max_connections parameter. + see `redis.connection.ConnectionPool.from_url` for more details. """ - # WARN(deprecated): Remove the max_connection parameter from - # this signature and clean up this bit of code. - if 'max_connections' not in redis_opts: - redis_opts = redis_opts.copy() - redis_opts['max_connections'] = max_connections - core = _RedisFeatureStoreCore(url, prefix, redis_opts) wrapper = CachingStoreWrapper(core, caching) wrapper._core = core # exposed for testing @@ -208,22 +196,10 @@ def new_big_segment_store(url: str='redis://localhost:6379/0', :param url: the URL of the Redis host; defaults to ``DEFAULT_URL`` :param prefix: a namespace prefix to be prepended to all Redis keys; defaults to ``DEFAULT_PREFIX`` - :param max_connections: the maximum number of Redis connections to keep in the - connection pool; defaults to ``DEFAULT_MAX_CONNECTIONS``. This - parameter will later be dropped in favor of setting - redis_opts['max_connections'] :param redis_opts: extra options for initializing Redis connection from the url, - see `redis.connection.ConnectionPool.from_url` for more details. Note that - if you set max_connections, this will take precedence over the - deprecated max_connections parameter. + see `redis.connection.ConnectionPool.from_url` for more details. """ - # WARN(deprecated): Remove the max_connection parameter from - # this signature and clean up this bit of code. 
- if 'max_connections' not in redis_opts: - redis_opts = redis_opts.copy() - redis_opts['max_connections'] = max_connections - return _RedisBigSegmentStore(url, prefix, redis_opts) class Files: diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index a863319f..84fe92d7 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -341,7 +341,7 @@ def available(self) -> bool: If this property is False, the store is not able to make queries (for instance, it may not have a valid database connection). In this case, the SDK will treat any reference to a Big Segment - as if no users are included in that segment. Also, the :func:`ldclient.flag.EvaluationDetail.reason` + as if no users are included in that segment. Also, the :func:`ldclient.evaluation.EvaluationDetail.reason` associated with with any flag evaluation that references a Big Segment when the store is not available will have a `bigSegmentsStatus` of `"STORE_ERROR"`. """ @@ -356,7 +356,7 @@ def stale(self) -> bool: This may indicate that the LaunchDarkly Relay Proxy, which populates the store, has stopped running or has become unable to receive fresh data from LaunchDarkly. Any feature flag evaluations that reference a Big Segment will be using the last known data, which may be out - of date. Also, the :func:`ldclient.flag.EvaluationDetail.reason` associated with those evaluations + of date. Also, the :func:`ldclient.evaluation.EvaluationDetail.reason` associated with those evaluations will have a `bigSegmentsStatus` of `"STALE"`. """ return self.__stale diff --git a/ldclient/repeating_timer.py b/ldclient/repeating_timer.py deleted file mode 100644 index 1f160c63..00000000 --- a/ldclient/repeating_timer.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -Internal helper class for repeating tasks. -""" -# currently excluded from documentation - see docs/README.md - -from ldclient.impl.repeating_task import RepeatingTask - -class RepeatingTimer(RepeatingTask): - """ - Deprecated internal class, retained until the next major version in case any application code was - referencing it. This was used in situations where we did not want the callback to execute - immediately, but to always wait for the interval first, so we are setting both the interval - parameter and the initial_delay parameter of RepeatingTask to the same value. - """ - def __init__(self, interval, callable): - super().init(self, interval, interval, callable) diff --git a/ldclient/sse_client.py b/ldclient/sse_client.py deleted file mode 100644 index 80dea242..00000000 --- a/ldclient/sse_client.py +++ /dev/null @@ -1,213 +0,0 @@ -# -# This deprecated implementation was based on: -# https://bitbucket.org/btubbs/sseclient/src/a47a380a3d7182a205c0f1d5eb470013ce796b4d/sseclient.py?at=default&fileviewer=file-view-default -# -# It has the following known issues: -# - It does not properly handle line terminators other than \n. -# - It does not properly handle multi-line data that starts with a blank line. -# - It fails if a multi-byte character is split across chunks of the stream. -# -# It is replaced by the ldclient.impl.sse module. -# -import re -import time - -import urllib3 - -from ldclient.config import HTTPConfig -from ldclient.impl.http import HTTPFactory -from ldclient.util import log -from ldclient.util import throw_if_unsuccessful_response - -# Technically, we should support streams that mix line endings. This regex, -# however, assumes that a system will provide consistent line endings. 
-end_of_field = re.compile(r'\r\n\r\n|\r\r|\n\n') - - -class SSEClient: - """ - This class is deprecated and no longer used in the SDK. It is retained here for backward compatibility in case - any external code was referencing it, but it will be removed in a future major version. - """ - def __init__(self, url, last_id=None, retry=3000, connect_timeout=10, read_timeout=300, chunk_size=10000, - verify_ssl=False, http=None, http_proxy=None, http_factory=None, **kwargs): - self.url = url - self.last_id = last_id - self.retry = retry - self._chunk_size = chunk_size - - if http_factory: - self._timeout = http_factory.timeout - base_headers = http_factory.base_headers - else: - # for backward compatibility in case anyone else is using this class - self._timeout = urllib3.Timeout(connect=connect_timeout, read=read_timeout) - base_headers = {} - - # Optional support for passing in an HTTP client - if http: - self.http = http - else: - hf = http_factory - if hf is None: # build from individual parameters which we're only retaining for backward compatibility - hc = HTTPConfig( - connect_timeout=connect_timeout, - read_timeout=read_timeout, - disable_ssl_verification=not verify_ssl, - http_proxy=http_proxy - ) - hf = HTTPFactory({}, hc) - self.http = hf.create_pool_manager(1, url) - - # Any extra kwargs will be fed into the request call later. - self.requests_kwargs = kwargs - - # The SSE spec requires making requests with Cache-Control: nocache - if 'headers' not in self.requests_kwargs: - self.requests_kwargs['headers'] = {} - - self.requests_kwargs['headers'].update(base_headers) - - self.requests_kwargs['headers']['Cache-Control'] = 'no-cache' - - # The 'Accept' header is not required, but explicit > implicit - self.requests_kwargs['headers']['Accept'] = 'text/event-stream' - - # Keep data here as it streams in - self.buf = u'' - - self._connect() - - def _connect(self): - if self.last_id: - self.requests_kwargs['headers']['Last-Event-ID'] = self.last_id - - # Use session if set. Otherwise fall back to requests module. - self.resp = self.http.request( - 'GET', - self.url, - timeout=self._timeout, - preload_content=False, - retries=0, # caller is responsible for implementing appropriate retry semantics, e.g. backoff - **self.requests_kwargs) - - # Raw readlines doesn't work because we may be missing newline characters until the next chunk - # For some reason, we also need to specify a chunk size because stream=True doesn't seem to guarantee - # that we get the newlines in a timeline manner - self.resp_file = self.resp.stream(amt=self._chunk_size) - - # TODO: Ensure we're handling redirects. Might also stick the 'origin' - # attribute on Events like the Javascript spec requires. - throw_if_unsuccessful_response(self.resp) - - def _event_complete(self): - return re.search(end_of_field, self.buf[len(self.buf)-self._chunk_size-10:]) is not None # Just search the last chunk plus a bit - - def __iter__(self): - return self - - def __next__(self): - while not self._event_complete(): - try: - nextline = next(self.resp_file) - # There are some bad cases where we don't always get a line: https://github.com/requests/requests/pull/2431 - if not nextline: - raise EOFError() - self.buf += nextline.decode("utf-8") - except (StopIteration, EOFError) as e: - if self.retry: - # This retry logic is not what we want in the SDK. It's retained here for backward compatibility in case - # anyone else is using SSEClient. 
- time.sleep(self.retry / 1000.0) - self._connect() - - # The SSE spec only supports resuming from a whole message, so - # if we have half a message we should throw it out. - head, sep, tail = self.buf.rpartition('\n') - self.buf = head + sep - continue - else: - raise - - split = re.split(end_of_field, self.buf) - head = split[0] - tail = "".join(split[1:]) - - self.buf = tail - msg = Event.parse(head) - - # If the server requests a specific retry delay, we need to honor it. - if msg.retry: - self.retry = msg.retry - - # last_id should only be set if included in the message. It's not - # forgotten if a message omits it. - if msg.id: - self.last_id = msg.id - - return msg - - -class Event: - - sse_line_pattern = re.compile('(?P[^:]*):?( ?(?P.*))?') - - def __init__(self, data='', event='message', id=None, retry=None): - self.data = data - self.event = event - self.id = id - self.retry = retry - - def dump(self): - lines = [] - if self.id: - lines.append('id: %s' % self.id) - - # Only include an event line if it's not the default already. - if self.event != 'message': - lines.append('event: %s' % self.event) - - if self.retry: - lines.append('retry: %s' % self.retry) - - lines.extend('data: %s' % d for d in self.data.split('\n')) - return '\n'.join(lines) + '\n\n' - - @classmethod - def parse(cls, raw): - """ - Given a possibly-multiline string representing an SSE message, parse it - and return a Event object. - """ - msg = cls() - for line in raw.split('\n'): - m = cls.sse_line_pattern.match(line) - if m is None: - # Malformed line. Discard but warn. - log.warning('Invalid SSE line: "%s"' % line) - continue - - name = m.groupdict()['name'] - value = m.groupdict()['value'] - if name == '': - # line began with a ":", so is a comment. Ignore - continue - - if name == 'data': - # If we already have some data, then join to it with a newline. - # Else this is it. 
- if msg.data: - msg.data = '%s\n%s' % (msg.data, value) - else: - msg.data = value - elif name == 'event': - msg.event = value - elif name == 'id': - msg.id = value - elif name == 'retry': - msg.retry = int(value) - - return msg - - def __str__(self): - return self.data diff --git a/testing/test_flags_state.py b/testing/test_flags_state.py index 1acdbaf8..5a9f43b2 100644 --- a/testing/test_flags_state.py +++ b/testing/test_flags_state.py @@ -1,7 +1,7 @@ import pytest import json import jsonpickle -from ldclient.flags_state import FeatureFlagsState +from ldclient.evaluation import FeatureFlagsState def test_can_get_flag_value(): state = FeatureFlagsState(True) diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index faa3f5b6..6d3c0edf 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -3,9 +3,8 @@ import time from ldclient.client import LDClient, Config from ldclient.config import BigSegmentsConfig -from ldclient.evaluation import BigSegmentsStatus +from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.feature_store import InMemoryFeatureStore -from ldclient.flag import EvaluationDetail from ldclient.impl.big_segments import _hash_for_user_key from ldclient.impl.evaluator import _make_big_segment_ref from ldclient.interfaces import FeatureStore From a9525e001df6dab7b3ce9f2e2d2219fe6485e176 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 09:37:04 -0800 Subject: [PATCH 299/356] remove warn-level logging done for every Big Segments query (#190) * remove warn-level logging done for every Big Segments query * skip tests that use a self-signed TLS cert in Python 3.7 --- ldclient/impl/big_segments.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index b6a013d3..bcd6e2b8 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -81,7 +81,6 @@ def get_user_membership(self, user_key: str) -> Tuple[Optional[dict], str]: membership = self.__cache.get(user_key) if membership is None: user_hash = _hash_for_user_key(user_key) - log.warn("*** querying Big Segments for user hash: %s" % user_hash) try: membership = self.__store.get_membership(user_hash) if membership is None: From e9daa494fda84954fe2b158df2166ef0e303540b Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 12:03:38 -0800 Subject: [PATCH 300/356] implement context model --- ldclient/__init__.py | 10 + ldclient/context.py | 930 ++++++++++++++++++++++++++++++++++++++++ testing/test_context.py | 281 ++++++++++++ 3 files changed, 1221 insertions(+) create mode 100644 ldclient/context.py create mode 100644 testing/test_context.py diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 13f31802..6468033f 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -5,6 +5,7 @@ from ldclient.rwlock import ReadWriteLock from ldclient.version import VERSION from .client import * +from .context import * from .util import log __version__ = VERSION @@ -95,3 +96,12 @@ def _reset_client(): __BASE_TYPES__ = (str, float, int, bool) + + +__all__ = [ + 'Config', + 'Context', + 'ContextBuilder', + 'ContextMultiBuilder', + 'LDClient' +] diff --git a/ldclient/context.py b/ldclient/context.py new file mode 100644 index 00000000..f5db344f --- /dev/null +++ b/ldclient/context.py @@ -0,0 +1,930 @@ +""" +This submodule implements the SDK's evaluation context model. 
+""" + +from __future__ import annotations +from collections.abc import Iterable +import json +import re +from typing import Any, Optional, Union + + +__VALID_KIND_REGEX = re.compile('^[-a-zA-Z0-9._]+$') + +def _escape_key_for_fully_qualified_key(key: str) -> str: + # When building a fully-qualified key, ':' and '%' are percent-escaped; we do not use a full + # URL-encoding function because implementations of this are inconsistent across platforms. + return key.replace('%', '%25').replace(':', '%3A') + +def _validate_kind(kind: str) -> Optional[str]: + if kind == 'kind': + return '"kind" is not a valid context kind' + if kind == 'multi': + return 'context of kind "multi" must be created with create_multi or multi_builder' + if not __VALID_KIND_REGEX.match(kind): + return 'context kind contains disallowed characters' + return None + + +class Context: + """ + A collection of attributes that can be referenced in flag evaluations and analytics events. + This entity is also called an "evaluation context." + + To create a Context of a single kind, such as a user, you may use :func:`create()` when only the + key and the kind are relevant; or, to specify other attributes, use :func:`builder()`. + + To create a Context with multiple kinds (a multi-context), use :func:`create_multi()` or + :func:`multi_builder()`. + + A Context can be in an error state if it was built with invalid attributes. See :func:`valid()` + and :func:`error()`. + + A Context is immutable once created. + """ + + DEFAULT_KIND = 'user' + """A constant for the default context kind of "user".""" + + MULTI_KIND = 'multi' + """A constant for the kind that all multi-contexts have.""" + + def __init__( + self, + kind: Optional[str], + key: str, + name: Optional[str] = None, + anonymous: bool = False, + attributes: Optional[dict] = None, + private_attributes: Optional[list[str]] = None, + multi_contexts: Optional[list[Context]] = None, + allow_empty_key: bool = False, + error: Optional[str] = None + ): + """ + Constructs an instance, setting all properties. Avoid using this constructor directly. + + Applications should not normally use this constructor; the intended pattern is to use + factory methods or builders. Calling this constructor directly may result in some context + validation being skipped. + """ + if error is not None: + self.__make_invalid(error) + return + if multi_contexts is not None: + if len(multi_contexts) == 0: + self.__make_invalid('multi-context must contain at least one kind') + return + # Sort them by kind; they need to be sorted for computing a fully-qualified key, but even + # if fully_qualified_key is never used, this is helpful for __eq__ and determinacy. 
+            multi_contexts = sorted(multi_contexts, key=lambda c: c.kind)
+            last_kind = None
+            errors = None  # type: Optional[list[str]]
+            full_key = ''
+            for c in multi_contexts:
+                if c.error is not None:
+                    if errors is None:
+                        errors = []
+                    errors.append(c.error)
+                    continue
+                if c.kind == last_kind:
+                    self.__make_invalid('multi-kind context cannot have same kind more than once')
+                    return
+                last_kind = c.kind
+                if full_key != '':
+                    full_key += ':'
+                full_key += c.kind + ':' + _escape_key_for_fully_qualified_key(c.key)
+            if errors:
+                self.__make_invalid(', '.join(errors))
+                return
+            self.__kind = 'multi'
+            self.__multi = multi_contexts  # type: Optional[list[Context]]
+            self.__key = ''
+            self.__name = None
+            self.__anonymous = False
+            self.__attributes = None
+            self.__private = None
+            self.__full_key = full_key
+            self.__error = None  # type: Optional[str]
+            return
+        if kind is None or kind == '':
+            kind = Context.DEFAULT_KIND
+        kind_error = _validate_kind(kind)
+        if kind_error:
+            self.__make_invalid(kind_error)
+            return
+        if key == '' and not allow_empty_key:
+            self.__make_invalid('context key must not be null or empty')
+            return
+        self.__key = key
+        self.__kind = kind
+        self.__name = name
+        self.__anonymous = anonymous
+        self.__attributes = attributes
+        self.__private = private_attributes
+        self.__multi = None
+        self.__full_key = key if kind == Context.DEFAULT_KIND else \
+            '%s:%s' % (kind, _escape_key_for_fully_qualified_key(key))
+        self.__error = None
+
+    @classmethod
+    def create(cls, key: str, kind: Optional[str] = None) -> Context:
+        """
+        Creates a single-kind Context with only the key and the kind specified.
+
+        If you omit the kind, it defaults to "user" (:const:`DEFAULT_KIND`).
+
+        :param key: the context key
+        :param kind: the context kind; if omitted, it is :const:`DEFAULT_KIND` ("user")
+        :return: a context
+        """
+        return Context(kind, key, None, False, None, None, None, False)
+
+    @classmethod
+    def create_multi(cls, *contexts: Context) -> Context:
+        """
+        Creates a multi-context out of the specified single-kind Contexts.
+
+        To create a Context for a single context kind, use :func:`create()` or
+        :func:`builder()`.
+
+        For the returned Context to be valid, the contexts list must not be empty, and all of its
+        elements must be valid Contexts. Otherwise, the returned Context will be invalid as
+        reported by :func:`error()`.
+
+        If only one context parameter is given, the method returns that same context.
+
+        If a nested context is a multi-context, this is exactly equivalent to adding each of the
+        individual kinds from it separately. See :func:`ldclient.context.ContextMultiBuilder.add()`.
+
+        :param contexts: the individual contexts
+        :return: a multi-context
+        """
+        return Context(None, '', None, False, None, None, list(contexts))
+
+    @classmethod
+    def from_dict(cls, props: dict) -> Context:
+        """
+        Creates a Context from properties in a dictionary, corresponding to the JSON
+        representation of a context or a user.
+
+        If the dictionary has a "kind" property, then it is interpreted as a context using
+        the LaunchDarkly JSON schema for contexts. If it does not have a "kind" property, it
+        is interpreted as a context with "user" kind using the somewhat different LaunchDarkly
+        JSON schema for users in older LaunchDarkly SDKs.
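+
+        For example, these two calls produce equivalent contexts, the second using the
+        older user format::
+
+            c1 = Context.from_dict({'kind': 'user', 'key': 'my-user-key', 'name': 'Sandy'})
+            c2 = Context.from_dict({'key': 'my-user-key', 'name': 'Sandy'})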
+ + :param props: the context/user properties + :return: a context + """ + if 'kind' not in props: + return Context.__from_dict_old_user(props) + kind = props['kind'] + if not isinstance(kind, str): + return Context.__create_with_schema_type_error('kind') + if kind == 'multi': + b = ContextMultiBuilder() + for k, v in props.items(): + if k != 'kind': + if not isinstance(v, dict): + return Context.__create_with_schema_type_error(k) + c = Context.__from_dict_single(v, k) + b.add(c) + return b.build() + return Context.__from_dict_single(props, props['kind']) + + @classmethod + def builder(cls, key: str) -> ContextBuilder: + """ + Creates a builder for building a Context. + + You may use :class:`ldclient.ContextBuilder` methods to set additional attributes and/or + change the context kind before calling :func:`ldclient.ContextBuilder.build()`. If you + do not change any values, the defaults for the LDContext are that its `kind` is :const:`DEFAULT_KIND`, + its `key` is set to the key parameter specified here, `anonymous` is False, and it has no values for + any other attributes. + + This method is for building a Context that has only a single kind. To define a multi-context, + use :func:`create_multi()` or :func:`multi_builder()`. + + :param key: the context key + :return: a new builder + """ + return ContextBuilder(key) + + @classmethod + def multi_builder(cls) -> ContextMultiBuilder: + """ + Creates a builder for building a multi-context. + + This method is for building a Context that contains multiple contexts, each for a different + context kind. To define a single context, use :func:`create()` or :func:`builder()` instead. + + The difference between this method and :func:`create_multi()` is simply that the builder + allows you to add contexts one at a time, if that is more convenient for your logic. + + :return: a new builder + """ + return ContextMultiBuilder() + + @property + def valid(self) -> bool: + """ + True for a valid Context, or False for an invalid one. + + A valid context is one that can be used in SDK operations. An invalid context is one that + is missing necessary attributes or has invalid attributes, indicating an incorrect usage + of the SDK API. The only ways for a context to be invalid are: + + * The `kind` property had a disallowed value. See :func:`kind()`. + * For a single context, the `key` property was null or empty. + * You tried to create a multi-context without specifying any contexts. + * You tried to create a multi-context using the same context kind more than once. + * You tried to create a multi-context where at least one of the individual Contexts was invalid. + + In any of these cases, `valid` will return false, and :func:`error()` will return a + description of the error. + + Since in normal usage it is easy for applications to be sure they are using context kinds + correctly, and because throwing an exception is undesirable in application code that uses + LaunchDarkly, the SDK stores the error state in the Context itself and checks for such + errors at the time the Context is used, such as in a flag evaluation. At that point, if + the context is invalid, the operation will fail in some well-defined way as described in + the documentation for that method, and the SDK will generally log a warning as well. But + in any situation where you are not sure if you have a valid Context, you can check + :func:`valid()` or :func:`error()`. 
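+
+        For example::
+
+            good = Context.create('my-key')
+            assert good.valid
+
+            bad = Context.create('')
+            assert not bad.valid
+            print(bad.error)  # "context key must not be null or empty"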
+ """ + return self.__error is None + + @property + def error(self) -> Optional[str]: + """ + Returns None for a valid Context, or an error message for an invalid one. + + If this is None, then :func:`valid()` is True. If it is not None, then :func:`valid()` is + False. + """ + return self.__error + + @property + def multiple(self) -> bool: + """ + True if this is a multi-context. + + If this value is True, then :func:`kind()` is guaranteed to be :const:`MULTI_KIND`, and + you can inspect the individual context for each kind with :func:`get_individual_context()`. + + If this value is False, then :func:`kind()` is guaranteed to return a value that is not + :const:`MULTI_KIND`. + """ + return self.__multi is not None + + @property + def kind(self) -> str: + """ + Returns the context's `kind` attribute. + + Every valid context has a non-empty kind. For multi-contexts, this value is + :const:`MULTI_KIND` and the kinds within the context can be inspected with + :func:`get_individual_context()`. + """ + return self.__kind + + @property + def key(self) -> str: + """ + Returns the context's `key` attribute. + + For a single context, this value is set by :func:`create`, or :func:`ldclient.ContextBuilder.key()`. + + For a multi-context, there is no single value and :func:`key()` returns an empty string. Use + :func:`get_individual_context()` to get the context for a particular kind, then call :func:`key()` + on it. + """ + return self.__key + + @property + def name(self) -> Optional[str]: + """ + Returns the context's `name` attribute. + + For a single context, this value is set by :func:`ldclient.ContextBuilder.name()`. It is + None if no value was set. + + For a multi-context, there is no single value and :func:`name()` returns null. Use + :func:`get_individual_context()` to get the context for a particular kind, then call :func:`name()` + on it. + """ + return self.__name + + @property + def anonymous(self) -> bool: + """ + Returns True if this context is only intended for flag evaluations and will not be + indexed by LaunchDarkly. + + The default value is False. False means that this Context represents an entity such as a + user that you want to be able to see on the LaunchDarkly dashboard. + + Setting `anonymous` to true excludes this context from the database that is + used by the dashboard. It does not exclude it from analytics event data, so it is + not the same as making attributes private; all non-private attributes will still be + included in events and data export. There is no limitation on what other attributes + may be included (so, for instance, `anonymous` does not mean there is no `name`), + and the context will still have whatever `key` you have given it. + + This value is also addressable in evaluations as the attribute name "anonymous". It + is always treated as a boolean true or false in evaluations. + """ + return self.__anonymous + + def get(self, attribute: str) -> Any: + """ + Looks up the value of any attribute of the context by name. + + For a single-kind context, the attribute name can be any custom attribute that was set + by :func:`ldclient.context.ContextBuilder.set()`. It can also be one of the built-in ones + like "kind", "key", or "name"; in such cases, it is equivalent to :func:`kind`, + :func:`key`, or :fund:`name`. + + For a multi-context, the only supported attribute name is "kind". Use + :func:`get_individual_context()` to get the context for a particular kind and then get + its attributes. + + If the value is found, the return value is the attribute value. 
+        If there is no such attribute, the return value is None. An attribute that actually
+        exists cannot have a value of None.
+
+        :param attribute: the desired attribute name
+        :return: the attribute value, or None if there is no such attribute
+        """
+        if attribute == 'key':
+            return self.__key
+        if attribute == 'kind':
+            return self.__kind
+        if attribute == 'name':
+            return self.__name
+        if attribute == 'anonymous':
+            return self.__anonymous
+        if self.__attributes is None:
+            return None
+        return self.__attributes.get(attribute)
+
+    @property
+    def individual_context_count(self) -> int:
+        """
+        Returns the number of context kinds in this context.
+
+        For a valid individual context, this returns 1. For a multi-context, it returns the number
+        of context kinds. For an invalid context, it returns zero.
+
+        :return: the number of context kinds
+        """
+        if self.__error is not None:
+            return 0
+        if self.__multi is None:
+            return 1
+        return len(self.__multi)
+
+    def get_individual_context(self, kind: Union[int, str]) -> Optional[Context]:
+        """
+        Returns the single-kind Context corresponding to one of the kinds in this context.
+
+        The `kind` parameter can be either a number representing a zero-based index, or a string
+        representing a context kind.
+
+        If this method is called on a single-kind Context, then the only allowable value for
+        `kind` is either zero or the same value as the Context's :func:`kind`, and the return
+        value on success is the same Context.
+
+        If the method is called on a multi-context, and `kind` is a number, it must be a
+        non-negative index that is less than the number of kinds (that is, less than the return
+        value of :func:`individual_context_count`), and the return value on success is one of
+        the individual Contexts within. Or, if `kind` is a string, it must match the context
+        kind of one of the individual contexts.
+
+        If there is no context corresponding to `kind`, the method returns None.
+
+        :param kind: the index or string value of a context kind
+        :return: the context corresponding to that index or kind, or None if none
+        """
+        if self.__error is not None:
+            return None
+        if isinstance(kind, str):
+            if self.__multi is None:
+                return self if kind == self.__kind else None
+            for c in self.__multi:
+                if c.kind == kind:
+                    return c
+            return None
+        if self.__multi is None:
+            return self if kind == 0 else None
+        if kind < 0 or kind >= len(self.__multi):
+            return None
+        return self.__multi[kind]
+
+    @property
+    def custom_attributes(self) -> Iterable[str]:
+        """
+        Gets the names of all non-built-in attributes that have been set in this context.
+
+        For a single-kind context, this includes all the names that were passed to
+        :func:`ldclient.ContextBuilder.set()` as long as the values were not None (since a
+        value of None in LaunchDarkly is equivalent to the attribute not being set).
+
+        For a multi-context, there are no such names.
+
+        :return: an iterable
+        """
+        return () if self.__attributes is None else self.__attributes
+
+    @property
+    def private_attributes(self) -> Iterable[str]:
+        """
+        Gets the list of all attribute references marked as private for this specific Context.
+
+        This includes all attribute names/paths that were specified with
+        :func:`ldclient.ContextBuilder.private()`.
+
+        :return: an iterable
+        """
+        return () if self.__private is None else self.__private
+
+    @property
+    def fully_qualified_key(self) -> str:
+        """
+        A string that describes the Context uniquely based on `kind` and `key` values.
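+
+        For example::
+
+            Context.create('my-key').fully_qualified_key         # 'my-key'
+            Context.create('my-key', 'org').fully_qualified_key  # 'org:my-key'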
+ + This value is used whenever LaunchDarkly needs a string identifier based on all of the + `kind` and `key` values in the context. Applications typically do not need to use it. + """ + return self.__full_key + + def to_dict(self) -> dict[str, Any]: + """ + Returns a dictionary of properties corresponding to the JSON representation of the + context (as an associative array), in the standard format used by LaunchDarkly SDKs. + + Use this method if you are passing context data to the front end for use with the + LaunchDarkly JavaScript SDK. + + :return: a dictionary corresponding to the JSON representation + """ + if not self.valid: + return {} + ret = {"kind": self.__kind} # type: dict[str, Any] + if self.__multi is not None: + ret = {"kind": "multi"} + for c in self.__multi: + ret[c.kind] = c.__to_dict_single(False) + return ret + return self.__to_dict_single(True) + + def to_json_string(self) -> str: + """ + Returns the JSON representation of the context as a string, in the standard format + used by LaunchDarkly SDKs. + + This is equivalent to calling :func:`to_dict()` and then `json.dumps()`. + + :return: the JSON representation as a string + """ + return json.dumps(self.to_dict(), separators=(',', ':')) + + def __to_dict_single(self, with_kind: bool) -> dict[str, Any]: + ret = {"key": self.__key} # type: dict[str, Any] + if with_kind: + ret["kind"] = self.__kind + if self.__name is not None: + ret["name"] = self.__name + if self.__anonymous: + ret["anonymous"] = True + if self.__attributes is not None: + for k, v in self.__attributes.items(): + ret[k] = v + if self.__private is not None: + ret["_meta"] = {"privateAttributes": self.__private} + return ret + + @classmethod + def __from_dict_single(self, props: dict, kind: Optional[str]) -> Context: + b = ContextBuilder('') + if kind is not None: + b.kind(kind) + for k, v in props.items(): + if k == '_meta': + if not isinstance(v, dict): + return Context.__create_with_schema_type_error(k) + p = v.get("privateAttributes") + if p is not None: + if not isinstance(p, list): + return Context.__create_with_schema_type_error("privateAttributes") + for pa in p: + if not isinstance(pa, str): + return Context.__create_with_schema_type_error("privateAttributes") + b.private(pa) + else: + if not b.try_set(k, v): + return Context.__create_with_schema_type_error(k) + return b.build() + + @classmethod + def __from_dict_old_user(self, props: dict) -> Context: + b = ContextBuilder('').kind('user') + has_key = False + for k, v in props.items(): + if k == 'custom': + if not isinstance(v, dict): + return Context.__create_with_schema_type_error(k) + for k1, v1 in v.items(): + b.set(k1, v1) + elif k == 'privateAttributeNames': + if not isinstance(v, list): + return Context.__create_with_schema_type_error(k) + for pa in v: + if not isinstance(pa, str): + return Context.__create_with_schema_type_error(k) + b.private(pa) + else: + if not b.try_set(k, v): + return Context.__create_with_schema_type_error(k) + if k == 'key': + has_key = True + b._allow_empty_key(has_key) + return b.build() + + def __repr__(self) -> str: + """ + Returns a standard string representation of a context. + + For a valid Context, this is currently defined as being the same as the JSON representation, + since that is the simplest way to represent all of the Context properties. However, application + code should not rely on `__repr__` always being the same as the JSON representation. If you + specifically want the latter, use :func:`to_json_string()`. 
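+
+        For example::
+
+            repr(Context.create('my-key'))  # '{"key":"my-key","kind":"user"}'
+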
+        For an invalid Context, `__repr__` returns a description of why it is invalid.
+
+        :return: a string representation
+        """
+        if not self.valid:
+            return "[invalid context: %s]" % self.__error
+        return self.to_json_string()
+
+    def __eq__(self, other) -> bool:
+        """
+        Compares contexts for deep equality of their attributes.
+
+        :return: True if the Contexts are equal
+        """
+        if not isinstance(other, Context):
+            return False
+        if self.__kind != other.__kind or self.__key != other.__key or self.__name != other.__name or \
+                self.__anonymous != other.__anonymous or self.__attributes != other.__attributes or \
+                self.__private != other.__private or self.__error != other.__error:
+            return False
+        # Note that it's OK to compare __attributes because Python does a deep-equality check for dicts,
+        # and it's OK to compare __private_attributes because we have canonicalized them by sorting.
+        if self.__multi is None:
+            return True  # we already know the other context isn't a multi-context due to checking kind
+        if other.__multi is None or len(other.__multi) != len(self.__multi):
+            return False
+        for i in range(len(self.__multi)):
+            if other.__multi[i] != self.__multi[i]:
+                return False
+        return True
+
+    def __ne__(self, other) -> bool:
+        return not self.__eq__(other)
+
+    def __make_invalid(self, error: str):
+        self.__error = error
+        self.__kind = ''
+        self.__key = ''
+        self.__name = None
+        self.__anonymous = False
+        self.__attributes = None
+        self.__private = None
+        self.__multi = None
+        self.__full_key = ''
+
+    @classmethod
+    def __create_with_error(cls, error: str) -> Context:
+        return Context('', '', None, False, None, None, None, False, error)
+
+    @classmethod
+    def __create_with_schema_type_error(cls, propname: str) -> Context:
+        return Context.__create_with_error('invalid data type for "%s"' % propname)
+
+
+class ContextBuilder:
+    """
+    A mutable object that uses the builder pattern to specify properties for :class:`ldclient.Context`.
+
+    Use this type if you need to construct a context that has only a single kind. To define a
+    multi-context, use :func:`ldclient.Context.create_multi()` or :func:`ldclient.Context.multi_builder()`.
+
+    Obtain an instance of ContextBuilder by calling :func:`ldclient.Context.builder()`. Then, call
+    setter methods such as :func:`name()` or :func:`set()` to specify any additional attributes. Then,
+    call :func:`build()` to create the context. ContextBuilder setters return a reference to the same
+    builder, so calls can be chained:
+    ::
+
+        context = Context.builder('user-key') \
+            .name('my-name') \
+            .set('country', 'us') \
+            .build()
+
+    :param key: the context key
+    """
+    def __init__(self, key: str):
+        self.__kind = Context.DEFAULT_KIND
+        self.__key = key
+        self.__name = None  # type: Optional[str]
+        self.__anonymous = False
+        self.__attributes = None  # type: Optional[dict[str, Any]]
+        self.__private = None  # type: Optional[list[str]]
+        self.__allow_empty_key = False
+        self.__copy_on_write_attrs = False
+        self.__copy_on_write_private = False
+
+    def build(self) -> Context:
+        """
+        Creates a Context from the current builder properties.
+
+        The Context is immutable and will not be affected by any subsequent actions on the builder.
+
+        It is possible to specify invalid attributes for a ContextBuilder, such as an empty key.
+        Instead of throwing an exception, the ContextBuilder always returns a Context and you can
+        check :func:`ldclient.Context.valid()` or :func:`ldclient.Context.error()` to see if it has
+        an error.
See :func:`ldclient.Context.valid()` for more information about invalid conditions. + If you pass an invalid LDContext to an SDK method, the SDK will detect this and will log a + description of the error. + + :return: a new :class:`ldclient.Context` + """ + self.__copy_on_write_attrs = (self.__attributes is not None) + self.__copy_on_write_private = (self.__private is not None) + return Context(self.__kind, self.__key, self.__name, self.__anonymous, self.__attributes, self.__private, + None, self.__allow_empty_key) + + def key(self, key: str) -> ContextBuilder: + """ + Sets the context's key attribute. + + Every context has a key, which is always a string. It cannot be an empty string, but + there are no other restrictions on its value. + + The key attribute can be referenced by flag rules, flag target lists, and segments. + + :param key: the context key + :return: the builder + """ + self.__key = key + return self + + def kind(self, kind: str) -> ContextBuilder: + """ + Sets the context's kind attribute. + + Every context has a kind. Setting it to an empty string or null is equivalent to + :const:`ldclient.context.DEFAULT_KIND` ("user"). This value is case-sensitive. + + The meaning of the context kind is completely up to the application. Validation rules are + as follows: + + * It may only contain letters, numbers, and the characters `.`, `_`, and `-`. + * It cannot equal the literal string "kind". + * For a single context, it cannot equal "multi". + + :param kind: the context kind + :return: the builder + """ + self.__kind = kind + return self + + def name(self, name: Optional[str]) -> ContextBuilder: + """ + Sets the context's name attribute. + + This attribute is optional. It has the following special rules: + + * Unlike most other attributes, it is always a string if it is specified. + * The LaunchDarkly dashboard treats this attribute as the preferred display name for + contexts. + + :param name: the context name (None to unset the attribute) + :return: the builder + """ + self.__name = name + return self + + def anonymous(self, anonymous: bool) -> ContextBuilder: + """ + Sets whether the context is only intended for flag evaluations and should not be + indexed by LaunchDarkly. + + The default value is False. False means that this Context represents an entity + such as a user that you want to be able to see on the LaunchDarkly dashboard. + + Setting `anonymous` to true excludes this context from the database that is + used by the dashboard. It does not exclude it from analytics event data, so it is + not the same as making attributes private; all non-private attributes will still be + included in events and data export. There is no limitation on what other attributes + may be included (so, for instance, `anonymous` does not mean there is no `name`), + and the context will still have whatever `key` you have given it. + + This value is also addressable in evaluations as the attribute name "anonymous". It + is always treated as a boolean true or false in evaluations. + + :param anonymous: true if the context should be excluded from the LaunchDarkly database + :return: the builder + """ + self.__anonymous = anonymous + return self + + def set(self, attribute: str, value: Any) -> ContextBuilder: + """ + Sets the value of any attribute for the context. + + This includes only attributes that are addressable in evaluations-- not metadata such + as :func:`private()`. 
+        If `attribute` is `"private"`, you will be setting an attribute
+        with that name which you can use in evaluations or to record data for your own purposes,
+        but it will be unrelated to :func:`private()`.
+
+        The allowable types for context attributes are equivalent to JSON types: boolean, number,
+        string, array (list), or object (dictionary). For all attribute names that do not have
+        special meaning to LaunchDarkly, you may use any of those types. Values of different JSON
+        types are always treated as different values: for instance, the number 1 is not the same
+        as the string "1".
+
+        The following attribute names have special restrictions on their value types, and
+        any value of an unsupported type will be ignored (leaving the attribute unchanged):
+
+        * `kind`, `key`: Must be a string. See :func:`kind()` and :func:`key()`.
+        * `name`: Must be a string or None. See :func:`name()`.
+        * `anonymous`: Must be a boolean. See :func:`anonymous()`.
+
+        The attribute name "_meta" is not allowed, because it has special meaning in the
+        JSON schema for contexts; any attempt to set an attribute with this name has no
+        effect.
+
+        Values that are JSON arrays or objects have special behavior when referenced in
+        flag/segment rules.
+
+        A value of None is equivalent to removing any current non-default value of the
+        attribute. Null/None is not a valid attribute value in the LaunchDarkly model; any
+        expressions in feature flags that reference an attribute with a null value will
+        behave as if the attribute did not exist.
+
+        :param attribute: the attribute name to set
+        :param value: the value to set
+        :return: the builder
+        """
+        self.try_set(attribute, value)
+        return self
+
+    def try_set(self, attribute: str, value: Any) -> bool:
+        """
+        Same as :func:`set()`, but returns a boolean indicating whether the attribute was
+        successfully set.
+
+        :param attribute: the attribute name to set
+        :param value: the value to set
+        :return: True if successful; False if the name was invalid or the value was not an
+            allowed type for that attribute
+        """
+        if attribute == '' or attribute == '_meta':
+            return False
+        if attribute == 'key':
+            if isinstance(value, str):
+                self.__key = value
+                return True
+            return False
+        if attribute == 'kind':
+            if isinstance(value, str):
+                self.__kind = value
+                return True
+            return False
+        if attribute == 'name':
+            if value is None or isinstance(value, str):
+                self.__name = value
+                return True
+            return False
+        if attribute == 'anonymous':
+            if isinstance(value, bool):
+                self.__anonymous = value
+                return True
+            return False
+        if self.__copy_on_write_attrs:
+            self.__copy_on_write_attrs = False
+            self.__attributes = self.__attributes and self.__attributes.copy()
+        if self.__attributes is None:
+            self.__attributes = {}
+        self.__attributes[attribute] = value
+        return True
+
+    def private(self, *attributes: str) -> ContextBuilder:
+        """
+        Designates any number of Context attributes, or properties within them, as private: that is,
+        their values will not be sent to LaunchDarkly.
+
+        Each parameter can be either a simple attribute name, or a slash-delimited path referring to
+        a JSON object property within an attribute.
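+        For example (the attribute names here are illustrative):
+        ::
+
+            # marks the "email" attribute, and the "street" property nested
+            # inside the "address" attribute, as private
+            builder.private('email', '/address/street')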
+
+        :param attributes: attribute names or references to mark as private
+        :return: the builder
+        """
+        if len(attributes) != 0:
+            if self.__copy_on_write_private:
+                self.__copy_on_write_private = False
+                self.__private = self.__private and self.__private.copy()
+            if self.__private is None:
+                self.__private = []
+            self.__private.extend(attributes)
+        return self
+
+    def _allow_empty_key(self, allow: bool):
+        # This is used internally in Context.__from_dict_old_user to support old-style users with an
+        # empty key, which was allowed in the user model.
+        self.__allow_empty_key = allow
+
+
+class ContextMultiBuilder:
+    """
+    A mutable object that uses the builder pattern to specify properties for a multi-context.
+
+    Use this builder if you need to construct a :class:`ldclient.Context` that contains multiple contexts,
+    each for a different context kind. To define a regular context for a single kind, use
+    :func:`ldclient.Context.create()` or :func:`ldclient.Context.builder()`.
+
+    Obtain an instance of ContextMultiBuilder by calling :func:`ldclient.Context.multi_builder()`;
+    then, call :func:`add()` to specify the individual context for each kind. The method returns a
+    reference to the same builder, so calls can be chained:
+    ::
+
+        context = Context.multi_builder() \
+            .add(Context.create("my-user-key")) \
+            .add(Context.create("my-org-key", "organization")) \
+            .build()
+    """
+    def __init__(self):
+        self.__contexts = []  # type: list[Context]
+        self.__copy_on_write = False
+
+    def build(self) -> Context:
+        """
+        Creates a Context from the current builder properties.
+
+        The Context is immutable and will not be affected by any subsequent actions on the builder.
+
+        It is possible for a ContextMultiBuilder to represent an invalid state. Instead of throwing
+        an exception, the ContextMultiBuilder always returns a Context, and you can check
+        :func:`ldclient.Context.valid()` or :func:`ldclient.Context.error()` to see if it has an
+        error. See :func:`ldclient.Context.valid()` for more information about invalid context
+        conditions. If you pass an invalid context to an SDK method, the SDK will detect this and
+        will log a description of the error.
+
+        If only one context was added to the builder, this method returns that context rather
+        than a multi-context.
+
+        :return: a new Context
+        """
+        if len(self.__contexts) == 1:
+            return self.__contexts[0]  # a multi-context with only one context is the same as just that context
+        self.__copy_on_write = True
+        # Context constructor will handle validation
+        return Context(None, '', None, False, None, None, self.__contexts)
+
+    def add(self, context: Context) -> ContextMultiBuilder:
+        """
+        Adds an individual Context for a specific kind to the builder.
+
+        It is invalid to add more than one Context for the same kind, or to add a Context
+        that is itself invalid. This error is detected when you call :func:`build()`.
+
+        If the nested context is a multi-context, this is exactly equivalent to adding each of the
+        individual contexts from it separately.
+        For instance, in the following example, `multi1` and
+        `multi2` end up being exactly the same:
+        ::
+
+            c1 = Context.create("key1", "kind1")
+            c2 = Context.create("key2", "kind2")
+            c3 = Context.create("key3", "kind3")
+
+            multi1 = Context.multi_builder().add(c1).add(c2).add(c3).build()
+
+            c1plus2 = Context.multi_builder().add(c1).add(c2).build()
+            multi2 = Context.multi_builder().add(c1plus2).add(c3).build()
+
+        :param context: the context to add
+        :return: the builder
+        """
+        if context.multiple:
+            for i in range(context.individual_context_count):
+                c = context.get_individual_context(i)
+                if c is not None:
+                    self.add(c)
+        else:
+            if self.__copy_on_write:
+                self.__copy_on_write = False
+                self.__contexts = self.__contexts.copy()
+            self.__contexts.append(context)
+        return self
diff --git a/testing/test_context.py b/testing/test_context.py
new file mode 100644
index 00000000..fba31459
--- /dev/null
+++ b/testing/test_context.py
@@ -0,0 +1,281 @@
+from ldclient.context import Context
+
+import json
+import pytest
+
+
+def assert_context_valid(c):
+    assert c.valid is True
+    assert c.error is None
+
+def assert_context_invalid(c):
+    assert c.valid is False
+    assert c.error is not None
+
+
+class TestContext:
+    def test_create_default_kind(self):
+        c = Context.create('a')
+        assert_context_valid(c)
+        assert c.multiple is False
+        assert c.key == 'a'
+        assert c.kind == 'user'
+        assert c.name is None
+        assert c.anonymous is False
+        assert list(c.custom_attributes) == []
+
+    def test_create_non_default_kind(self):
+        c = Context.create('a', 'b')
+        assert_context_valid(c)
+        assert c.multiple is False
+        assert c.key == 'a'
+        assert c.kind == 'b'
+        assert c.name is None
+        assert c.anonymous is False
+        assert list(c.custom_attributes) == []
+
+    def test_builder_default_kind(self):
+        c = Context.builder('a').build()
+        assert_context_valid(c)
+        assert c.multiple is False
+        assert c.key == 'a'
+        assert c.kind == 'user'
+        assert c.name is None
+        assert c.anonymous is False
+        assert list(c.custom_attributes) == []
+
+    def test_builder_non_default_kind(self):
+        c = Context.builder('a').kind('b').build()
+        assert_context_valid(c)
+        assert c.multiple is False
+        assert c.key == 'a'
+        assert c.kind == 'b'
+        assert c.name is None
+        assert c.anonymous is False
+        assert list(c.custom_attributes) == []
+
+    def test_name(self):
+        c = Context.builder('a').name('b').build()
+        assert_context_valid(c)
+        assert c.key == 'a'
+        assert c.name == 'b'
+        assert list(c.custom_attributes) == []
+
+    def test_anonymous(self):
+        c = Context.builder('a').anonymous(True).build()
+        assert_context_valid(c)
+        assert c.key == 'a'
+        assert c.anonymous
+        assert list(c.custom_attributes) == []
+
+    def test_custom_attributes(self):
+        c = Context.builder('a').set('b', True).set('c', 'd').build()
+        assert_context_valid(c)
+        assert c.key == 'a'
+        assert c.get('b') is True
+        assert c.get('c') == 'd'
+        assert sorted(list(c.custom_attributes)) == ['b', 'c']
+
+    def test_set_built_in_attribute_by_name(self):
+        c = Context.builder('').set('key', 'a').set('kind', 'b').set('name', 'c').set('anonymous', True).build()
+        assert_context_valid(c)
+        assert c.key == 'a'
+        assert c.kind == 'b'
+        assert c.name == 'c'
+        assert c.anonymous
+
+    def test_set_built_in_attribute_by_name_type_checking(self):
+        b = Context.builder('a').kind('b').name('c').anonymous(True)
+
+        assert b.try_set('key', None) is False
+        assert b.try_set('key', 3) is False
+        assert b.build().key == 'a'
+
+        assert b.try_set('kind', None) is False
+        assert b.try_set('kind', 3) is False
+        assert
b.build().kind == 'b' + + assert b.try_set('name', 3) is False + assert b.build().name == 'c' + + assert b.try_set('anonymous', None) is False + assert b.try_set('anonymous', 3) is False + assert b.build().anonymous is True + + def test_get_built_in_attribute_by_name(self): + c = Context.builder('a').kind('b').name('c').anonymous(True).build() + assert c.get('key') == 'a' + assert c.get('kind') == 'b' + assert c.get('name') == 'c' + assert c.get('anonymous') is True + + def test_get_unknown_attribute(self): + c = Context.create('a') + assert c.get('b') is None + + def test_private_attributes(self): + assert list(Context.create('a').private_attributes) == [] + + c = Context.builder('a').private('b', '/c/d').private('e').build() + assert list(c.private_attributes) == ['b', '/c/d', 'e'] + + def test_fully_qualified_key(self): + assert Context.create('key1').fully_qualified_key == 'key1' + assert Context.create('key1', 'kind1').fully_qualified_key == 'kind1:key1' + assert Context.create('key%with:things', 'kind1').fully_qualified_key == 'kind1:key%25with%3Athings' + + def test_equality(self): + def _assert_contexts_from_factory_equal(fn): + c1, c2 = fn(), fn() + assert c1 == c2 + _assert_contexts_from_factory_equal(lambda: Context.create('a')) + _assert_contexts_from_factory_equal(lambda: Context.create('a', 'kind1')) + _assert_contexts_from_factory_equal(lambda: Context.builder('a').name('b').build()) + _assert_contexts_from_factory_equal(lambda: Context.builder('a').anonymous(True).build()) + _assert_contexts_from_factory_equal(lambda: Context.builder('a').set('b', True).set('c', 3).build()) + assert Context.builder('a').set('b', True).set('c', 3).build() == \ + Context.builder('a').set('c', 3).set('b', True).build() # order doesn't matter + + assert Context.create('a', 'kind1') != Context.create('b', 'kind1') + assert Context.create('a', 'kind1') != Context.create('a', 'kind2') + assert Context.builder('a').name('b').build() != Context.builder('a').name('c').build() + assert Context.builder('a').anonymous(True).build() != Context.builder('a').build() + assert Context.builder('a').set('b', True).build() != Context.builder('a').set('b', False).build() + assert Context.builder('a').set('b', True).build() != \ + Context.builder('a').set('b', True).set('c', False).build() + + _assert_contexts_from_factory_equal(lambda: \ + Context.create_multi(Context.create('a', 'kind1'), Context.create('b', 'kind2'))) + assert Context.create_multi(Context.create('a', 'kind1'), Context.create('b', 'kind2')) == \ + Context.create_multi(Context.create('b', 'kind2'), Context.create('a', 'kind1')) # order doesn't matter + + assert Context.create_multi(Context.create('a', 'kind1'), Context.create('b', 'kind2')) != \ + Context.create_multi(Context.create('a', 'kind1'), Context.create('c', 'kind2')) + assert Context.create_multi(Context.create('a', 'kind1'), Context.create('b', 'kind2'), Context.create('c', 'kind3')) != \ + Context.create_multi(Context.create('a', 'kind1'), Context.create('b', 'kind2')) + assert Context.create_multi(Context.create('a', 'kind1'), Context.create('b', 'kind2')) != \ + Context.create('a', 'kind1') + + _assert_contexts_from_factory_equal(lambda: Context.create('invalid', 'kind')) + assert Context.create('invalid', 'kind') != Context.create_multi() # different errors + + def test_json_encoding(self): + assert Context.create('a', 'kind1').to_dict() == {'kind': 'kind1', 'key': 'a'} + assert Context.builder('a').kind('kind1').name('b').build().to_dict() == \ + {'kind': 'kind1', 'key': 'a', 
'name': 'b'} + assert Context.builder('a').kind('kind1').anonymous(True).build().to_dict() == \ + {'kind': 'kind1', 'key': 'a', 'anonymous': True} + assert Context.builder('a').kind('kind1').set('b', True).set('c', 3).build().to_dict() == \ + {'kind': 'kind1', 'key': 'a', 'b': True, 'c': 3} + assert Context.builder('a').kind('kind1').private('b').build().to_dict() == \ + {'kind': 'kind1', 'key': 'a', '_meta': {'privateAttributes': ['b']}} + + assert Context.create_multi(Context.create('key1', 'kind1'), Context.create('key2', 'kind2')).to_dict() == \ + {'kind': 'multi', 'kind1': {'key': 'key1'}, 'kind2': {'key': 'key2'}} + + assert json.loads(Context.create('a', 'kind1').to_json_string()) == {'kind': 'kind1', 'key': 'a'} + + def test_json_decoding(self): + assert Context.from_dict({'kind': 'kind1', 'key': 'key1'}) == Context.create('key1', 'kind1') + assert Context.from_dict({'kind': 'kind1', 'key': 'key1', 'name': 'a'}) == \ + Context.builder('key1').kind('kind1').name('a').build() + assert Context.from_dict({'kind': 'kind1', 'key': 'key1', 'anonymous': True}) == \ + Context.builder('key1').kind('kind1').anonymous(True).build() + assert Context.from_dict({'kind': 'kind1', 'key': 'key1', '_meta': {'privateAttributes': ['b']}}) == \ + Context.builder('key1').kind('kind1').private('b').build() + + assert Context.from_dict({'kind': 'multi', 'kind1': {'key': 'key1'}, 'kind2': {'key': 'key2'}}) == \ + Context.create_multi(Context.create('key1', 'kind1'), Context.create('key2', 'kind2')) + + assert_context_invalid(Context.from_dict({'kind': 'kind1'})) + assert_context_invalid(Context.from_dict({'kind': 'kind1', 'key': 3})) + assert_context_invalid(Context.from_dict({'kind': 'multi'})) + assert_context_invalid(Context.from_dict({'kind': 'multi', 'kind1': 'x'})) + + def test_json_decoding_old_user(self): + assert Context.from_dict({'key': 'key1'}) == Context.create('key1', 'user') + assert Context.from_dict({'key': 'key1', 'name': 'b'}) == Context.builder('key1').name('b').build() + assert Context.from_dict({'key': 'key1', 'custom': {'b': True}}) == \ + Context.builder('key1').set('b', True).build() + + assert_context_valid(Context.from_dict({'key': ''})) + assert_context_invalid(Context.from_dict({})) + assert_context_invalid(Context.from_dict({'key': None})) + assert_context_invalid(Context.from_dict({'key': 3})) + assert_context_invalid(Context.from_dict({'key': 'a', 'name': 3})) + + +class TestContextMulti: + def test_create_multi(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('b', 'kind2') + mc = Context.create_multi(c1, c2) + + assert mc.valid + assert mc.multiple + assert mc.kind == 'multi' + assert mc.key == '' + assert mc.name is None + assert mc.anonymous is False + assert mc.individual_context_count == 2 + assert mc.get_individual_context(0) is c1 + assert mc.get_individual_context(1) is c2 + assert mc.get_individual_context(-1) is None + assert mc.get_individual_context(2) is None + + def test_multi_builder(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('b', 'kind2') + mc = Context.multi_builder().add(c1).add(c2).build() + assert mc == Context.create_multi(c1, c2) + + def test_multi_builder_flattens_nested_multi_context(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('b', 'kind2') + c3 = Context.create('c', 'kind3') + c2plus3 = Context.create_multi(c2, c3) + mc = Context.multi_builder().add(c1).add(c2plus3).build() + assert mc == Context.create_multi(c1, c2, c3) + + def test_multi_fully_qualified_key(self): + c1 = Context.create('a', 
'kind1') + c2 = Context.create('b', 'kind2') + mc = Context.create_multi(c2, c1) # deliberately in reverse order of kind - they should come out sorted + assert mc.fully_qualified_key == 'kind1:a:kind2:b' + + +class TestContextErrors: + def test_key_empty_string(self): + assert_context_invalid(Context.create('')) + assert_context_invalid(Context.builder('').build()) + + @pytest.mark.parametrize('kind', ['kind', 'multi', 'b$c']) + def test_kind_invalid_strings(self, kind): + assert_context_invalid(Context.create('a', kind)) + assert_context_invalid(Context.builder('a').kind(kind).build()) + + def test_create_multi_with_no_contexts(self): + assert_context_invalid(Context.create_multi()) + + def test_multi_builder_with_no_contexts(self): + assert_context_invalid(Context.multi_builder().build()) + + def test_create_multi_with_duplicate_kind(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('b', 'kind1') + assert_context_invalid(Context.create_multi(c1, c2)) + + def test_multi_builder_with_duplicate_kind(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('b', 'kind1') + assert_context_invalid(Context.multi_builder().add(c1).add(c2).build()) + + def test_create_multi_with_invalid_context(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('') + assert_context_invalid(Context.create_multi(c1, c2)) + + def test_multi_builder_with_invalid_context(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('') + assert_context_invalid(Context.multi_builder().add(c1).add(c2).build()) From f3933c13213889b08d011d957b6bcb284b5048fa Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 12:22:22 -0800 Subject: [PATCH 301/356] fix exports --- ldclient/__init__.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ldclient/__init__.py b/ldclient/__init__.py index 6468033f..dd60d11c 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -103,5 +103,10 @@ def _reset_client(): 'Context', 'ContextBuilder', 'ContextMultiBuilder', - 'LDClient' + 'LDClient', + 'client', + 'context', + 'evaluation', + 'integrations', + 'interfaces' ] From ae66589ffcf6aad650fd659f4196867450d737eb Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 13:27:44 -0800 Subject: [PATCH 302/356] specify exports --- ldclient/context.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ldclient/context.py b/ldclient/context.py index f5db344f..56fc7f58 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -928,3 +928,6 @@ def add(self, context: Context) -> ContextMultiBuilder: self.__contexts = self.__contexts.copy() self.__contexts.append(context) return self + + +__all__ = ['Context', 'ContextBuilder', 'ContextMultiBuilder'] From e54e5e781f57ff4bca6038515534b9ec201ee2a3 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 13:38:05 -0800 Subject: [PATCH 303/356] add copy constructor --- ldclient/context.py | 50 ++++++++++++++++++++++++++++++++++------- testing/test_context.py | 10 +++++++++ 2 files changed, 52 insertions(+), 8 deletions(-) diff --git a/ldclient/context.py b/ldclient/context.py index 56fc7f58..ad9af1e6 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -211,6 +211,19 @@ def builder(cls, key: str) -> ContextBuilder: """ return ContextBuilder(key) + @classmethod + def builder_from_context(cls, context: Context) -> ContextBuilder: + """ + Creates a builder whose properties are the same as an existing single-kind Context. 
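+
+        For example (a sketch; the key and names shown are illustrative):
+        ::
+
+            c1 = Context.builder('my-key').name('a').build()
+            c2 = Context.builder_from_context(c1).name('b').build()
+            # c1 and c2 now share the same key but have different names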
+ + You may then change the builder's state in any way and call :func:`ldclient.ContextBuilder.build()` + to create a new independent Context. + + :param context: the context to copy from + :return: a new builder + """ + return ContextBuilder(context.key, context) + @classmethod def multi_builder(cls) -> ContextMultiBuilder: """ @@ -437,6 +450,12 @@ def custom_attributes(self) -> Iterable[str]: """ return () if self.__attributes is None else self.__attributes + @property + def _attributes(self) -> Optional[dict[str, Any]]: + # for internal use by ContextBuilder - we don't want to expose the original dict otherwise + # since that would break immutability + return self.__attributes + @property def private_attributes(self) -> Iterable[str]: """ @@ -449,6 +468,12 @@ def private_attributes(self) -> Iterable[str]: """ return () if self.__private is None else self.__private + @property + def _private_attributes(self) -> Optional[list[str]]: + # for internal use by ContextBuilder - we don't want to expose the original list otherwise + # since that would break immutability + return self.__private + @property def fully_qualified_key(self) -> str: """ @@ -634,16 +659,25 @@ class ContextBuilder: :param key: the context key """ - def __init__(self, key: str): - self.__kind = Context.DEFAULT_KIND + def __init__(self, key: str, copy_from: Optional[Context] = None): self.__key = key - self.__name = None # type: Optional[str] - self.__anonymous = False - self.__attributes = None # type: Optional[dict[str, Any]] - self.__private = None # type: Optional[list[str]] + if copy_from is None: + self.__kind = Context.DEFAULT_KIND + self.__name = None # type: Optional[str] + self.__anonymous = False + self.__attributes = None # type: Optional[dict[str, Any]] + self.__private = None # type: Optional[list[str]] + self.__copy_on_write_attrs = False + self.__copy_on_write_private = False + else: + self.__kind = copy_from.kind + self.__name = copy_from.name + self.__anonymous = copy_from.anonymous + self.__attributes = copy_from._attributes + self.__private = copy_from._private_attributes + self.__copy_on_write_attrs = self.__attributes is not None + self.__copy_on_write_private = self.__private is not None self.__allow_empty_key = False - self.__copy_on_write_attrs = False - self.__copy_on_write_private = False def build(self) -> Context: """ diff --git a/testing/test_context.py b/testing/test_context.py index fba31459..7492d6b1 100644 --- a/testing/test_context.py +++ b/testing/test_context.py @@ -124,6 +124,16 @@ def test_fully_qualified_key(self): assert Context.create('key1', 'kind1').fully_qualified_key == 'kind1:key1' assert Context.create('key%with:things', 'kind1').fully_qualified_key == 'kind1:key%25with%3Athings' + def test_builder_from_context(self): + c1 = Context.builder('a').kind('kind1').name('b').set('c', True).private('d').build() + b = Context.builder_from_context(c1) + assert b.build() == c1 + b.set('c', False) + c2 = b.build() + assert c2 != c1 + assert c1.get('c') is True + assert c2.get('c') is False + def test_equality(self): def _assert_contexts_from_factory_equal(fn): c1, c2 = fn(), fn() From 3757a4a7c289ea05398b78c6209d06d071e5b93e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 13:39:40 -0800 Subject: [PATCH 304/356] minimal changes for SDK methods & evaluator to accept Context --- contract-tests/service.py | 1 + ldclient/client.py | 159 +++++++++++++++++----------- ldclient/context.py | 2 + ldclient/impl/evaluator.py | 127 ++++++++++------------ testing/builders.py | 43 
++++++++ testing/impl/test_evaluator.py | 38 +++---- testing/test_ldclient.py | 68 ++++++------ testing/test_ldclient_evaluation.py | 28 ++++- 8 files changed, 285 insertions(+), 181 deletions(-) create mode 100644 testing/builders.py diff --git a/contract-tests/service.py b/contract-tests/service.py index 48340671..634abbaa 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -53,6 +53,7 @@ def handle_exception(e): if isinstance(e, HTTPException): return e + app.logger.exception(e) return str(e), 500 @app.route('/', methods=['GET']) diff --git a/ldclient/client.py b/ldclient/client.py index 199c538a..125e272d 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -2,7 +2,7 @@ This submodule contains the client class that provides most of the SDK functionality. """ -from typing import Optional, Any, Dict, Mapping +from typing import Optional, Any, Dict, Mapping, Union from .impl import AnyNum @@ -11,7 +11,8 @@ import threading import traceback -from ldclient.config import Config, HTTPConfig +from ldclient.config import Config +from ldclient.context import Context from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_requester import FeatureRequesterImpl @@ -32,6 +33,27 @@ from threading import Lock +def _context_to_user_dict(context: Context) -> dict: + # temporary helper to allow us to update some parts of the SDK to use Context while others are + # still using the user model + ret = {'key': context.key} + if context.name is not None: + ret['name'] = context.name + if context.anonymous: + ret['anonymous'] = True + custom = None + for attr in context.custom_attributes: + if custom is None: + custom = {} + custom[attr] = context.get(attr) + if custom is not None: + ret['custom'] = custom + private = list(context.private_attributes) + if len(private) != 0: + ret['privateAttributeNames'] = private + return ret + + class _FeatureStoreClientWrapper(FeatureStore): """Provides additional behavior that the client requires before or after feature store operations. Currently this just means sorting the data set for init(). In the future we may also use this @@ -179,38 +201,49 @@ def __exit__(self, type, value, traceback): def _send_event(self, event): self._event_processor.send_event(event) - def track(self, event_name: str, user: dict, data: Optional[Any]=None, metric_value: Optional[AnyNum]=None): - """Tracks that a user performed an event. + def track(self, event_name: str, context: Union[dict, Context], data: Optional[Any]=None, metric_value: Optional[AnyNum]=None): + """Tracks that an application-defined event occurred. + + This method creates a "custom" analytics event containing the specified event name (key) + and context properties. You may attach arbitrary data or a metric value to the event with the + optional `data` and `metric_value` parameters. - LaunchDarkly automatically tracks pageviews and clicks that are specified in the Goals - section of the dashboard. This can be used to track custom goals or other events that do - not currently have goals. + Note that event delivery is asynchronous, so the event may not actually be sent until later; + see :func:`flush()`. 
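+
+        For example (the event name, data, and metric value here are illustrative):
+        ::
+
+            client.track('purchase-completed', context, data={'items': 2}, metric_value=9.99)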
-        :param event_name: the name of the event, which may correspond to a goal in A/B tests
-        :param user: the attributes of the user
+        :param event_name: the name of the event
+        :param context: the evaluation context associated with the event
         :param data: optional additional data associated with the event
         :param metric_value: a numeric value used by the LaunchDarkly experimentation feature in
-          numeric custom metrics. Can be omitted if this event is used by only non-numeric metrics.
-          This field will also be returned as part of the custom event for Data Export.
+          numeric custom metrics; can be omitted if this event is used by only non-numeric metrics
         """
-        if user is None or user.get('key') is None:
-            log.warning("Missing user or user key when calling track().")
+        if not isinstance(context, Context):
+            context = Context.from_dict(context)
+        if not context.valid:
+            log.warning("Invalid context for track (%s)" % context.error)
         else:
-            self._send_event(self._event_factory_default.new_custom_event(event_name, user, data, metric_value))
+            self._send_event(self._event_factory_default.new_custom_event(event_name,
+                _context_to_user_dict(context), data, metric_value))
 
-    def identify(self, user: dict):
-        """Registers the user.
+    def identify(self, context: Union[Context, dict]):
+        """Reports details about an evaluation context.
 
-        This simply creates an analytics event that will transmit the given user properties to
-        LaunchDarkly, so that the user will be visible on your dashboard even if you have not
-        evaluated any flags for that user. It has no other effect.
+        This method simply creates an analytics event containing the context properties, so
+        that LaunchDarkly will know about that context if it does not already.
 
-        :param user: attributes of the user to register
+        Evaluating a flag, by calling :func:`variation()` or :func:`variation_detail()`, also
+        sends the context information to LaunchDarkly (if events are enabled), so you only
+        need to use :func:`identify()` if you want to identify the context without evaluating a
+        flag.
+
+        :param context: the context to register
         """
-        if user is None or user.get('key') is None or len(str(user.get('key'))) == 0:
-            log.warning("Missing user or user key when calling identify().")
+        if not isinstance(context, Context):
+            context = Context.from_dict(context)
+        if not context.valid:
+            log.warning("Invalid context for identify (%s)" % context.error)
         else:
-            self._send_event(self._event_factory_default.new_identify_event(user))
+            self._send_event(self._event_factory_default.new_identify_event(_context_to_user_dict(context)))
 
     def is_offline(self) -> bool:
         """Returns true if the client is in offline mode.
@@ -239,39 +272,42 @@ def flush(self):
             return
         return self._event_processor.flush()
 
-    def variation(self, key: str, user: dict, default: Any) -> Any:
-        """Determines the variation of a feature flag for a user.
+    def variation(self, key: str, context: Union[Context, dict], default: Any) -> Any:
+        """Calculates the value of a feature flag for a given context.
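+
+        For example (the flag key and default value are illustrative):
+        ::
+
+            show_widget = client.variation('my-flag-key', context, False)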
:param key: the unique key for the feature flag - :param user: a dictionary containing parameters for the end user requesting the flag + :param context: the evaluation context or user :param default: the default value of the flag, to be used if the value is not available from LaunchDarkly - :return: one of the flag's variation values, or the default value + :return: the variation for the given context, or the `default` value if the flag cannot be evaluated """ - return self._evaluate_internal(key, user, default, self._event_factory_default).value - - def variation_detail(self, key: str, user: dict, default: Any) -> EvaluationDetail: - """Determines the variation of a feature flag for a user, like :func:`variation()`, but also - provides additional information about how this value was calculated, in the form of an - :class:`ldclient.evaluation.EvaluationDetail` object. + return self._evaluate_internal(key, context, default, self._event_factory_default).value - Calling this method also causes the "reason" data to be included in analytics events, - if you are capturing detailed event data for this flag. + def variation_detail(self, key: str, context: Union[Context, dict], default: Any) -> EvaluationDetail: + """Calculates the value of a feature flag for a given context, and returns an object that + describes the way the value was determined. + The `reason` property in the result will also be included in analytics events, if you are + capturing detailed event data for this flag. + :param key: the unique key for the feature flag - :param user: a dictionary containing parameters for the end user requesting the flag + :param context: the evaluation context or user :param default: the default value of the flag, to be used if the value is not available from LaunchDarkly - :return: an object describing the result + :return: an :class:`ldclient.evaluation.EvaluationDetail` object that includes the feature + flag value and evaluation reason """ - return self._evaluate_internal(key, user, default, self._event_factory_with_reasons) + return self._evaluate_internal(key, context, default, self._event_factory_with_reasons) - def _evaluate_internal(self, key, user, default, event_factory): + def _evaluate_internal(self, key: str, context: Union[Context, dict], default: Any, event_factory): default = self._config.get_default(key, default) if self._config.offline: return EvaluationDetail(default, None, error_reason('CLIENT_NOT_READY')) + user = context if isinstance(context, dict) or context is None \ + else _context_to_user_dict(context) # temporary until the event processor is updated to use contexts + if not self.is_initialized(): if self._store.initialized: log.warning("Feature Flag evaluation attempted before client has initialized - using last known values from feature store for feature key: " + key) @@ -282,8 +318,11 @@ def _evaluate_internal(self, key, user, default, event_factory): self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason)) return EvaluationDetail(default, None, reason) - if user is not None and user.get('key', "") == "": - log.warning("User key is blank. 
Flag evaluation will proceed, but the user will not be stored in LaunchDarkly.") + if not isinstance(context, Context): + context = Context.from_dict(context) + if not context.valid: + log.warning("Context was invalid for flag evaluation (%s); returning default value" % context.error) + return EvaluationDetail(default, None, error_reason('USER_NOT_SPECIFIED')) try: flag = self._store.get(FEATURES, key, lambda x: x) @@ -298,13 +337,8 @@ def _evaluate_internal(self, key, user, default, event_factory): self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason)) return EvaluationDetail(default, None, reason) else: - if user is None or user.get('key') is None: - reason = error_reason('USER_NOT_SPECIFIED') - self._send_event(event_factory.new_default_event(flag, user, default, reason)) - return EvaluationDetail(default, None, reason) - try: - result = self._evaluator.evaluate(flag, user, event_factory) + result = self._evaluator.evaluate(flag, context, event_factory) for event in result.events or []: self._send_event(event) detail = result.detail @@ -319,7 +353,7 @@ def _evaluate_internal(self, key, user, default, event_factory): self._send_event(event_factory.new_default_event(flag, user, default, reason)) return EvaluationDetail(default, None, reason) - def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: + def all_flags_state(self, context: Union[Context, dict], **kwargs) -> FeatureFlagsState: """Returns an object that encapsulates the state of all feature flags for a given user, including the flag values and also metadata that can be used on the front end. See the JavaScript SDK Reference Guide on @@ -355,8 +389,10 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: log.warning("all_flags_state() called before client has finished initializing! Feature store unavailable - returning empty state") return FeatureFlagsState(False) - if user is None or user.get('key') is None: - log.warning("User or user key is None when calling all_flags_state(). Returning empty state.") + if not isinstance(context, Context): + context = Context.from_dict(context) + if not context.valid: + log.warning("Context was invalid for all_flags_state (%s); returning default value" % context.error) return FeatureFlagsState(False) state = FeatureFlagsState(True) @@ -375,7 +411,7 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: if client_only and not flag.get('clientSide', False): continue try: - detail = self._evaluator.evaluate(flag, user, self._event_factory_default).detail + detail = self._evaluator.evaluate(flag, context, self._event_factory_default).detail except Exception as e: log.error("Error evaluating flag \"%s\" in all_flags_state: %s" % (key, repr(e))) log.debug(traceback.format_exc()) @@ -398,20 +434,21 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState: return state - def secure_mode_hash(self, user: dict) -> str: - """Computes an HMAC signature of a user signed with the client's SDK key, - for use with the JavaScript SDK. + def secure_mode_hash(self, context: Union[Context, dict]) -> str: + """Creates a hash string that can be used by the JavaScript SDK to identify a context. - For more information, see the JavaScript SDK Reference Guide on - `Secure mode `_. + For more information, see the documentation on + `Secure mode `_. 
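+
+        For example (a sketch; the resulting hash would be passed to your front-end code):
+        ::
+
+            hash = client.secure_mode_hash(Context.create('context-key'))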
- :param user: the attributes of the user - :return: a hash string that can be passed to the front end + :param context: the evaluation context or user + :return: the hash string """ - key = user.get('key') - if key is None or self._config.sdk_key is None: + if not isinstance(context, Context): + context = Context.from_dict(context) + if not context.valid: + log.warning("Context was invalid for secure_mode_hash (%s); returning empty hash" % context.error) return "" - return hmac.new(self._config.sdk_key.encode(), key.encode(), hashlib.sha256).hexdigest() + return hmac.new(self._config.sdk_key.encode(), context.fully_qualified_key.encode(), hashlib.sha256).hexdigest() @property def big_segment_store_status_provider(self) -> BigSegmentStoreStatusProvider: diff --git a/ldclient/context.py b/ldclient/context.py index ad9af1e6..81fc56b0 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -176,6 +176,8 @@ def from_dict(cls, props: dict) -> Context: :param props: the context/user properties :return: a context """ + if props is None: + return Context.__create_with_error('Cannot use None as a context') if 'kind' not in props: return Context.__from_dict_old_user(props) kind = props['kind'] diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 4e4cc46f..2985a3db 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -1,4 +1,5 @@ from ldclient import operators +from ldclient.context import Context from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.impl.event_factory import _EventFactory from ldclient.util import stringify_attrs @@ -17,15 +18,11 @@ __BUILTINS__ = ["key", "secondary", "ip", "country", "email", "firstName", "lastName", "avatar", "name", "anonymous"] -__USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__ = [ "key", "secondary" ] -# Currently we are not stringifying the rest of the built-in attributes prior to evaluation, only for events. -# This is because it could affect evaluation results for existing users (ch35206). - # EvalResult is used internally to hold the EvaluationDetail result of an evaluation along with # other side effects that are not exposed to the application, such as events generated by # prerequisite evaluations, and the cached state of any Big Segments query that we may have -# ended up having to do for the user. +# ended up having to do for the context. 
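+# For example, a single call to evaluate() may generate prerequisite-flag events and perform
+# one Big Segments membership query; EvalResult carries both alongside the EvaluationDetail.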
class EvalResult: def __init__(self): self.detail = None @@ -55,7 +52,7 @@ def __init__( """ :param get_flag: function provided by LDClient that takes a flag key and returns either the flag or None :param get_segment: same as get_flag but for segments - :param get_big_segments_membership: takes a user key (not a user hash) and returns a tuple of + :param get_big_segments_membership: takes a context key (not a context hash) and returns a tuple of (membership, status) where membership is as defined in BigSegmentStore, and status is one of the BigSegmentStoreStatus constants """ @@ -63,39 +60,38 @@ def __init__( self.__get_segment = get_segment self.__get_big_segments_membership = get_big_segments_membership - def evaluate(self, flag: dict, user: dict, event_factory: _EventFactory) -> EvalResult: - sanitized_user = stringify_attrs(user, __USER_ATTRS_TO_STRINGIFY_FOR_EVALUATION__) + def evaluate(self, flag: dict, context: Context, event_factory: _EventFactory) -> EvalResult: state = EvalResult() - state.detail = self._evaluate(flag, sanitized_user, state, event_factory) + state.detail = self._evaluate(flag, context, state, event_factory) if state.big_segments_status is not None: state.detail.reason['bigSegmentsStatus'] = state.big_segments_status return state - def _evaluate(self, flag: dict, user: dict, state: EvalResult, event_factory: _EventFactory): + def _evaluate(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory): if not flag.get('on', False): return _get_off_value(flag, {'kind': 'OFF'}) - prereq_failure_reason = self._check_prerequisites(flag, user, state, event_factory) + prereq_failure_reason = self._check_prerequisites(flag, context, state, event_factory) if prereq_failure_reason is not None: return _get_off_value(flag, prereq_failure_reason) - # Check to see if any user targets match: + # Check to see if any context targets match: for target in flag.get('targets') or []: for value in target.get('values') or []: - if value == user['key']: + if value == context.key: return _get_variation(flag, target.get('variation'), {'kind': 'TARGET_MATCH'}) # Now walk through the rules to see if any match for index, rule in enumerate(flag.get('rules') or []): - if self._rule_matches_user(rule, user, state): - return _get_value_for_variation_or_rollout(flag, rule, user, + if self._rule_matches_context(rule, context, state): + return _get_value_for_variation_or_rollout(flag, rule, context, {'kind': 'RULE_MATCH', 'ruleIndex': index, 'ruleId': rule.get('id')}) # Walk through fallthrough and see if it matches if flag.get('fallthrough') is not None: - return _get_value_for_variation_or_rollout(flag, flag['fallthrough'], user, {'kind': 'FALLTHROUGH'}) + return _get_value_for_variation_or_rollout(flag, flag['fallthrough'], context, {'kind': 'FALLTHROUGH'}) - def _check_prerequisites(self, flag: dict, user: dict, state: EvalResult, event_factory: _EventFactory): + def _check_prerequisites(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory): failed_prereq = None prereq_res = None for prereq in flag.get('prerequisites') or []: @@ -104,40 +100,40 @@ def _check_prerequisites(self, flag: dict, user: dict, state: EvalResult, event_ log.warning("Missing prereq flag: " + prereq.get('key')) failed_prereq = prereq else: - prereq_res = self._evaluate(prereq_flag, user, state, event_factory) + prereq_res = self._evaluate(prereq_flag, context, state, event_factory) # Note that if the prerequisite flag is off, we don't consider it a match no matter what 
its # off variation was. But we still need to evaluate it in order to generate an event. if (not prereq_flag.get('on', False)) or prereq_res.variation_index != prereq.get('variation'): failed_prereq = prereq - event = event_factory.new_eval_event(prereq_flag, user, prereq_res, None, flag) + event = event_factory.new_eval_event(prereq_flag, context, prereq_res, None, flag) state.add_event(event) if failed_prereq: return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.get('key')} return None - def _rule_matches_user(self, rule: dict, user: dict, state: EvalResult): + def _rule_matches_context(self, rule: dict, context: Context, state: EvalResult): for clause in rule.get('clauses') or []: if clause.get('attribute') is not None: - if not self._clause_matches_user(clause, user, state): + if not self._clause_matches_context(clause, context, state): return False return True - def _clause_matches_user(self, clause: dict, user: dict, state: EvalResult): + def _clause_matches_context(self, clause: dict, context: Context, state: EvalResult): if clause.get('op') == 'segmentMatch': for seg_key in clause.get('values') or []: segment = self.__get_segment(seg_key) - if segment is not None and self._segment_matches_user(segment, user, state): + if segment is not None and self._segment_matches_context(segment, context, state): return _maybe_negate(clause, True) return _maybe_negate(clause, False) else: - return _clause_matches_user_no_segments(clause, user) + return _clause_matches_context_no_segments(clause, context) - def _segment_matches_user(self, segment: dict, user: dict, state: EvalResult): + def _segment_matches_context(self, segment: dict, context: Context, state: EvalResult): if segment.get('unbounded', False): - return self._big_segment_match_user(segment, user, state) - return _simple_segment_match_user(segment, user, True) + return self._big_segment_match_context(segment, context, state) + return _simple_segment_match_context(segment, context, True) - def _big_segment_match_user(self, segment: dict, user: dict, state: EvalResult): + def _big_segment_match_context(self, segment: dict, context: Context, state: EvalResult): generation = segment.get('generation', None) if generation is None: # Big segment queries can only be done if the generation is known. 
If it's unset, @@ -147,15 +143,14 @@ def _big_segment_match_user(self, segment: dict, user: dict, state: EvalResult): state.big_segments_status = BigSegmentsStatus.NOT_CONFIGURED return False if state.big_segments_status is None: - user_key = str(user.get('key')) - result = self.__get_big_segments_membership(user_key) + result = self.__get_big_segments_membership(context.key) state.big_segments_membership, state.big_segments_status = result segment_ref = _make_big_segment_ref(segment) membership = state.big_segments_membership included = None if membership is None else membership.get(segment_ref, None) if included is not None: return included - return _simple_segment_match_user(segment, user, False) + return _simple_segment_match_context(segment, context, False) # The following functions are declared outside Evaluator because they do not depend on any @@ -173,23 +168,15 @@ def _get_off_value(flag, reason): return EvaluationDetail(None, None, reason) return _get_variation(flag, off_var, reason) -def _get_value_for_variation_or_rollout(flag, vr, user, reason): - index, inExperiment = _variation_index_for_user(flag, vr, user) +def _get_value_for_variation_or_rollout(flag, vr, context, reason): + index, inExperiment = _variation_index_for_context(flag, vr, context) if index is None: return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) if inExperiment: reason['inExperiment'] = inExperiment return _get_variation(flag, index, reason) -def _get_user_attribute(user, attr): - if attr in __BUILTINS__: - return user.get(attr), False - else: # custom attribute - if user.get('custom') is None or user['custom'].get(attr) is None: - return None, True - return user['custom'][attr], False - -def _variation_index_for_user(feature, rule, user): +def _variation_index_for_context(feature, rule, context): if rule.get('variation') is not None: return (rule['variation'], False) @@ -202,7 +189,7 @@ def _variation_index_for_user(feature, rule, user): bucket_by = 'key' if rollout.get('bucketBy') is not None: bucket_by = rollout['bucketBy'] - bucket = _bucket_user(seed, user, feature['key'], feature['salt'], bucket_by) + bucket = _bucket_context(seed, context, feature['key'], feature['salt'], bucket_by) is_experiment = rollout.get('kind') == 'experiment' sum = 0.0 for wv in variations: @@ -211,26 +198,27 @@ def _variation_index_for_user(feature, rule, user): is_experiment_partition = is_experiment and not wv.get('untracked') return (wv.get('variation'), is_experiment_partition) - # The user's bucket value was greater than or equal to the end of the last bucket. This could happen due + # The context's bucket value was greater than or equal to the end of the last bucket. This could happen due # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag # data could contain buckets that don't actually add up to 100000. Rather than returning an error in - # this case (or changing the scaling, which would potentially change the results for *all* users), we - # will simply put the user in the last bucket. + # this case (or changing the scaling, which would potentially change the results for *all* contexts), we + # will simply put the context in the last bucket. 
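+        # Note: a variation marked 'untracked' opts out of experiment analytics even when the
+        # rollout kind is 'experiment', which is why it is excluded here and in the loop above.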
is_experiment_partition = is_experiment and not variations[-1].get('untracked') return (variations[-1].get('variation'), is_experiment_partition) return (None, False) -def _bucket_user(seed, user, key, salt, bucket_by): - u_value, should_pass = _get_user_attribute(user, bucket_by) - bucket_by_value = _bucketable_string_value(u_value) - - if should_pass or bucket_by_value is None: +def _bucket_context(seed, context, key, salt, bucket_by): + clause_value = context.get(bucket_by or 'key') + if clause_value is None: + return 0.0 + bucket_by_value = _bucketable_string_value(clause_value) + if bucket_by_value is None: return 0.0 - id_hash = u_value - if user.get('secondary') is not None: - id_hash = id_hash + '.' + user['secondary'] + id_hash = clause_value + if context.get('secondary') is not None: + id_hash = id_hash + '.' + context.get('secondary') if seed is not None: prefix = str(seed) @@ -250,24 +238,25 @@ def _bucketable_string_value(u_value): return None -def _clause_matches_user_no_segments(clause, user): - u_value, should_pass = _get_user_attribute(user, clause.get('attribute')) - if should_pass is True: +def _clause_matches_context_no_segments(clause, context): + attr = clause.get('attribute') + if attr is None: return False - if u_value is None: + context_value = context.get(attr) + if context_value is None: return None # is the attr an array? op_fn = operators.ops[clause['op']] - if isinstance(u_value, (list, tuple)): - for u in u_value: - if _match_any(op_fn, u, clause.get('values') or []): + if isinstance(context_value, (list, tuple)): + for v in context_value: + if _match_any(op_fn, v, clause.get('values') or []): return _maybe_negate(clause, True) return _maybe_negate(clause, False) else: - return _maybe_negate(clause, _match_any(op_fn, u_value, clause.get('values') or [])) + return _maybe_negate(clause, _match_any(op_fn, context_value, clause.get('values') or [])) -def _simple_segment_match_user(segment, user, use_includes_and_excludes): - key = user.get('key') +def _simple_segment_match_context(segment, context, use_includes_and_excludes): + key = context.key if key is not None: if use_includes_and_excludes: if key in segment.get('included', []): @@ -275,22 +264,22 @@ def _simple_segment_match_user(segment, user, use_includes_and_excludes): if key in segment.get('excluded', []): return False for rule in segment.get('rules', []): - if _segment_rule_matches_user(rule, user, segment.get('key'), segment.get('salt')): + if _segment_rule_matches_context(rule, context, segment.get('key'), segment.get('salt')): return True return False -def _segment_rule_matches_user(rule, user, segment_key, salt): +def _segment_rule_matches_context(rule, context, segment_key, salt): for clause in rule.get('clauses') or []: - if not _clause_matches_user_no_segments(clause, user): + if not _clause_matches_context_no_segments(clause, context): return False # If the weight is absent, this rule matches if 'weight' not in rule or rule['weight'] is None: return True - # All of the clauses are met. See if the user buckets in + # All of the clauses are met. 
See if the context buckets in
+    bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy']
+    bucket = _bucket_context(None, context, segment_key, salt, bucket_by)
+    weight = rule['weight'] / 100000.0
+    return bucket < weight
diff --git a/testing/builders.py b/testing/builders.py
new file mode 100644
index 00000000..df0c212f
--- /dev/null
+++ b/testing/builders.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+from typing import Any, Optional
+
+
+class FlagBuilder:
+    def __init__(self, key):
+        self.__data = {
+            'key': key,
+            'version': 1,
+            'on': False,
+            'variations': [],
+            'offVariation': None,
+            'fallthrough': {},
+            'prerequisites': [],
+            'targets': [],
+            'rules': []
+        }
+
+    def build(self):
+        return self.__data.copy()
+
+    def _set(self, k: str, v: Any) -> FlagBuilder:
+        self.__data[k] = v
+        return self
+
+    def key(self, key: str) -> FlagBuilder:
+        return self._set('key', key)
+
+    def version(self, version: int) -> FlagBuilder:
+        return self._set('version', version)
+
+    def on(self, on: bool) -> FlagBuilder:
+        return self._set('on', on)
+
+    def variations(self, *variations: Any) -> FlagBuilder:
+        return self._set('variations', list(variations))
+
+    def offVariation(self, value: Optional[int]) -> FlagBuilder:
+        return self._set('offVariation', value)
+
+    def target(self, variation: int, *keys: str) -> FlagBuilder:
+        self.__data['targets'].append({'variation': variation, 'values': list(keys)})
+        return self
diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py
index e48353ab..55189a2b 100644
--- a/testing/impl/test_evaluator.py
+++ b/testing/impl/test_evaluator.py
@@ -1,7 +1,7 @@
 import math
 import pytest
 from ldclient.evaluation import EvaluationDetail
-from ldclient.impl.evaluator import _bucket_user, _variation_index_for_user
+from ldclient.impl.evaluator import _bucket_context, _variation_index_for_context
 from testing.impl.evaluator_util import *
 
@@ -362,7 +362,7 @@ def test_variation_index_is_returned_for_bucket():
     # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000,
     # so we can construct a rollout whose second bucket just barely contains that value
-    bucket_value = math.trunc(_bucket_user(None, user, flag['key'], flag['salt'], 'key') * 100000)
+    bucket_value = math.trunc(_bucket_context(None, user, flag['key'], flag['salt'], 'key') * 100000)
     assert bucket_value > 0 and bucket_value < 100000
 
     bad_variation_a = 0
@@ -377,7 +377,7 @@
             ]
         }
     }
-    result_variation = _variation_index_for_user(flag, rule, user)
+    result_variation = _variation_index_for_context(flag, rule, user)
     assert result_variation == (matched_variation, False)
 
 def test_last_bucket_is_used_if_bucket_value_equals_total_weight():
@@ -385,7 +385,7 @@
     flag = { 'key': 'flagkey', 'salt': 'salt' }
 
     # We'll construct a list of variations that stops right at the target bucket value
-    bucket_value = math.trunc(_bucket_user(None, user, flag['key'], flag['salt'], 'key') * 100000)
+    bucket_value = math.trunc(_bucket_context(None, user, flag['key'], flag['salt'], 'key') * 100000)
 
     rule = {
         'rollout': {
@@ -394,34 +394,34 @@
             ]
         }
     }
-    result_variation = _variation_index_for_user(flag, rule, user)
+    result_variation = _variation_index_for_context(flag, rule, user)
     assert result_variation == (0, False)
 
 def
test_bucket_by_user_key(): user = { u'key': u'userKeyA' } - bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'key') + bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.42157587) user = { u'key': u'userKeyB' } - bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'key') + bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.6708485) user = { u'key': u'userKeyC' } - bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'key') + bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.10343106) def test_bucket_by_user_key_with_seed(): seed = 61 user = { u'key': u'userKeyA' } - point = _bucket_user(seed, user, 'hashKey', 'saltyA', 'key') + point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') assert point == pytest.approx(0.09801207) user = { u'key': u'userKeyB' } - point = _bucket_user(seed, user, 'hashKey', 'saltyA', 'key') + point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') assert point == pytest.approx(0.14483777) user = { u'key': u'userKeyC' } - point = _bucket_user(seed, user, 'hashKey', 'saltyA', 'key') + point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') assert point == pytest.approx(0.9242641) def test_bucket_by_int_attr(): @@ -432,9 +432,9 @@ def test_bucket_by_int_attr(): u'stringAttr': u'33333' } } - bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'intAttr') + bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'intAttr') assert bucket == pytest.approx(0.54771423) - bucket2 = _bucket_user(None, user, 'hashKey', 'saltyA', 'stringAttr') + bucket2 = _bucket_context(None, user, 'hashKey', 'saltyA', 'stringAttr') assert bucket2 == bucket def test_bucket_by_float_attr_not_allowed(): @@ -444,15 +444,15 @@ def test_bucket_by_float_attr_not_allowed(): u'floatAttr': 33.5 } } - bucket = _bucket_user(None, user, 'hashKey', 'saltyA', 'floatAttr') + bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'floatAttr') assert bucket == 0.0 def test_seed_independent_of_salt_and_hashKey(): seed = 61 user = { u'key': u'userKeyA' } - point1 = _bucket_user(seed, user, 'hashKey', 'saltyA', 'key') - point2 = _bucket_user(seed, user, 'hashKey', 'saltyB', 'key') - point3 = _bucket_user(seed, user, 'hashKey2', 'saltyA', 'key') + point1 = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') + point2 = _bucket_context(seed, user, 'hashKey', 'saltyB', 'key') + point3 = _bucket_context(seed, user, 'hashKey2', 'saltyA', 'key') assert point1 == point2 assert point2 == point3 @@ -460,8 +460,8 @@ def test_seed_independent_of_salt_and_hashKey(): def test_seed_changes_hash_evaluation(): seed1 = 61 user = { u'key': u'userKeyA' } - point1 = _bucket_user(seed1, user, 'hashKey', 'saltyA', 'key') + point1 = _bucket_context(seed1, user, 'hashKey', 'saltyA', 'key') seed2 = 62 - point2 = _bucket_user(seed2, user, 'hashKey', 'saltyB', 'key') + point2 = _bucket_context(seed2, user, 'hashKey', 'saltyB', 'key') assert point1 != point2 diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 4a708e4e..a77203f7 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -1,4 +1,4 @@ -from ldclient.client import LDClient, Config +from ldclient.client import LDClient, Config, Context from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_store import InMemoryFeatureStore from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor @@ -17,6 +17,7 @@ 
unreachable_uri="http://fake" +context = Context.builder('xyz').set('bizzle', 'def').build() user = { u'key': u'xyz', u'custom': { @@ -125,6 +126,13 @@ def test_toggle_offline(): def test_identify(): + with make_client() as client: + client.identify(context) + e = get_first_event(client) + assert e['kind'] == 'identify' and e['key'] == u'xyz' and e['user'] == user + + +def test_identify_with_user_dict(): with make_client() as client: client.identify(user) e = get_first_event(client) @@ -143,13 +151,20 @@ def test_identify_no_user_key(): assert count_events(client) == 0 -def test_identify_blank_user_key(): +def test_identify_invalid_context(): with make_client() as client: - client.identify({ 'key': '' }) + client.identify(Context.create('')) assert count_events(client) == 0 def test_track(): + with make_client() as client: + client.track('my_event', context) + e = get_first_event(client) + assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e.get('data') is None and e.get('metricValue') is None + + +def test_track_with_user_dict(): with make_client() as client: client.track('my_event', user) e = get_first_event(client) @@ -158,14 +173,14 @@ def test_track(): def test_track_with_data(): with make_client() as client: - client.track('my_event', user, 42) + client.track('my_event', context, 42) e = get_first_event(client) assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 and e.get('metricValue') is None def test_track_with_metric_value(): with make_client() as client: - client.track('my_event', user, 42, 1.5) + client.track('my_event', context, 42, 1.5) e = get_first_event(client) assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 and e.get('metricValue') == 1.5 @@ -182,6 +197,12 @@ def test_track_no_user_key(): assert count_events(client) == 0 +def test_track_invalid_context(): + with make_client() as client: + client.track('my_event', Context.create('')) + assert count_events(client) == 0 + + def test_track_anonymous_user(): with make_client() as client: client.track('my_event', anonymous_user) @@ -430,7 +451,7 @@ def test_event_for_unknown_feature(): e['default'] == 'default') -def test_event_for_existing_feature_with_no_user(): +def test_no_event_for_existing_feature_with_no_user(): feature = make_off_flag_with_value('feature.key', 'value') feature['trackEvents'] = True feature['debugEventsUntilDate'] = 1000 @@ -438,43 +459,28 @@ def test_event_for_existing_feature_with_no_user(): store.init({FEATURES: {'feature.key': feature}}) with make_client(store) as client: assert 'default' == client.variation('feature.key', None, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e.get('user') is None and - e['version'] == feature['version'] and - e['value'] == 'default' and - e.get('variation') is None and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000) + assert count_events(client) == 0 -def test_event_for_existing_feature_with_no_user_key(): +def test_no_event_for_existing_feature_with_invalid_context(): feature = make_off_flag_with_value('feature.key', 'value') feature['trackEvents'] = True feature['debugEventsUntilDate'] = 1000 store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) with make_client(store) as client: - bad_user = { u'name': u'Bob' } - assert 'default' == client.variation('feature.key', bad_user, 
default='default')
-        e = get_first_event(client)
-        assert (e['kind'] == 'feature' and
-            e['key'] == 'feature.key' and
-            e['user'] == bad_user and
-            e['version'] == feature['version'] and
-            e['value'] == 'default' and
-            e.get('variation') is None and
-            e['default'] == 'default' and
-            e['trackEvents'] == True and
-            e['debugEventsUntilDate'] == 1000)
+        bad_context = Context.create('')
+        assert 'default' == client.variation('feature.key', bad_context, default='default')
+        assert count_events(client) == 0
 
 
 def test_secure_mode_hash():
-    user = {'key': 'Message'}
+    context_to_hash = Context.create('Message')
+    equivalent_user_to_hash = {'key': 'Message'}
+    expected_hash = "aa747c502a898200f9e4fa21bac68136f886a0e27aec70ba06daf2e2a5cb5597"
     with make_offline_client() as client:
-        assert client.secure_mode_hash(user) == "aa747c502a898200f9e4fa21bac68136f886a0e27aec70ba06daf2e2a5cb5597"
+        assert client.secure_mode_hash(context_to_hash) == expected_hash
+        assert client.secure_mode_hash(equivalent_user_to_hash) == expected_hash
 
 
 dependency_ordering_test_data = {
diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py
index 6d3c0edf..3f41e7e3 100644
--- a/testing/test_ldclient_evaluation.py
+++ b/testing/test_ldclient_evaluation.py
@@ -1,7 +1,7 @@
 import pytest
 import json
 import time
-from ldclient.client import LDClient, Config
+from ldclient.client import LDClient, Config, Context
 from ldclient.config import BigSegmentsConfig
 from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail
 from ldclient.feature_store import InMemoryFeatureStore
@@ -9,6 +9,8 @@
 from ldclient.impl.evaluator import _make_big_segment_ref
 from ldclient.interfaces import FeatureStore
 from ldclient.versioned_data_kind import FEATURES, SEGMENTS
+
+from testing.builders import *
 from testing.impl.evaluator_util import make_boolean_flag_matching_segment
 from testing.mock_components import MockBigSegmentStore
 from testing.stub_util import MockEventProcessor, MockUpdateProcessor
@@ -76,6 +78,22 @@ def test_variation_for_existing_feature():
     client = make_client(store)
     assert 'value' == client.variation('feature.key', user, default='default')
 
+def test_variation_passes_user_to_evaluator():
+    u = {'key': 'userkey'}
+    feature = FlagBuilder('feature.key').on(True).variations('wrong', 'right').target(1, 'userkey').build()
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {'feature.key': feature}})
+    client = make_client(store)
+    assert 'right' == client.variation('feature.key', u, default='default')
+
+def test_variation_passes_context_to_evaluator():
+    c = Context.create('userkey')
+    feature = FlagBuilder('feature.key').on(True).variations('wrong', 'right').target(1, 'userkey').build()
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {'feature.key': feature}})
+    client = make_client(store)
+    assert 'right' == client.variation('feature.key', c, default='default')
+
 def test_variation_for_unknown_feature():
     store = InMemoryFeatureStore()
     client = make_client(store)
@@ -95,6 +113,14 @@ def test_variation_when_user_has_no_key():
     client = make_client(store)
     assert 'default' == client.variation('feature.key', { }, default='default')
 
+def test_variation_for_invalid_context():
+    c = Context.create('')
+    feature = make_off_flag_with_value('feature.key', 'value')
+    store = InMemoryFeatureStore()
+    store.init({FEATURES: {'feature.key': feature}})
+    client = make_client(store)
+    assert 'default' == client.variation('feature.key', c, default='default')
+
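
The invalid-context tests above all hinge on the same behavior: Context.create('') produces a context whose valid property is False, so the client falls back to the default value and emits no events. A quick sketch of what the tests rely on (the exact wording of the error message is the SDK's own and is not asserted here):

    from ldclient import Context

    c = Context.create('')
    assert not c.valid          # an empty key makes the context invalid
    assert c.error is not None  # .error describes the validation failure

def 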
test_variation_for_flag_that_evaluates_to_none(): empty_flag = { 'key': 'feature.key', From f49f33c981f464779a60407e99e7c8f376a0ca02 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 13:53:22 -0800 Subject: [PATCH 305/356] update tests, add subscript method --- ldclient/context.py | 6 ++ testing/impl/evaluator_util.py | 10 +- testing/impl/test_evaluator.py | 107 +++++++-------------- testing/impl/test_evaluator_big_segment.py | 2 +- testing/impl/test_evaluator_segment.py | 19 ++-- testing/test_context.py | 2 + 6 files changed, 62 insertions(+), 84 deletions(-) diff --git a/ldclient/context.py b/ldclient/context.py index 81fc56b0..7511ea85 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -369,6 +369,9 @@ def get(self, attribute: str) -> Any: attribute, the return value is None. An attribute that actually exists cannot have a value of None. + Context has a `__getitem__` magic method equivalent to `get`, so `context['attr']` + behaves the same as `context.get('attr')`. + :param attribute: the desired attribute name :return: the attribute value, or None if there is no such attribute """ @@ -579,6 +582,9 @@ def __from_dict_old_user(self, props: dict) -> Context: b._allow_empty_key(has_key) return b.build() + def __getitem__(self, attribute) -> Any: + return self.get(attribute) if isinstance(attribute, str) else None + def __repr__(self) -> str: """ Returns a standard string representation of a context. diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index 9bae2dc1..8195bf1d 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -1,10 +1,11 @@ +from ldclient import Context from ldclient.evaluation import BigSegmentsStatus from ldclient.impl.evaluator import Evaluator, _make_big_segment_ref from ldclient.impl.event_factory import _EventFactory -from typing import Optional, Tuple +from typing import Optional, Tuple, Union -basic_user = { "key": "user-key" } +basic_user = Context.create('user-key') event_factory = _EventFactory(False) class EvaluatorBuilder: @@ -95,5 +96,6 @@ def make_boolean_flag_matching_segment(segment: dict) -> dict: 'values': [ segment['key'] ] }) -def make_clause_matching_user(user: dict) -> dict: - return { 'attribute': 'key', 'op': 'in', 'values': [ user['key'] ] } +def make_clause_matching_user(user: Union[Context, dict]) -> dict: + key = user.key if isinstance(user, Context) else user['key'] + return { 'attribute': 'key', 'op': 'in', 'values': [ key ] } diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py index 55189a2b..6192c0c4 100644 --- a/testing/impl/test_evaluator.py +++ b/testing/impl/test_evaluator.py @@ -1,5 +1,6 @@ import math import pytest +from ldclient.client import Context from ldclient.evaluation import EvaluationDetail from ldclient.impl.evaluator import _bucket_context, _variation_index_for_context from testing.impl.evaluator_util import * @@ -17,7 +18,7 @@ def test_flag_returns_off_variation_if_flag_is_off(): 'offVariation': 1, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'OFF'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -27,7 +28,7 @@ def test_flag_returns_none_if_flag_is_off_and_off_variation_is_unspecified(): 'on': False, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'OFF'}) assert_eval_result(basic_evaluator.evaluate(flag, user, 
event_factory), detail, None) @@ -38,7 +39,7 @@ def test_flag_returns_error_if_off_variation_is_too_high(): 'offVariation': 999, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -49,7 +50,7 @@ def test_flag_returns_error_if_off_variation_is_negative(): 'offVariation': -1, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -63,7 +64,7 @@ def test_flag_returns_off_variation_if_prerequisite_not_found(): 'variations': ['a', 'b', 'c'] } evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build() - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'}) assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None) @@ -88,7 +89,7 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): 'trackEvents': False } evaluator = EvaluatorBuilder().with_flag(flag1).build() - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, 'version': 2, 'user': user, 'prereqOf': 'feature0'}] @@ -113,7 +114,7 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): 'trackEvents': False } evaluator = EvaluatorBuilder().with_flag(flag1).build() - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, 'version': 2, 'user': user, 'prereqOf': 'feature0'}] @@ -138,7 +139,7 @@ def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_ru 'trackEvents': False } evaluator = EvaluatorBuilder().with_flag(flag1).build() - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, 'version': 2, 'user': user, 'prereqOf': 'feature0'}] @@ -151,7 +152,7 @@ def test_flag_returns_error_if_fallthrough_variation_is_too_high(): 'fallthrough': {'variation': 999}, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -162,7 +163,7 @@ def test_flag_returns_error_if_fallthrough_variation_is_negative(): 'fallthrough': {'variation': -1}, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -173,7 +174,7 @@ def test_flag_returns_error_if_fallthrough_has_no_variation_or_rollout(): 'fallthrough': {}, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'x' } + user = Context.create('x') detail = 
EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -185,7 +186,7 @@ def test_flag_returns_error_if_fallthrough_has_rollout_with_no_variations(): 'variations': ['a', 'b', 'c'], 'salt': '' } - user = { 'key': 'x' } + user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -198,35 +199,35 @@ def test_flag_matches_user_from_targets(): 'offVariation': 1, 'variations': ['a', 'b', 'c'] } - user = { 'key': 'userkey' } + user = Context.create('userkey') detail = EvaluationDetail('c', 2, {'kind': 'TARGET_MATCH'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_matches_user_from_rules(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 1} flag = make_boolean_flag_with_rules([rule]) - user = { 'key': 'userkey' } + user = Context.create('userkey') detail = EvaluationDetail(True, 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'id'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_rule_variation_is_too_high(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 999} flag = make_boolean_flag_with_rules([rule]) - user = { 'key': 'userkey' } + user = Context.create('userkey') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_rule_variation_is_negative(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': -1} flag = make_boolean_flag_with_rules([rule]) - user = { 'key': 'userkey' } + user = Context.create('userkey') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_rule_has_no_variation_or_rollout(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}]} flag = make_boolean_flag_with_rules([rule]) - user = { 'key': 'userkey' } + user = Context.create('userkey') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) @@ -234,33 +235,10 @@ def test_flag_returns_error_if_rule_has_rollout_with_no_variations(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'rollout': {'variations': []} } flag = make_boolean_flag_with_rules([rule]) - user = { 'key': 'userkey' } + user = Context.create('userkey') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) -def test_user_key_is_coerced_to_string_for_evaluation(): - clause = { 'attribute': 'key', 'op': 'in', 'values': [ '999' ] } - flag = make_boolean_flag_with_clause(clause) - user = { 'key': 999 } - assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True - -def test_secondary_key_is_coerced_to_string_for_evaluation(): - # We can't really verify that the rollout calculation works correctly, 
but we can at least - # make sure it doesn't error out if there's a non-string secondary value (ch35189) - rule = { - 'id': 'ruleid', - 'clauses': [ - { 'attribute': 'key', 'op': 'in', 'values': [ 'userkey' ] } - ], - 'rollout': { - 'salt': '', - 'variations': [ { 'weight': 100000, 'variation': 1 } ] - } - } - flag = make_boolean_flag_with_rules([rule]) - user = { 'key': 'userkey', 'secondary': 999 } - assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True - def test_segment_match_clause_retrieves_segment_from_store(): segment = { "key": "segkey", @@ -269,7 +247,7 @@ def test_segment_match_clause_retrieves_segment_from_store(): } evaluator = EvaluatorBuilder().with_segment(segment).build() - user = { "key": "foo" } + user = Context.create('foo') flag = { "key": "test", "variations": [ False, True ], @@ -292,7 +270,7 @@ def test_segment_match_clause_retrieves_segment_from_store(): assert evaluator.evaluate(flag, user, event_factory).detail.value == True def test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found(): - user = { "key": "foo" } + user = Context.create('foo') flag = { "key": "test", "variations": [ False, True ], @@ -321,7 +299,7 @@ def test_clause_matches_builtin_attribute(): 'op': 'in', 'values': [ 'Bob' ] } - user = { 'key': 'x', 'name': 'Bob' } + user = Context.builder('x').name('Bob').build() flag = make_boolean_flag_with_clause(clause) assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True @@ -331,7 +309,7 @@ def test_clause_matches_custom_attribute(): 'op': 'in', 'values': [ 4 ] } - user = { 'key': 'x', 'name': 'Bob', 'custom': { 'legs': 4 } } + user = Context.builder('x').name('Bob').set('legs', 4).build() flag = make_boolean_flag_with_clause(clause) assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True @@ -341,7 +319,7 @@ def test_clause_returns_false_for_missing_attribute(): 'op': 'in', 'values': [ 4 ] } - user = { 'key': 'x', 'name': 'Bob' } + user = Context.builder('x').name('Bob').build() flag = make_boolean_flag_with_clause(clause) assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == False @@ -352,12 +330,12 @@ def test_clause_can_be_negated(): 'values': [ 'Bob' ], 'negate': True } - user = { 'key': 'x', 'name': 'Bob' } + user = Context.builder('x').name('Bob').build() flag = make_boolean_flag_with_clause(clause) assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == False def test_variation_index_is_returned_for_bucket(): - user = { 'key': 'userkey' } + user = Context.create('userkey') flag = { 'key': 'flagkey', 'salt': 'salt' } # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, @@ -381,7 +359,7 @@ def test_variation_index_is_returned_for_bucket(): assert result_variation == (matched_variation, False) def test_last_bucket_is_used_if_bucket_value_equals_total_weight(): - user = { 'key': 'userkey' } + user = Context.create('userkey') flag = { 'key': 'flagkey', 'salt': 'salt' } # We'll construct a list of variations that stops right at the target bucket value @@ -398,58 +376,47 @@ def test_last_bucket_is_used_if_bucket_value_equals_total_weight(): assert result_variation == (0, False) def test_bucket_by_user_key(): - user = { u'key': u'userKeyA' } + user = Context.create('userKeyA') bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.42157587) - user = { u'key': u'userKeyB' } + user = Context.create('userKeyB') bucket = 
_bucket_context(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.6708485) - user = { u'key': u'userKeyC' } + user = Context.create('userKeyC') bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') assert bucket == pytest.approx(0.10343106) def test_bucket_by_user_key_with_seed(): seed = 61 - user = { u'key': u'userKeyA' } + user = Context.create('userKeyA') point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') assert point == pytest.approx(0.09801207) - user = { u'key': u'userKeyB' } + user = Context.create('userKeyB') point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') assert point == pytest.approx(0.14483777) - user = { u'key': u'userKeyC' } + user = Context.create('userKeyC') point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') assert point == pytest.approx(0.9242641) def test_bucket_by_int_attr(): - user = { - u'key': u'userKey', - u'custom': { - u'intAttr': 33333, - u'stringAttr': u'33333' - } - } + user = Context.builder('userKey').set('intAttr', 33333).set('stringAttr', '33333').build() bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'intAttr') assert bucket == pytest.approx(0.54771423) bucket2 = _bucket_context(None, user, 'hashKey', 'saltyA', 'stringAttr') assert bucket2 == bucket def test_bucket_by_float_attr_not_allowed(): - user = { - u'key': u'userKey', - u'custom': { - u'floatAttr': 33.5 - } - } + user = Context.builder('userKey').set('floatAttr', 33.5).build() bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'floatAttr') assert bucket == 0.0 def test_seed_independent_of_salt_and_hashKey(): seed = 61 - user = { u'key': u'userKeyA' } + user = Context.create('userKeyA') point1 = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') point2 = _bucket_context(seed, user, 'hashKey', 'saltyB', 'key') point3 = _bucket_context(seed, user, 'hashKey2', 'saltyA', 'key') @@ -459,7 +426,7 @@ def test_seed_independent_of_salt_and_hashKey(): def test_seed_changes_hash_evaluation(): seed1 = 61 - user = { u'key': u'userKeyA' } + user = Context.create('userKeyA') point1 = _bucket_context(seed1, user, 'hashKey', 'saltyA', 'key') seed2 = 62 point2 = _bucket_context(seed2, user, 'hashKey', 'saltyB', 'key') diff --git a/testing/impl/test_evaluator_big_segment.py b/testing/impl/test_evaluator_big_segment.py index 1c60bd2d..8d7eb403 100644 --- a/testing/impl/test_evaluator_big_segment.py +++ b/testing/impl/test_evaluator_big_segment.py @@ -7,7 +7,7 @@ def test_big_segment_with_no_generation_is_not_matched(): segment = { 'key': 'test', - 'included': [ basic_user['key'] ], # included should be ignored for a big segment + 'included': [ basic_user.key ], # included should be ignored for a big segment 'version': 1, 'unbounded': True } diff --git a/testing/impl/test_evaluator_segment.py b/testing/impl/test_evaluator_segment.py index 901aef1f..1d9e6ef7 100644 --- a/testing/impl/test_evaluator_segment.py +++ b/testing/impl/test_evaluator_segment.py @@ -1,5 +1,6 @@ import pytest +from ldclient import Context from testing.impl.evaluator_util import * @@ -16,7 +17,7 @@ def test_explicit_include_user(): "included": [ "foo" ], "version": 1 } - u = { "key": "foo" } + u = Context.create('foo') assert _segment_matches_user(s, u) is True def test_explicit_exclude_user(): @@ -25,7 +26,7 @@ def test_explicit_exclude_user(): "excluded": [ "foo" ], "version": 1 } - u = { "key": "foo" } + u = Context.create('foo') assert _segment_matches_user(s, u) is False def test_explicit_include_has_precedence(): @@ -35,7 +36,7 @@ def 
test_explicit_include_has_precedence(): "excluded": [ "foo" ], "version": 1 } - u = { "key": "foo" } + u = Context.create('foo') assert _segment_matches_user(s, u) is True def test_matching_rule_with_no_weight(): @@ -53,7 +54,7 @@ def test_matching_rule_with_no_weight(): } ] } - u = { "key": "foo", "email": "test@example.com" } + u = Context.builder('foo').set('email', 'test@example.com').build() assert _segment_matches_user(s, u) is True def test_matching_rule_with_none_weight(): @@ -72,7 +73,7 @@ def test_matching_rule_with_none_weight(): } ] } - u = { "key": "foo", "email": "test@example.com" } + u = Context.builder('foo').set('email', 'test@example.com').build() assert _segment_matches_user(s, u) is True def test_matching_rule_with_full_rollout(): @@ -91,7 +92,7 @@ def test_matching_rule_with_full_rollout(): } ] } - u = { "key": "foo", "email": "test@example.com" } + u = Context.builder('foo').set('email', 'test@example.com').build() assert _segment_matches_user(s, u) is True def test_matching_rule_with_zero_rollout(): @@ -110,7 +111,7 @@ def test_matching_rule_with_zero_rollout(): } ] } - u = { "key": "foo", "email": "test@example.com" } + u = Context.builder('foo').set('email', 'test@example.com').build() assert _segment_matches_user(s, u) is False def test_matching_rule_with_multiple_clauses(): @@ -134,7 +135,7 @@ def test_matching_rule_with_multiple_clauses(): } ] } - u = { "key": "foo", "email": "test@example.com", "name": "bob" } + u = Context.builder('foo').name('bob').set('email', 'test@example.com').build() assert _segment_matches_user(s, u) is True def test_non_matching_rule_with_multiple_clauses(): @@ -158,5 +159,5 @@ def test_non_matching_rule_with_multiple_clauses(): } ] } - u = { "key": "foo", "email": "test@example.com", "name": "bob" } + u = Context.builder('foo').name('bob').set('email', 'test@example.com').build() assert _segment_matches_user(s, u) is False diff --git a/testing/test_context.py b/testing/test_context.py index 7492d6b1..0822f5a3 100644 --- a/testing/test_context.py +++ b/testing/test_context.py @@ -74,6 +74,8 @@ def test_custom_attributes(self): assert c.key == 'a' assert c.get('b') is True assert c.get('c') == 'd' + assert c['b'] is True + assert c['c'] == 'd' assert sorted(list(c.custom_attributes)) == ['b', 'c'] def test_set_built_in_attribute_by_name(self): From 980ace00bcdb16d29c5c5efc18b7ddf921e23c5b Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 13:58:05 -0800 Subject: [PATCH 306/356] lint --- ldclient/client.py | 4 ++-- testing/impl/test_evaluator_segment.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 125e272d..dde8b793 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -36,7 +36,7 @@ def _context_to_user_dict(context: Context) -> dict: # temporary helper to allow us to update some parts of the SDK to use Context while others are # still using the user model - ret = {'key': context.key} + ret = {'key': context.key} # type: dict[str, Any] if context.name is not None: ret['name'] = context.name if context.anonymous: @@ -448,7 +448,7 @@ def secure_mode_hash(self, context: Union[Context, dict]) -> str: if not context.valid: log.warning("Context was invalid for secure_mode_hash (%s); returning empty hash" % context.error) return "" - return hmac.new(self._config.sdk_key.encode(), context.fully_qualified_key.encode(), hashlib.sha256).hexdigest() + return hmac.new(str(self._config.sdk_key).encode(), context.fully_qualified_key.encode(), 
hashlib.sha256).hexdigest() @property def big_segment_store_status_provider(self) -> BigSegmentStoreStatusProvider: diff --git a/testing/impl/test_evaluator_segment.py b/testing/impl/test_evaluator_segment.py index 1d9e6ef7..e61beb48 100644 --- a/testing/impl/test_evaluator_segment.py +++ b/testing/impl/test_evaluator_segment.py @@ -4,10 +4,10 @@ from testing.impl.evaluator_util import * -def _segment_matches_user(segment: dict, user: dict) -> bool: +def _segment_matches_user(segment: dict, context: Context) -> bool: e = EvaluatorBuilder().with_segment(segment).build() flag = make_boolean_flag_matching_segment(segment) - result = e.evaluate(flag, user, event_factory) + result = e.evaluate(flag, context, event_factory) return result.detail.value From cbb491cb941bcfec73890068f01c72f35ca0393c Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 14:02:39 -0800 Subject: [PATCH 307/356] in type hints, must use Dict[A, B] rather than dict[A, B] for Python <3.9 --- ldclient/client.py | 2 +- ldclient/context.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index dde8b793..a68c4180 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -36,7 +36,7 @@ def _context_to_user_dict(context: Context) -> dict: # temporary helper to allow us to update some parts of the SDK to use Context while others are # still using the user model - ret = {'key': context.key} # type: dict[str, Any] + ret = {'key': context.key} # type: Dict[str, Any] if context.name is not None: ret['name'] = context.name if context.anonymous: diff --git a/ldclient/context.py b/ldclient/context.py index 7511ea85..3860149a 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -6,7 +6,7 @@ from collections.abc import Iterable import json import re -from typing import Any, Optional, Union +from typing import Any, Dict, Optional, Union __VALID_KIND_REGEX = re.compile('^[-a-zA-Z0-9._]+$') @@ -501,7 +501,7 @@ def to_dict(self) -> dict[str, Any]: """ if not self.valid: return {} - ret = {"kind": self.__kind} # type: dict[str, Any] + ret = {"kind": self.__kind} # type: Dict[str, Any] if self.__multi is not None: ret = {"kind": "multi"} for c in self.__multi: @@ -521,7 +521,7 @@ def to_json_string(self) -> str: return json.dumps(self.to_dict(), separators=(',', ':')) def __to_dict_single(self, with_kind: bool) -> dict[str, Any]: - ret = {"key": self.__key} # type: dict[str, Any] + ret = {"key": self.__key} # type: Dict[str, Any] if with_kind: ret["kind"] = self.__kind if self.__name is not None: @@ -673,7 +673,7 @@ def __init__(self, key: str, copy_from: Optional[Context] = None): self.__kind = Context.DEFAULT_KIND self.__name = None # type: Optional[str] self.__anonymous = False - self.__attributes = None # type: Optional[dict[str, Any]] + self.__attributes = None # type: Optional[Dict[str, Any]] self.__private = None # type: Optional[list[str]] self.__copy_on_write_attrs = False self.__copy_on_write_private = False From 67f18aea143c8b4cb1e2b40b4739fee4c788929f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 16:19:56 -0800 Subject: [PATCH 308/356] support context kind in clauses + enable v2 contract tests --- Makefile | 30 ++++--- contract-tests/client_entity.py | 59 +++++++++++-- contract-tests/service.py | 42 +++++---- ldclient/context.py | 28 +++++- ldclient/impl/evaluator.py | 120 +++++++++++++++----------- testing/builders.py | 45 +++++++++- testing/impl/evaluator_util.py | 31 +++---- testing/impl/test_evaluator.py | 60 ++----------- 
testing/impl/test_evaluator_clause.py | 76 ++++++++++++++++ 9 files changed, 328 insertions(+), 163 deletions(-) create mode 100644 testing/impl/test_evaluator_clause.py diff --git a/Makefile b/Makefile index f09ea3e5..e15cca56 100644 --- a/Makefile +++ b/Makefile @@ -20,16 +20,24 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass # Explanation of current skips: -# - We're preparing to migrate the SDK to U2C behavior, but so far we're still using the non-U2C contract -# tests (v1). -# - The non-U2C tests include alias events, which we have removed, so those tests are disabled. -# - Same for inline users in events. -# - Some custom event tests are disabled because in the v1 test suite, those require inline users. +# - "evaluation/parameterized/prerequisites": Can't pass yet because prerequisite cycle detection is not implemented. +# - various other "evaluation" subtests: These tests require attribute reference support or targeting by kind. +# - "events": These test suites will be unavailable until more of the U2C implementation is done. TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ - -skip 'events/alias' \ - -skip 'events/user properties/inlineUsers=true' \ - -skip 'events/custom events/data and metricValue' \ - -skip 'events/custom events/basic properties/inline user' + -skip 'evaluation/bucketing/bucket by non-key attribute' \ + -skip 'evaluation/bucketing/secondary' \ + -skip 'evaluation/bucketing/selection of context' \ + -skip 'evaluation/parameterized/attribute references' \ + -skip 'evaluation/parameterized/bad attribute reference errors' \ + -skip 'evaluation/parameterized/prerequisites' \ + -skip 'evaluation/parameterized/segment match/included list is specific to user kind' \ + -skip 'evaluation/parameterized/segment match/includedContexts' \ + -skip 'evaluation/parameterized/segment match/excluded list is specific to user kind' \ + -skip 'evaluation/parameterized/segment match/excludedContexts' \ + -skip 'evaluation/parameterized/segment recursion' \ + -skip 'evaluation/parameterized/target match/context targets' \ + -skip 'evaluation/parameterized/target match/multi-kind' \ + -skip 'events' # port 8000 and 9000 is already used in the CI environment because we're # running a DynamoDB container and an SSE contract test @@ -46,8 +54,8 @@ start-contract-test-service-bg: @make start-contract-test-service >$(TEMP_TEST_OUTPUT) 2>&1 & run-contract-tests: - curl -s https://raw.githubusercontent.com/launchdarkly/sdk-test-harness/main/downloader/run.sh \ - | VERSION=v1 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end $(TEST_HARNESS_PARAMS)" sh + curl -s https://raw.githubusercontent.com/launchdarkly/sdk-test-harness/v2/downloader/run.sh \ + | VERSION=v2 PARAMS="-url http://localhost:$(PORT) -debug -stop-service-at-end $(TEST_HARNESS_PARAMS)" sh contract-tests: build-contract-tests start-contract-test-service-bg run-contract-tests diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index a100c245..5f94c75d 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -1,3 +1,4 @@ +import json import logging import os import sys @@ -41,38 +42,78 @@ def __init__(self, tag, config): def is_initializing(self) -> bool: return self.client.is_initialized() - def evaluate(self, params) -> dict: + def evaluate(self, params: dict) -> dict: response = {} if params.get("detail", False): - detail = 
self.client.variation_detail(params["flagKey"], params["user"], params["defaultValue"]) + detail = self.client.variation_detail(params["flagKey"], params["context"], params["defaultValue"]) response["value"] = detail.value response["variationIndex"] = detail.variation_index response["reason"] = detail.reason else: - response["value"] = self.client.variation(params["flagKey"], params["user"], params["defaultValue"]) + response["value"] = self.client.variation(params["flagKey"], params["context"], params["defaultValue"]) return response - def evaluate_all(self, params): + def evaluate_all(self, params: dict): opts = {} opts["client_side_only"] = params.get("clientSideOnly", False) opts["with_reasons"] = params.get("withReasons", False) opts["details_only_for_tracked_flags"] = params.get("detailsOnlyForTrackedFlags", False) - state = self.client.all_flags_state(params["user"], **opts) + state = self.client.all_flags_state(params["context"], **opts) return {"state": state.to_json_dict()} - def track(self, params): - self.client.track(params["eventKey"], params["user"], params["data"], params.get("metricValue", None)) + def track(self, params: dict): + self.client.track(params["eventKey"], params["context"], params["data"], params.get("metricValue", None)) - def identify(self, params): - self.client.identify(params["user"]) + def identify(self, params: dict): + self.client.identify(params["context"]) def flush(self): self.client.flush() + def secure_mode_hash(self, params: dict) -> dict: + return {"result": self.client.secure_mode_hash(params["context"])} + + def context_build(self, params: dict) -> dict: + if params.get("multi"): + b = Context.multi_builder() + for c in params.get("multi"): + b.add(self._context_build_single(c)) + return self._context_response(b.build()) + return self._context_response(self._context_build_single(params["single"])) + + def _context_build_single(self, params: dict) -> Context: + b = Context.builder(params["key"]) + if "kind" in params: + b.kind(params["kind"]) + if "name" in params: + b.name(params["name"]) + if "anonymous" in params: + b.anonymous(params["anonymous"]) + if "custom" in params: + for k, v in params.get("custom").items(): + b.set(k, v) + if "private" in params: + for attr in params.get("private"): + b.private(attr) + return b.build() + + def context_convert(self, params: dict) -> dict: + input = params["input"] + try: + props = json.loads(input) + return self._context_response(Context.from_dict(props)) + except Exception as e: + return {"error": str(e)} + + def _context_response(self, c: Context) -> dict: + if c.valid: + return {"output": c.to_json_string()} + return {"error": c.error} + def close(self): self.client.close() self.log.info('Test ended') diff --git a/contract-tests/service.py b/contract-tests/service.py index 634abbaa..70c923e7 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -64,6 +64,8 @@ def status(): 'all-flags-with-reasons', 'all-flags-client-side-only', 'all-flags-details-only-for-tracked-flags', + 'context-type', + 'secure-mode-hash', ] } return (json.dumps(body), 200, {'Content-type': 'application/json'}) @@ -103,23 +105,33 @@ def post_client_command(id): if client is None: return ('', 404) - if params.get('command') == "evaluate": - response = client.evaluate(params.get("evaluate")) - return (json.dumps(response), 200) - elif params.get("command") == "evaluateAll": - response = client.evaluate_all(params.get("evaluateAll")) - return (json.dumps(response), 200) - elif params.get("command") == 
"customEvent": - client.track(params.get("customEvent")) - return ('', 201) - elif params.get("command") == "identifyEvent": - client.identify(params.get("identifyEvent")) - return ('', 201) - elif params.get('command') == "flushEvents": + command = params.get('command') + sub_params = params.get(command) + + response = None + + if command == "evaluate": + response = client.evaluate(sub_params) + elif command == "evaluateAll": + response = client.evaluate_all(sub_params) + elif command == "customEvent": + client.track(sub_params) + elif command == "identifyEvent": + client.identify(sub_params) + elif command == "flushEvents": client.flush() + elif command == "secureModeHash": + response = client.secure_mode_hash(sub_params) + elif command == "contextBuild": + response = client.context_build(sub_params) + elif command == "contextConvert": + response = client.context_convert(sub_params) + else: + return ('', 400) + + if response is None: return ('', 201) - - return ('', 400) + return (json.dumps(response), 200) @app.route('/clients/', methods=['DELETE']) def delete_client(id): diff --git a/ldclient/context.py b/ldclient/context.py index 3860149a..4ee79f71 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -9,7 +9,8 @@ from typing import Any, Dict, Optional, Union -__VALID_KIND_REGEX = re.compile('^[-a-zA-Z0-9._]+$') +_INVALID_KIND_REGEX = re.compile('[^-a-zA-Z0-9._]') +_USER_STRING_ATTRS = {'name', 'firstName', 'lastName', 'email', 'country', 'avatar', 'ip'} def _escape_key_for_fully_qualified_key(key: str) -> str: # When building a fully-qualified key, ':' and '%' are percent-escaped; we do not use a full @@ -17,11 +18,13 @@ def _escape_key_for_fully_qualified_key(key: str) -> str: return key.replace('%', '%25').replace(':', '%3A') def _validate_kind(kind: str) -> Optional[str]: + if kind == '': + return 'context kind must not be empty' if kind == 'kind': return '"kind" is not a valid context kind' if kind == 'multi': return 'context of kind "multi" must be created with create_multi or multi_builder' - if not __VALID_KIND_REGEX.match(kind): + if _INVALID_KIND_REGEX.search(kind): return 'context kind contains disallowed characters' return None @@ -107,7 +110,7 @@ def __init__( self.__full_key = full_key self.__error = None # type: Optional[str] return - if kind is None or kind == '': + if kind is None: kind = Context.DEFAULT_KIND kind_error = _validate_kind(kind) if kind_error: @@ -542,6 +545,8 @@ def __from_dict_single(self, props: dict, kind: Optional[str]) -> Context: b.kind(kind) for k, v in props.items(): if k == '_meta': + if v is None: + continue if not isinstance(v, dict): return Context.__create_with_schema_type_error(k) p = v.get("privateAttributes") @@ -563,18 +568,30 @@ def __from_dict_old_user(self, props: dict) -> Context: has_key = False for k, v in props.items(): if k == 'custom': + if v is None: + continue if not isinstance(v, dict): return Context.__create_with_schema_type_error(k) for k1, v1 in v.items(): b.set(k1, v1) elif k == 'privateAttributeNames': + if v is None: + continue if not isinstance(v, list): return Context.__create_with_schema_type_error(k) for pa in v: if not isinstance(pa, str): return Context.__create_with_schema_type_error(k) b.private(pa) + elif k in _USER_STRING_ATTRS: + if v is None: + continue + if not isinstance(v, str): + return Context.__create_with_schema_type_error(k) + b.set(k, v) else: + if k == 'anonymous' and v is None: + v = False # anonymous: null was allowed in the old user model if not b.try_set(k, v): return 
Context.__create_with_schema_type_error(k) if k == 'key': @@ -860,7 +877,10 @@ def try_set(self, attribute: str, value: Any) -> bool: self.__attributes = self.__attributes and self.__attributes.copy() if self.__attributes is None: self.__attributes = {} - self.__attributes[attribute] = value + if value is None: + self.__attributes.pop(attribute, None) + else: + self.__attributes[attribute] = value return True def private(self, *attributes: str) -> ContextBuilder: diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 2985a3db..6407d206 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -4,10 +4,11 @@ from ldclient.impl.event_factory import _EventFactory from ldclient.util import stringify_attrs +import json from collections import namedtuple import hashlib import logging -from typing import Callable, Optional, Tuple +from typing import Any, Callable, List, Optional, Tuple # For consistency with past logging behavior, we are pretending that the evaluation logic still lives in # the ldclient.evaluation module. @@ -119,19 +120,67 @@ def _rule_matches_context(self, rule: dict, context: Context, state: EvalResult) return True def _clause_matches_context(self, clause: dict, context: Context, state: EvalResult): - if clause.get('op') == 'segmentMatch': + op = clause['op'] + if op == 'segmentMatch': for seg_key in clause.get('values') or []: segment = self.__get_segment(seg_key) if segment is not None and self._segment_matches_context(segment, context, state): return _maybe_negate(clause, True) return _maybe_negate(clause, False) - else: - return _clause_matches_context_no_segments(clause, context) + + attr = clause.get('attribute') + if attr is None: + return False + if attr == 'kind': + return _maybe_negate(clause, _match_clause_by_kind(clause, context)) + actual_context = context.get_individual_context(clause.get('contextKind') or Context.DEFAULT_KIND) + if actual_context is None: + return False + context_value = actual_context.get(attr) + if context_value is None: + return None + clause_values = clause.get('values') or [] + + # is the attr an array? 
+ if isinstance(context_value, (list, tuple)): + for v in context_value: + if _match_single_context_value(op, v, clause_values): + return _maybe_negate(clause, True) + return _maybe_negate(clause, False) + return _maybe_negate(clause, _match_single_context_value(op, context_value, clause_values)) def _segment_matches_context(self, segment: dict, context: Context, state: EvalResult): if segment.get('unbounded', False): return self._big_segment_match_context(segment, context, state) - return _simple_segment_match_context(segment, context, True) + return self._simple_segment_match_context(segment, context, state, True) + + def _simple_segment_match_context(self, segment: dict, context: Context, state: EvalResult, use_includes_and_excludes: bool): + key = context.key + if key is not None: + if use_includes_and_excludes: + if key in segment.get('included', []): + return True + if key in segment.get('excluded', []): + return False + for rule in segment.get('rules', []): + if self._segment_rule_matches_context(rule, context, state, segment['key'], segment.get('salt', '')): + return True + return False + + def _segment_rule_matches_context(self, rule: dict, context: Context, state: EvalResult, segment_key: str, salt: str): + for clause in rule.get('clauses') or []: + if not self._clause_matches_context(clause, context, state): + return False + + # If the weight is absent, this rule matches + if 'weight' not in rule or rule['weight'] is None: + return True + + # All of the clauses are met. See if the context buckets in + bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy'] + bucket = _bucket_context(None, context, segment_key, salt, bucket_by) + weight = rule['weight'] / 100000.0 + return bucket < weight def _big_segment_match_context(self, segment: dict, context: Context, state: EvalResult): generation = segment.get('generation', None) @@ -150,7 +199,7 @@ def _big_segment_match_context(self, segment: dict, context: Context, state: Eva included = None if membership is None else membership.get(segment_ref, None) if included is not None: return included - return _simple_segment_match_context(segment, context, False) + return self._simple_segment_match_context(segment, context, state, False) # The following functions are declared outside Evaluator because they do not depend on any @@ -238,54 +287,23 @@ def _bucketable_string_value(u_value): return None -def _clause_matches_context_no_segments(clause, context): - attr = clause.get('attribute') - if attr is None: +def _match_single_context_value(op: str, context_value: Any, values: List[Any]) -> bool: + op_fn = operators.ops.get(op) + if op_fn is None: return False - context_value = context.get(attr) - if context_value is None: - return None - # is the attr an array? 
- op_fn = operators.ops[clause['op']] - if isinstance(context_value, (list, tuple)): - for v in context_value: - if _match_any(op_fn, v, clause.get('values') or []): - return _maybe_negate(clause, True) - return _maybe_negate(clause, False) - else: - return _maybe_negate(clause, _match_any(op_fn, context_value, clause.get('values') or [])) - -def _simple_segment_match_context(segment, context, use_includes_and_excludes): - key = context.key - if key is not None: - if use_includes_and_excludes: - if key in segment.get('included', []): - return True - if key in segment.get('excluded', []): - return False - for rule in segment.get('rules', []): - if _segment_rule_matches_context(rule, context, segment.get('key'), segment.get('salt')): - return True + for v in values: + if op_fn(context_value, v): + return True return False -def _segment_rule_matches_context(rule, context, segment_key, salt): - for clause in rule.get('clauses') or []: - if not _clause_matches_context_no_segments(clause, context): - return False - - # If the weight is absent, this rule matches - if 'weight' not in rule or rule['weight'] is None: - return True - - # All of the clauses are met. See if the context buckets in - bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy'] - bucket = _bucket_context(None, context, segment_key, salt, bucket_by) - weight = rule['weight'] / 100000.0 - return bucket < weight - -def _match_any(op_fn, u, vals): - for v in vals: - if op_fn(u, v): +def _match_clause_by_kind(clause: dict, context: Context) -> bool: + # If attribute is "kind", then we treat operator and values as a match expression against a list + # of all individual kinds in the context. That is, for a multi-kind context with kinds of "org" + # and "user", it is a match if either of those strings is a match with Operator and Values. 
+ op = clause['op'] + for i in range(context.individual_context_count): + c = context.get_individual_context(i) + if c is not None and _match_single_context_value(op, c.kind, clause.get('values') or []): return True return False diff --git a/testing/builders.py b/testing/builders.py index df0c212f..2feaf94f 100644 --- a/testing/builders.py +++ b/testing/builders.py @@ -35,9 +35,52 @@ def on(self, on: bool) -> FlagBuilder: def variations(self, *variations: Any) -> FlagBuilder: return self._set('variations', list(variations)) - def offVariation(self, value: Optional[int]) -> FlagBuilder: + def off_variation(self, value: Optional[int]) -> FlagBuilder: return self._set('offVariation', value) + def fallthrough_variation(self, index: int) -> FlagBuilder: + return self._set('fallthrough', {'variation': index}) + def target(self, variation: int, *keys: str) -> FlagBuilder: self.__data['targets'].append({'variation': variation, 'values': list(keys)}) return self + + def rules(self, *rules: dict) -> FlagBuilder: + for r in rules: + self.__data['rules'].append(r) + return self + + +class FlagRuleBuilder: + def __init__(self): + self.__data = {'clauses': []} + + def build(self) -> dict: + return self.__data.copy() + + def variation(self, variation: int) -> FlagRuleBuilder: + self.__data['variation'] = variation + return self + + def clauses(self, *clauses: dict) -> FlagRuleBuilder: + for c in clauses: + self.__data['clauses'].append(c) + return self + + +def make_boolean_flag_with_clauses(*clauses: dict) -> dict: + return make_boolean_flag_with_rules(FlagRuleBuilder().clauses(*clauses).variation(0).build()) + +def make_boolean_flag_with_rules(*rules: dict) -> dict: + return FlagBuilder('flagkey').on(True).variations(True, False).fallthrough_variation(1).rules(*rules).build() + +def make_clause(context_kind: Optional[str], attr: str, op: str, *values: Any) -> dict: + ret = {'attribute': attr, 'op': op, 'values': list(values)} + if context_kind is not None: + ret['contextKind'] = context_kind + return ret + +def negate_clause(clause: dict) -> dict: + c = clause.copy() + c['negate'] = not c.get('negate') + return c diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index 8195bf1d..99367ee7 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -2,8 +2,9 @@ from ldclient.evaluation import BigSegmentsStatus from ldclient.impl.evaluator import Evaluator, _make_big_segment_ref from ldclient.impl.event_factory import _EventFactory +from testing.builders import * -from typing import Optional, Tuple, Union +from typing import Any, Optional, Tuple, Union basic_user = Context.create('user-key') event_factory = _EventFactory(False) @@ -71,26 +72,18 @@ def _get_big_segments_membership(self, key: str) -> Tuple[Optional[dict], str]: basic_evaluator = EvaluatorBuilder().build() -def make_boolean_flag_with_rules(rules) -> dict: - return { - 'key': 'feature', - 'on': True, - 'rules': rules, - 'fallthrough': { 'variation': 0 }, - 'variations': [ False, True ], - 'salt': '' - } - -def make_boolean_flag_with_clause(clause: dict) -> dict: - return make_boolean_flag_with_rules([ - { - 'clauses': [ clause ], - 'variation': 1 - } - ]) +def assert_eval_result(result, expected_detail, expected_events): + assert result.detail == expected_detail + assert result.events == expected_events + + +def assert_match(evaluator: Evaluator, flag: dict, context: Context, expect_value: Any): + result = evaluator.evaluate(flag, context, event_factory) + assert result.detail.value == 
expect_value
+
 
 def make_boolean_flag_matching_segment(segment: dict) -> dict:
-    return make_boolean_flag_with_clause({
+    return make_boolean_flag_with_clauses({
         'attribute': '',
         'op': 'segmentMatch',
         'values': [ segment['key'] ]
diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py
index 6192c0c4..6591c383 100644
--- a/testing/impl/test_evaluator.py
+++ b/testing/impl/test_evaluator.py
@@ -6,11 +6,6 @@
 from testing.impl.evaluator_util import *
 
 
-def assert_eval_result(result, expected_detail, expected_events):
-    assert result.detail == expected_detail
-    assert result.events == expected_events
-
-
 def test_flag_returns_off_variation_if_flag_is_off():
     flag = {
         'key': 'feature',
@@ -204,29 +199,29 @@ def test_flag_matches_user_from_targets():
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_flag_matches_user_from_rules():
-    rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 1}
-    flag = make_boolean_flag_with_rules([rule])
+    rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 0}
+    flag = make_boolean_flag_with_rules(rule)
     user = Context.create('userkey')
-    detail = EvaluationDetail(True, 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'id'})
+    detail = EvaluationDetail(True, 0, {'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'id'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_flag_returns_error_if_rule_variation_is_too_high():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 999}
-    flag = make_boolean_flag_with_rules([rule])
+    flag = make_boolean_flag_with_rules(rule)
     user = Context.create('userkey')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_flag_returns_error_if_rule_variation_is_negative():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': -1}
-    flag = make_boolean_flag_with_rules([rule])
+    flag = make_boolean_flag_with_rules(rule)
     user = Context.create('userkey')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_flag_returns_error_if_rule_has_no_variation_or_rollout():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}]}
-    flag = make_boolean_flag_with_rules([rule])
+    flag = make_boolean_flag_with_rules(rule)
     user = Context.create('userkey')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
@@ -234,7 +229,7 @@ def test_flag_returns_error_if_rule_has_no_variation_or_rollout():
 def test_flag_returns_error_if_rule_has_rollout_with_no_variations():
     rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}],
         'rollout': {'variations': []} }
-    flag = make_boolean_flag_with_rules([rule])
+    flag = make_boolean_flag_with_rules(rule)
     user = Context.create('userkey')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
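
The helpers added in this commit compose directly: make_clause builds a clause dict (optionally scoped to a context kind), make_boolean_flag_with_clauses wraps it in a single-rule boolean flag whose matching variation is True, and assert_match runs it through the evaluator. A minimal sketch with made-up keys and names, under those assumptions:

    from ldclient import Context
    from testing.builders import make_boolean_flag_with_clauses, make_clause
    from testing.impl.evaluator_util import assert_match, basic_evaluator

    # variation 0 of make_boolean_flag_with_clauses is True, so a matching
    # 'company'-kind clause should yield True for a matching company context
    flag = make_boolean_flag_with_clauses(make_clause('company', 'name', 'in', 'Catco'))
    context = Context.builder('cc').kind('company').name('Catco').build()
    assert_match(basic_evaluator, flag, context, True)

@@ -293,47 +288,6 @@ def 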
test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found( assert evaluator.evaluate(flag, user, event_factory).detail.value == False -def test_clause_matches_builtin_attribute(): - clause = { - 'attribute': 'name', - 'op': 'in', - 'values': [ 'Bob' ] - } - user = Context.builder('x').name('Bob').build() - flag = make_boolean_flag_with_clause(clause) - assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True - -def test_clause_matches_custom_attribute(): - clause = { - 'attribute': 'legs', - 'op': 'in', - 'values': [ 4 ] - } - user = Context.builder('x').name('Bob').set('legs', 4).build() - flag = make_boolean_flag_with_clause(clause) - assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == True - -def test_clause_returns_false_for_missing_attribute(): - clause = { - 'attribute': 'legs', - 'op': 'in', - 'values': [ 4 ] - } - user = Context.builder('x').name('Bob').build() - flag = make_boolean_flag_with_clause(clause) - assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == False - -def test_clause_can_be_negated(): - clause = { - 'attribute': 'name', - 'op': 'in', - 'values': [ 'Bob' ], - 'negate': True - } - user = Context.builder('x').name('Bob').build() - flag = make_boolean_flag_with_clause(clause) - assert basic_evaluator.evaluate(flag, user, event_factory).detail.value == False - def test_variation_index_is_returned_for_bucket(): user = Context.create('userkey') flag = { 'key': 'flagkey', 'salt': 'salt' } diff --git a/testing/impl/test_evaluator_clause.py b/testing/impl/test_evaluator_clause.py new file mode 100644 index 00000000..5b492f40 --- /dev/null +++ b/testing/impl/test_evaluator_clause.py @@ -0,0 +1,76 @@ +import math +import pytest +from ldclient.client import Context +from ldclient.evaluation import EvaluationDetail +from ldclient.impl.evaluator import _bucket_context, _variation_index_for_context +from testing.builders import * +from testing.impl.evaluator_util import * + + +def assert_match_clause(clause: dict, context: Context, should_match: bool): + assert_match(basic_evaluator, make_boolean_flag_with_clauses(clause), context, should_match) + + +class TestEvaluatorClause: + def test_match_built_in_attribute(self): + clause = make_clause(None, 'name', 'in', 'Bob') + context = Context.builder('key').name('Bob').build() + assert_match_clause(clause, context, True) + + def test_match_custom_attribute(self): + clause = make_clause(None, 'legs', 'in', 4) + context = Context.builder('key').set('legs', 4).build() + assert_match_clause(clause, context, True) + + def test_missing_attribute(self): + clause = make_clause(None, 'legs', 'in', '4') + context = Context.create('key') + assert_match_clause(clause, context, False) + + def test_match_context_value_to_any_of_multiple_values(self): + clause = make_clause(None, 'name', 'in', 'Bob', 'Carol') + context = Context.builder('key').name('Carol').build() + assert_match_clause(clause, context, True) + + def test_match_array_of_context_values_to_clause_value(self): + clause = make_clause(None, 'alias', 'in', 'Maurice') + context = Context.builder('key').set('alias', ['Space Cowboy', 'Maurice']).build() + assert_match_clause(clause, context, True) + + def test_no_match_in_array_of_context_values(self): + clause = make_clause(None, 'alias', 'in', 'Ma') + context = Context.builder('key').set('alias', ['Mary', 'May']).build() + assert_match_clause(clause, context, False) + + def test_negated_to_return_false(self): + clause = negate_clause(make_clause(None, 
'name', 'in', 'Bob')) + context = Context.builder('key').name('Bob').build() + assert_match_clause(clause, context, False) + + def test_negated_to_return_true(self): + clause = negate_clause(make_clause(None, 'name', 'in', 'Bobby')) + context = Context.builder('key').name('Bob').build() + assert_match_clause(clause, context, True) + + def test_unknown_operator_does_not_match(self): + clause = make_clause(None, 'name', 'doesSomethingUnsupported', 'Bob') + context = Context.builder('key').name('Bob').build() + assert_match_clause(clause, context, False) + + def test_clause_match_uses_context_kind(self): + clause = make_clause('company', 'name', 'in', 'Catco') + context1 = Context.builder('cc').kind('company').name('Catco').build() + context2 = Context.builder('l').name('Lucy').build() + context3 = Context.create_multi(context1, context2) + assert_match_clause(clause, context1, True) + assert_match_clause(clause, context2, False) + assert_match_clause(clause, context3, True) + + def test_clause_match_by_kind_attribute(self): + clause = make_clause(None, 'kind', 'startsWith', 'a') + context1 = Context.create('key') + context2 = Context.create('key', 'ab') + context3 = Context.create_multi(Context.create('key', 'cd'), context2) + assert_match_clause(clause, context1, False) + assert_match_clause(clause, context2, True) + assert_match_clause(clause, context3, True) From e925cdd72858a472873ada6631ce045f0cf6eb89 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 16:28:04 -0800 Subject: [PATCH 309/356] misc fixes --- ldclient/context.py | 28 ++++++++++++++++++++++++---- testing/test_context.py | 2 +- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/ldclient/context.py b/ldclient/context.py index ad9af1e6..40960124 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -9,7 +9,8 @@ from typing import Any, Optional, Union -__VALID_KIND_REGEX = re.compile('^[-a-zA-Z0-9._]+$') +_INVALID_KIND_REGEX = re.compile('[^-a-zA-Z0-9._]') +_USER_STRING_ATTRS = {'name', 'firstName', 'lastName', 'email', 'country', 'avatar', 'ip'} def _escape_key_for_fully_qualified_key(key: str) -> str: # When building a fully-qualified key, ':' and '%' are percent-escaped; we do not use a full @@ -17,11 +18,13 @@ def _escape_key_for_fully_qualified_key(key: str) -> str: return key.replace('%', '%25').replace(':', '%3A') def _validate_kind(kind: str) -> Optional[str]: + if kind == '': + return 'context kind must not be empty' if kind == 'kind': return '"kind" is not a valid context kind' if kind == 'multi': return 'context of kind "multi" must be created with create_multi or multi_builder' - if not __VALID_KIND_REGEX.match(kind): + if _INVALID_KIND_REGEX.search(kind): return 'context kind contains disallowed characters' return None @@ -107,7 +110,7 @@ def __init__( self.__full_key = full_key self.__error = None # type: Optional[str] return - if kind is None or kind == '': + if kind is None: kind = Context.DEFAULT_KIND kind_error = _validate_kind(kind) if kind_error: @@ -537,6 +540,8 @@ def __from_dict_single(self, props: dict, kind: Optional[str]) -> Context: b.kind(kind) for k, v in props.items(): if k == '_meta': + if v is None: + continue if not isinstance(v, dict): return Context.__create_with_schema_type_error(k) p = v.get("privateAttributes") @@ -558,18 +563,30 @@ def __from_dict_old_user(self, props: dict) -> Context: has_key = False for k, v in props.items(): if k == 'custom': + if v is None: + continue if not isinstance(v, dict): return Context.__create_with_schema_type_error(k) for k1, 
v1 in v.items(): b.set(k1, v1) elif k == 'privateAttributeNames': + if v is None: + continue if not isinstance(v, list): return Context.__create_with_schema_type_error(k) for pa in v: if not isinstance(pa, str): return Context.__create_with_schema_type_error(k) b.private(pa) + elif k in _USER_STRING_ATTRS: + if v is None: + continue + if not isinstance(v, str): + return Context.__create_with_schema_type_error(k) + b.set(k, v) else: + if k == 'anonymous' and v is None: + v = False # anonymous: null was allowed in the old user model if not b.try_set(k, v): return Context.__create_with_schema_type_error(k) if k == 'key': @@ -852,7 +869,10 @@ def try_set(self, attribute: str, value: Any) -> bool: self.__attributes = self.__attributes and self.__attributes.copy() if self.__attributes is None: self.__attributes = {} - self.__attributes[attribute] = value + if value is None: + self.__attributes.pop(attribute, None) + else: + self.__attributes[attribute] = value return True def private(self, *attributes: str) -> ContextBuilder: diff --git a/testing/test_context.py b/testing/test_context.py index 7492d6b1..7281b3f3 100644 --- a/testing/test_context.py +++ b/testing/test_context.py @@ -259,7 +259,7 @@ def test_key_empty_string(self): assert_context_invalid(Context.create('')) assert_context_invalid(Context.builder('').build()) - @pytest.mark.parametrize('kind', ['kind', 'multi', 'b$c']) + @pytest.mark.parametrize('kind', ['kind', 'multi', 'b$c', '']) def test_kind_invalid_strings(self, kind): assert_context_invalid(Context.create('a', kind)) assert_context_invalid(Context.builder('a').kind(kind).build()) From 0ccecc2e12d1e3636b10d1403476bd6c1cd2147d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 9 Dec 2022 16:36:38 -0800 Subject: [PATCH 310/356] misc fixes --- ldclient/client.py | 26 +++++--------------------- ldclient/impl/evaluator.py | 30 +++++++++++++++++++++++++++--- testing/impl/test_evaluator.py | 8 ++++---- 3 files changed, 36 insertions(+), 28 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index a68c4180..335cc265 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -19,7 +19,7 @@ from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.evaluation import EvaluationDetail, FeatureFlagsState from ldclient.impl.big_segments import BigSegmentStoreManager -from ldclient.impl.evaluator import Evaluator, error_reason +from ldclient.impl.evaluator import Evaluator, error_reason, _context_to_user_dict from ldclient.impl.event_factory import _EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureRequester, FeatureStore @@ -33,26 +33,6 @@ from threading import Lock -def _context_to_user_dict(context: Context) -> dict: - # temporary helper to allow us to update some parts of the SDK to use Context while others are - # still using the user model - ret = {'key': context.key} # type: Dict[str, Any] - if context.name is not None: - ret['name'] = context.name - if context.anonymous: - ret['anonymous'] = True - custom = None - for attr in context.custom_attributes: - if custom is None: - custom = {} - custom[attr] = context.get(attr) - if custom is not None: - ret['custom'] = custom - private = list(context.private_attributes) - if len(private) != 0: - ret['privateAttributeNames'] = private - return ret - class _FeatureStoreClientWrapper(FeatureStore): """Provides additional behavior that the client requires before or after feature store operations. 
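(Editor's aside, not part of the patches: a minimal sketch of the old-user-schema conversion rules added to context.py above. It assumes Context.from_dict routes dicts without a 'kind' through the old-user path, as these hunks imply; the sample dicts are illustrative, not taken from the SDK's test suite.)

    from ldclient.context import Context

    # Old-style user: custom attributes nested under 'custom', private attribute
    # names listed separately, and 'anonymous': None tolerated (treated as False).
    old_user = {
        'key': 'user-1',
        'name': 'Bob',
        'custom': {'legs': 4},
        'privateAttributeNames': ['legs'],
        'anonymous': None,
    }
    context = Context.from_dict(old_user)
    assert context.valid
    assert context.get('legs') == 4
    assert context.anonymous is False

    # Built-in user attributes such as 'email' must be strings under the old
    # schema, so a non-string value yields an invalid context, not an exception.
    assert not Context.from_dict({'key': 'user-2', 'email': 33}).valid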
@@ -242,6 +222,10 @@ def identify(self, context: Union[Context, dict]): context = Context.from_dict(context) if not context.valid: log.warning("Invalid context for identify (%s)" % context.error) + elif context.key == '' and not context.multiple: + # This could be a valid context for evaluations (if it was using the old user schema) + # but an identify event with an empty key is no good. + log.warning("Empty user key for identify") else: self._send_event(self._event_factory_default.new_identify_event(_context_to_user_dict(context))) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 2985a3db..d07191a8 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -1,5 +1,5 @@ from ldclient import operators -from ldclient.context import Context +from ldclient.context import Context, _USER_STRING_ATTRS from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.impl.event_factory import _EventFactory from ldclient.util import stringify_attrs @@ -7,7 +7,7 @@ from collections import namedtuple import hashlib import logging -from typing import Callable, Optional, Tuple +from typing import Any, Callable, Dict, Optional, Tuple # For consistency with past logging behavior, we are pretending that the evaluation logic still lives in # the ldclient.evaluation module. @@ -19,6 +19,30 @@ "firstName", "lastName", "avatar", "name", "anonymous"] +def _context_to_user_dict(context: Context) -> dict: + # temporary helper to allow us to update some parts of the SDK to use Context while others are + # still using the user model + ret = {'key': context.key} # type: Dict[str, Any] + if context.name is not None: + ret['name'] = context.name + if context.anonymous: + ret['anonymous'] = True + custom = None + for attr in context.custom_attributes: + if attr in _USER_STRING_ATTRS: + ret[attr] = context.get(attr) + continue + if custom is None: + custom = {} + custom[attr] = context.get(attr) + if custom is not None: + ret['custom'] = custom + private = list(context.private_attributes) + if len(private) != 0: + ret['privateAttributeNames'] = private + return ret + + # EvalResult is used internally to hold the EvaluationDetail result of an evaluation along with # other side effects that are not exposed to the application, such as events generated by # prerequisite evaluations, and the cached state of any Big Segments query that we may have @@ -105,7 +129,7 @@ def _check_prerequisites(self, flag: dict, context: Context, state: EvalResult, # off variation was. But we still need to evaluate it in order to generate an event. 
if (not prereq_flag.get('on', False)) or prereq_res.variation_index != prereq.get('variation'): failed_prereq = prereq - event = event_factory.new_eval_event(prereq_flag, context, prereq_res, None, flag) + event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag) state.add_event(event) if failed_prereq: return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.get('key')} diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py index 6192c0c4..a7739b78 100644 --- a/testing/impl/test_evaluator.py +++ b/testing/impl/test_evaluator.py @@ -2,7 +2,7 @@ import pytest from ldclient.client import Context from ldclient.evaluation import EvaluationDetail -from ldclient.impl.evaluator import _bucket_context, _variation_index_for_context +from ldclient.impl.evaluator import _bucket_context, _context_to_user_dict, _variation_index_for_context from testing.impl.evaluator_util import * @@ -92,7 +92,7 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, - 'version': 2, 'user': user, 'prereqOf': 'feature0'}] + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): @@ -117,7 +117,7 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, - 'version': 2, 'user': user, 'prereqOf': 'feature0'}] + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): @@ -142,7 +142,7 @@ def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_ru user = Context.create('x') detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, - 'version': 2, 'user': user, 'prereqOf': 'feature0'}] + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_error_if_fallthrough_variation_is_too_high(): From 16e0efdfa549386883851e4457771c2529f18105 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 10 Dec 2022 11:31:08 -0800 Subject: [PATCH 311/356] support contextTargets --- Makefile | 2 - ldclient/impl/evaluator.py | 80 ++++++++++++++++++--------- testing/builders.py | 5 ++ testing/impl/test_evaluator.py | 13 ----- testing/impl/test_evaluator_target.py | 80 +++++++++++++++++++++++++++ 5 files changed, 140 insertions(+), 40 deletions(-) create mode 100644 testing/impl/test_evaluator_target.py diff --git a/Makefile b/Makefile index e15cca56..26d49f78 100644 --- a/Makefile +++ b/Makefile @@ -35,8 +35,6 @@ TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ -skip 'evaluation/parameterized/segment match/excluded list is specific to 
user kind' \ -skip 'evaluation/parameterized/segment match/excludedContexts' \ -skip 'evaluation/parameterized/segment recursion' \ - -skip 'evaluation/parameterized/target match/context targets' \ - -skip 'evaluation/parameterized/target match/multi-kind' \ -skip 'events' # port 8000 and 9000 is already used in the CI environment because we're diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 578bd28b..9c8f92ba 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -2,10 +2,7 @@ from ldclient.context import Context, _USER_STRING_ATTRS from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.impl.event_factory import _EventFactory -from ldclient.util import stringify_attrs -import json -from collections import namedtuple import hashlib import logging from typing import Any, Callable, Dict, List, Optional, Tuple @@ -92,7 +89,7 @@ def evaluate(self, flag: dict, context: Context, event_factory: _EventFactory) - state.detail.reason['bigSegmentsStatus'] = state.big_segments_status return state - def _evaluate(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory): + def _evaluate(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory) -> EvaluationDetail: if not flag.get('on', False): return _get_off_value(flag, {'kind': 'OFF'}) @@ -101,10 +98,9 @@ def _evaluate(self, flag: dict, context: Context, state: EvalResult, event_facto return _get_off_value(flag, prereq_failure_reason) # Check to see if any context targets match: - for target in flag.get('targets') or []: - for value in target.get('values') or []: - if value == context.key: - return _get_variation(flag, target.get('variation'), {'kind': 'TARGET_MATCH'}) + target_result = self._check_targets(flag, context) + if target_result is not None: + return target_result # Now walk through the rules to see if any match for index, rule in enumerate(flag.get('rules') or []): @@ -113,10 +109,9 @@ def _evaluate(self, flag: dict, context: Context, state: EvalResult, event_facto {'kind': 'RULE_MATCH', 'ruleIndex': index, 'ruleId': rule.get('id')}) # Walk through fallthrough and see if it matches - if flag.get('fallthrough') is not None: - return _get_value_for_variation_or_rollout(flag, flag['fallthrough'], context, {'kind': 'FALLTHROUGH'}) + return _get_value_for_variation_or_rollout(flag, flag['fallthrough'] or {}, context, {'kind': 'FALLTHROUGH'}) - def _check_prerequisites(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory): + def _check_prerequisites(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory) -> Optional[dict]: failed_prereq = None prereq_res = None for prereq in flag.get('prerequisites') or []: @@ -136,14 +131,46 @@ def _check_prerequisites(self, flag: dict, context: Context, state: EvalResult, return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.get('key')} return None - def _rule_matches_context(self, rule: dict, context: Context, state: EvalResult): + def _check_targets(self, flag: dict, context: Context) -> Optional[EvaluationDetail]: + user_targets = flag.get('targets') or [] + context_targets = flag.get('contextTargets') or [] + if len(context_targets) == 0: + # old-style data has only targets for users + if len(user_targets) != 0: + user_context = context.get_individual_context(Context.DEFAULT_KIND) + if (user_context is None): + return None + key = user_context.key + for t in user_targets: + if key in t['values']: + 
return _target_match_result(flag, t.get('variation')) + return None + for t in context_targets: + kind = t.get('contextKind') or Context.DEFAULT_KIND + var = t['variation'] + actual_context = context.get_individual_context(kind) + if actual_context is None: + continue + key = actual_context.key + if kind == Context.DEFAULT_KIND: + for ut in user_targets: + if ut['variation'] == var: + if key in ut['values']: + return _target_match_result(flag, var) + break + continue + if key in t['values']: + return _target_match_result(flag, var) + return None + + def _rule_matches_context(self, rule: dict, context: Context, state: EvalResult) -> bool: for clause in rule.get('clauses') or []: if clause.get('attribute') is not None: if not self._clause_matches_context(clause, context, state): return False return True - def _clause_matches_context(self, clause: dict, context: Context, state: EvalResult): + def _clause_matches_context(self, clause: dict, context: Context, state: EvalResult) -> bool: op = clause['op'] if op == 'segmentMatch': for seg_key in clause.get('values') or []: @@ -162,7 +189,7 @@ def _clause_matches_context(self, clause: dict, context: Context, state: EvalRes return False context_value = actual_context.get(attr) if context_value is None: - return None + return False clause_values = clause.get('values') or [] # is the attr an array? @@ -173,12 +200,12 @@ def _clause_matches_context(self, clause: dict, context: Context, state: EvalRes return _maybe_negate(clause, False) return _maybe_negate(clause, _match_single_context_value(op, context_value, clause_values)) - def _segment_matches_context(self, segment: dict, context: Context, state: EvalResult): + def _segment_matches_context(self, segment: dict, context: Context, state: EvalResult) -> bool: if segment.get('unbounded', False): return self._big_segment_match_context(segment, context, state) return self._simple_segment_match_context(segment, context, state, True) - def _simple_segment_match_context(self, segment: dict, context: Context, state: EvalResult, use_includes_and_excludes: bool): + def _simple_segment_match_context(self, segment: dict, context: Context, state: EvalResult, use_includes_and_excludes: bool) -> bool: key = context.key if key is not None: if use_includes_and_excludes: @@ -191,7 +218,7 @@ def _simple_segment_match_context(self, segment: dict, context: Context, state: return True return False - def _segment_rule_matches_context(self, rule: dict, context: Context, state: EvalResult, segment_key: str, salt: str): + def _segment_rule_matches_context(self, rule: dict, context: Context, state: EvalResult, segment_key: str, salt: str) -> bool: for clause in rule.get('clauses') or []: if not self._clause_matches_context(clause, context, state): return False @@ -206,7 +233,7 @@ def _segment_rule_matches_context(self, rule: dict, context: Context, state: Eva weight = rule['weight'] / 100000.0 return bucket < weight - def _big_segment_match_context(self, segment: dict, context: Context, state: EvalResult): + def _big_segment_match_context(self, segment: dict, context: Context, state: EvalResult) -> bool: generation = segment.get('generation', None) if generation is None: # Big segment queries can only be done if the generation is known. If it's unset, @@ -229,19 +256,19 @@ def _big_segment_match_context(self, segment: dict, context: Context, state: Eva # The following functions are declared outside Evaluator because they do not depend on any # of Evaluator's state. 
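# (Editor's aside, not part of the patch: a worked example of the contextTargets
#  ordering that _check_targets above implements, using hand-written flag data.
#  Given:
#      'targets':        [{'variation': 2, 'values': ['bob']}]
#      'contextTargets': [{'contextKind': 'org', 'variation': 1, 'values': ['acme']},
#                         {'contextKind': 'user', 'variation': 2, 'values': []}]
#  the context targets are checked in order. For a multi-kind context whose 'org'
#  key is 'acme', the first entry matches and variation 1 wins. For a plain user
#  context 'bob', the 'org' entry is skipped (no 'org' context present) and the
#  'user' entry, having no values of its own, defers to the old-style 'targets'
#  list for the same variation, returning variation 2.)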
-def _get_variation(flag, variation, reason): +def _get_variation(flag: dict, variation: int, reason: dict) -> EvaluationDetail: vars = flag.get('variations') or [] if variation < 0 or variation >= len(vars): return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) return EvaluationDetail(vars[variation], variation, reason) -def _get_off_value(flag, reason): +def _get_off_value(flag: dict, reason: dict) -> EvaluationDetail: off_var = flag.get('offVariation') if off_var is None: return EvaluationDetail(None, None, reason) return _get_variation(flag, off_var, reason) -def _get_value_for_variation_or_rollout(flag, vr, context, reason): +def _get_value_for_variation_or_rollout(flag: dict, vr: dict, context: Context, reason: dict) -> EvaluationDetail: index, inExperiment = _variation_index_for_context(flag, vr, context) if index is None: return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) @@ -249,7 +276,7 @@ def _get_value_for_variation_or_rollout(flag, vr, context, reason): reason['inExperiment'] = inExperiment return _get_variation(flag, index, reason) -def _variation_index_for_context(feature, rule, context): +def _variation_index_for_context(feature: dict, rule: dict, context: Context) -> Tuple[Optional[int], bool]: if rule.get('variation') is not None: return (rule['variation'], False) @@ -281,7 +308,7 @@ def _variation_index_for_context(feature, rule, context): return (None, False) -def _bucket_context(seed, context, key, salt, bucket_by): +def _bucket_context(seed, context, key, salt, bucket_by) -> float: clause_value = context.get(bucket_by or 'key') if clause_value is None: return 0.0 @@ -303,7 +330,7 @@ def _bucket_context(seed, context, key, salt, bucket_by): result = hash_val / __LONG_SCALE__ return result -def _bucketable_string_value(u_value): +def _bucketable_string_value(u_value) -> Optional[str]: if isinstance(u_value, bool): return None elif isinstance(u_value, (str, int)): @@ -331,7 +358,7 @@ def _match_clause_by_kind(clause: dict, context: Context) -> bool: return True return False -def _maybe_negate(clause, val): +def _maybe_negate(clause: dict, val: bool) -> bool: if clause.get('negate', False) is True: return not val return val @@ -342,5 +369,8 @@ def _make_big_segment_ref(segment: dict) -> str: # the data model. The Relay Proxy will use the same format when writing to the store. 
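# (Editor's aside, not part of the patch: the _bucket_context function above hashes
#  '<flag key>.<salt>.<attribute value>' -- or '<seed>.<attribute value>' when a
#  seed is set -- with SHA-1, keeps the first 15 hex digits, and divides by
#  __LONG_SCALE__ (0xFFFFFFFFFFFFFFF) to land on a point in [0, 1). A hedged
#  sketch of the arithmetic:
#      import hashlib
#      point = int(hashlib.sha1(b'hashKey.saltyA.userKeyA').hexdigest()[:15], 16)
#      point /= float(0xFFFFFFFFFFFFFFF)
#  which should come out near 0.42157587, the value the bucketing tests later in
#  this series assert for these inputs.)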
return "%s.g%d" % (segment.get('key', ''), segment.get('generation', 0)) +def _target_match_result(flag: dict, var: int) -> EvaluationDetail: + return _get_variation(flag, var, {'kind': 'TARGET_MATCH'}) + def error_reason(error_kind: str) -> dict: return {'kind': 'ERROR', 'errorKind': error_kind} diff --git a/testing/builders.py b/testing/builders.py index 2feaf94f..9e526771 100644 --- a/testing/builders.py +++ b/testing/builders.py @@ -13,6 +13,7 @@ def __init__(self, key): 'fallthrough': {}, 'prerequisites': [], 'targets': [], + 'contextTargets': [], 'rules': [] } @@ -45,6 +46,10 @@ def target(self, variation: int, *keys: str) -> FlagBuilder: self.__data['targets'].append({'variation': variation, 'values': list(keys)}) return self + def context_target(self, context_kind: str, variation: int, *keys: str) -> FlagBuilder: + self.__data['contextTargets'].append({'contextKind': context_kind, 'variation': variation, 'values': list(keys)}) + return self + def rules(self, *rules: dict) -> FlagBuilder: for r in rules: self.__data['rules'].append(r) diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py index 9dfaf967..8f7b3c54 100644 --- a/testing/impl/test_evaluator.py +++ b/testing/impl/test_evaluator.py @@ -185,19 +185,6 @@ def test_flag_returns_error_if_fallthrough_has_rollout_with_no_variations(): detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) -def test_flag_matches_user_from_targets(): - flag = { - 'key': 'feature0', - 'on': True, - 'targets': [{ 'values': ['whoever', 'userkey'], 'variation': 2 }], - 'fallthrough': { 'variation': 0 }, - 'offVariation': 1, - 'variations': ['a', 'b', 'c'] - } - user = Context.create('userkey') - detail = EvaluationDetail('c', 2, {'kind': 'TARGET_MATCH'}) - assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) - def test_flag_matches_user_from_rules(): rule = { 'id': 'id', 'clauses': [{'attribute': 'key', 'op': 'in', 'values': ['userkey']}], 'variation': 0} flag = make_boolean_flag_with_rules(rule) diff --git a/testing/impl/test_evaluator_target.py b/testing/impl/test_evaluator_target.py new file mode 100644 index 00000000..db0e755f --- /dev/null +++ b/testing/impl/test_evaluator_target.py @@ -0,0 +1,80 @@ +from ldclient.client import Context +from testing.builders import * +from testing.impl.evaluator_util import * + + +FALLTHROUGH_VAR = 0 +MATCH_VAR_1 = 1 +MATCH_VAR_2 = 2 +VARIATIONS = ['fallthrough', 'match1', 'match2'] + +def assert_match_clause(clause: dict, context: Context, should_match: bool): + assert_match(basic_evaluator, make_boolean_flag_with_clauses(clause), context, should_match) + +def base_flag_builder() -> FlagBuilder: + return FlagBuilder('feature').on(True).variations(*VARIATIONS) \ + .fallthrough_variation(FALLTHROUGH_VAR).off_variation(FALLTHROUGH_VAR) + +def expect_match(flag: dict, context: Context, variation: int): + result = basic_evaluator.evaluate(flag, context, event_factory) + assert result.detail.variation_index == variation + assert result.detail.value == VARIATIONS[variation] + assert result.detail.reason == {'kind': 'TARGET_MATCH'} + +def expect_fallthrough(flag: dict, context: Context): + result = basic_evaluator.evaluate(flag, context, event_factory) + assert result.detail.variation_index == FALLTHROUGH_VAR + assert result.detail.value == VARIATIONS[FALLTHROUGH_VAR] + assert result.detail.reason == {'kind': 'FALLTHROUGH'} + + +class 
TestEvaluatorTarget: + def test_user_targets_only(self): + flag = base_flag_builder() \ + .target(MATCH_VAR_1, 'c') \ + .target(MATCH_VAR_2, 'b', 'a') \ + .build() + + expect_match(flag, Context.create('a'), MATCH_VAR_2) + expect_match(flag, Context.create('b'), MATCH_VAR_2) + expect_match(flag, Context.create('c'), MATCH_VAR_1) + expect_fallthrough(flag, Context.create('z')) + + # in a multi-kind context, these targets match only the key for the user kind + expect_match(flag, + Context.create_multi(Context.create('b', 'dog'), Context.create('a')), + MATCH_VAR_2) + expect_match(flag, + Context.create_multi(Context.create('a', 'dog'), Context.create('c')), + MATCH_VAR_1) + expect_fallthrough(flag, + Context.create_multi(Context.create('b', 'dog'), Context.create('z'))) + expect_fallthrough(flag, + Context.create_multi(Context.create('a', 'dog'), Context.create('b', 'cat'))) + + def test_user_targets_and_context_targets(self): + flag = base_flag_builder() \ + .target(MATCH_VAR_1, 'c') \ + .target(MATCH_VAR_2, 'b', 'a') \ + .context_target('dog', MATCH_VAR_1, 'a', 'b') \ + .context_target('dog', MATCH_VAR_2, 'c') \ + .context_target(Context.DEFAULT_KIND, MATCH_VAR_1) \ + .context_target(Context.DEFAULT_KIND, MATCH_VAR_2) \ + .build() + + expect_match(flag, Context.create('a'), MATCH_VAR_2) + expect_match(flag, Context.create('b'), MATCH_VAR_2) + expect_match(flag, Context.create('c'), MATCH_VAR_1) + expect_fallthrough(flag, Context.create('z')) + + expect_match(flag, + Context.create_multi(Context.create('b', 'dog'), Context.create('a')), + MATCH_VAR_1) # the "dog" target takes precedence due to ordering + expect_match(flag, + Context.create_multi(Context.create('z', 'dog'), Context.create('a')), + MATCH_VAR_2) # "dog" targets don't match, continue to "user" targets + expect_fallthrough(flag, + Context.create_multi(Context.create('x', 'dog'), Context.create('z'))) # nothing matches + expect_match(flag, + Context.create_multi(Context.create('a', 'dog'), Context.create('b', 'cat')), + MATCH_VAR_1) From 38d885ee2e58ae0a06275000a04ec571e1b68a47 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 10 Dec 2022 12:16:33 -0800 Subject: [PATCH 312/356] support contextKind in rollouts/experiments --- Makefile | 5 +- ldclient/impl/evaluator.py | 82 +++++++------ testing/builders.py | 112 +++++++++++++----- testing/impl/evaluator_util.py | 7 -- testing/impl/test_evaluator.py | 103 +--------------- testing/impl/test_evaluator_big_segment.py | 1 + testing/impl/test_evaluator_bucketing.py | 131 +++++++++++++++++++++ testing/impl/test_evaluator_clause.py | 4 - testing/impl/test_evaluator_segment.py | 67 +++++++++-- testing/test_ldclient_evaluation.py | 1 - 10 files changed, 324 insertions(+), 189 deletions(-) create mode 100644 testing/impl/test_evaluator_bucketing.py diff --git a/Makefile b/Makefile index 26d49f78..bfd82c2d 100644 --- a/Makefile +++ b/Makefile @@ -20,13 +20,12 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass # Explanation of current skips: +# - "evaluation" subtests involving attribute references: Haven't yet implemented attribute references. # - "evaluation/parameterized/prerequisites": Can't pass yet because prerequisite cycle detection is not implemented. # - various other "evaluation" subtests: These tests require attribute reference support or targeting by kind. # - "events": These test suites will be unavailable until more of the U2C implementation is done. 
TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ - -skip 'evaluation/bucketing/bucket by non-key attribute' \ - -skip 'evaluation/bucketing/secondary' \ - -skip 'evaluation/bucketing/selection of context' \ + -skip 'evaluation/bucketing/bucket by non-key attribute/in rollouts/string value/complex attribute reference' \ -skip 'evaluation/parameterized/attribute references' \ -skip 'evaluation/parameterized/bad attribute reference errors' \ -skip 'evaluation/parameterized/prerequisites' \ diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 9c8f92ba..3d77bb11 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -229,7 +229,7 @@ def _segment_rule_matches_context(self, rule: dict, context: Context, state: Eva # All of the clauses are met. See if the context buckets in bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy'] - bucket = _bucket_context(None, context, segment_key, salt, bucket_by) + bucket = _bucket_context(None, context, rule.get('rolloutContextKind'), segment_key, salt, bucket_by) weight = rule['weight'] / 100000.0 return bucket < weight @@ -277,54 +277,68 @@ def _get_value_for_variation_or_rollout(flag: dict, vr: dict, context: Context, return _get_variation(flag, index, reason) def _variation_index_for_context(feature: dict, rule: dict, context: Context) -> Tuple[Optional[int], bool]: - if rule.get('variation') is not None: - return (rule['variation'], False) + var = rule.get('variation') + if var is not None: + return (var, False) rollout = rule.get('rollout') if rollout is None: return (None, False) variations = rollout.get('variations') - seed = rollout.get('seed') - if variations is not None and len(variations) > 0: - bucket_by = 'key' - if rollout.get('bucketBy') is not None: - bucket_by = rollout['bucketBy'] - bucket = _bucket_context(seed, context, feature['key'], feature['salt'], bucket_by) - is_experiment = rollout.get('kind') == 'experiment' - sum = 0.0 - for wv in variations: - sum += wv.get('weight', 0.0) / 100000.0 - if bucket < sum: - is_experiment_partition = is_experiment and not wv.get('untracked') - return (wv.get('variation'), is_experiment_partition) - - # The context's bucket value was greater than or equal to the end of the last bucket. This could happen due - # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag - # data could contain buckets that don't actually add up to 100000. Rather than returning an error in - # this case (or changing the scaling, which would potentially change the results for *all* contexts), we - # will simply put the context in the last bucket. 
- is_experiment_partition = is_experiment and not variations[-1].get('untracked') - return (variations[-1].get('variation'), is_experiment_partition) - - return (None, False) - -def _bucket_context(seed, context, key, salt, bucket_by) -> float: - clause_value = context.get(bucket_by or 'key') + if variations is None or len(variations) == 0: + return (None, False) + + rollout_is_experiment = rollout.get('kind') == 'experiment' + bucket_by = None if rollout_is_experiment else rollout.get('bucketBy') + bucket = _bucket_context( + rollout.get('seed'), + context, + rollout.get('contextKind'), + feature['key'], + feature['salt'], + bucket_by + ) + is_experiment = rollout_is_experiment and bucket >= 0 + # _bucket_context returns a negative value if the context didn't exist, in which case we + # still end up returning the first bucket, but we will force the "in experiment" state to be false. + + sum = 0.0 + for wv in variations: + sum += wv.get('weight', 0.0) / 100000.0 + if bucket < sum: + is_experiment_partition = is_experiment and not wv.get('untracked') + return (wv.get('variation'), is_experiment_partition) + + # The context's bucket value was greater than or equal to the end of the last bucket. This could happen due + # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag + # data could contain buckets that don't actually add up to 100000. Rather than returning an error in + # this case (or changing the scaling, which would potentially change the results for *all* contexts), we + # will simply put the context in the last bucket. + is_experiment_partition = is_experiment and not variations[-1].get('untracked') + return (variations[-1].get('variation'), is_experiment_partition) + +def _bucket_context( + seed: Optional[int], + context: Context, + context_kind: Optional[str], + key: str, + salt: str, + bucket_by: Optional[str] + ) -> float: + match_context = context.get_individual_context(context_kind or Context.DEFAULT_KIND) + if match_context is None: + return -1 + clause_value = match_context.get(bucket_by or 'key') if clause_value is None: return 0.0 bucket_by_value = _bucketable_string_value(clause_value) if bucket_by_value is None: return 0.0 - id_hash = clause_value - if context.get('secondary') is not None: - id_hash = id_hash + '.' 
+ context.get('secondary') - if seed is not None: prefix = str(seed) else: prefix = '%s.%s' % (key, salt) - hash_key = '%s.%s' % (prefix, id_hash) + hash_key = '%s.%s' % (prefix, bucket_by_value) hash_val = int(hashlib.sha1(hash_key.encode('utf-8')).hexdigest()[:15], 16) result = hash_val / __LONG_SCALE__ diff --git a/testing/builders.py b/testing/builders.py index 9e526771..43590e7e 100644 --- a/testing/builders.py +++ b/testing/builders.py @@ -1,10 +1,32 @@ from __future__ import annotations -from typing import Any, Optional +from typing import Any, List, Optional +from ldclient.context import Context -class FlagBuilder: +class BaseBuilder: + def __init__(self, data): + self.data = data + + def _set(self, key: str, value: Any): + self.data[key] = value + return self + + def _append(self, key: str, item: dict): + self.data[key].append(item) + return self + + def _append_all(self, key: str, items: List[dict]): + self.data[key].extend(items) + return self + + def build(self): + return self.data.copy() + + +class FlagBuilder(BaseBuilder): def __init__(self, key): - self.__data = { + super().__init__({ 'key': key, 'version': 1, 'on': False, @@ -15,14 +37,7 @@ def __init__(self, key): 'fallthrough': {}, 'prerequisites': [], 'targets': [], 'contextTargets': [], 'rules': [] - } - - def build(self): - return self.__data.copy() - - def _set(self, k: str, v: Any) -> FlagBuilder: - self.__data[k] = v - return self + }) def key(self, key: str) -> FlagBuilder: return self._set('key', key) @@ -43,36 +58,71 @@ def fallthrough_variation(self, index: int) -> FlagBuilder: return self._set('fallthrough', {'variation': index}) def target(self, variation: int, *keys: str) -> FlagBuilder: - self.__data['targets'].append({'variation': variation, 'values': list(keys)}) - return self + return self._append('targets', {'variation': variation, 'values': list(keys)}) def context_target(self, context_kind: str, variation: int, *keys: str) -> FlagBuilder: - self.__data['contextTargets'].append({'contextKind': context_kind, 'variation': variation, 'values': list(keys)}) - return self + return self._append('contextTargets', + {'contextKind': context_kind, 'variation': variation, 'values': list(keys)}) def rules(self, *rules: dict) -> FlagBuilder: - for r in rules: - self.__data['rules'].append(r) - return self + return self._append_all('rules', list(rules)) -class FlagRuleBuilder: +class FlagRuleBuilder(BaseBuilder): def __init__(self): - self.__data = {'clauses': []} - - def build(self) -> dict: - return self.__data.copy() + super().__init__({'clauses': []}) def variation(self, variation: int) -> FlagRuleBuilder: - self.__data['variation'] = variation - return self + return self._set('variation', variation) def clauses(self, *clauses: dict) -> FlagRuleBuilder: - for c in clauses: - self.__data['clauses'].append(c) - return self + return self._append_all('clauses', list(clauses)) +class SegmentBuilder(BaseBuilder): + def __init__(self, key): + super().__init__({ + 'key': key, + 'version': 1, + 'included': [], + 'excluded': [], + 'rules': [], + 'unbounded': False + }) + + def key(self, key: str) -> SegmentBuilder: + return self._set('key', key) + + def version(self, version: int) -> SegmentBuilder: + return self._set('version', version) + + def salt(self, salt: str) -> SegmentBuilder: + return self._set('salt', salt) + + def rules(self, *rules: dict) -> SegmentBuilder: + return self._append_all('rules', list(rules)) + + +class SegmentRuleBuilder(BaseBuilder): + def __init__(self): + super().__init__({'clauses': []}) + + def bucket_by(self, value: Optional[str]) -> SegmentRuleBuilder: + return
self._set('bucketBy', value) + + def clauses(self, *clauses: dict) -> SegmentRuleBuilder: + return self._append_all('clauses', list(clauses)) + + def rollout_context_kind(self, value: Optional[str]) -> SegmentRuleBuilder: + return self._set('rolloutContextKind', value) + + def weight(self, value: Optional[int]) -> SegmentRuleBuilder: + return self._set('weight', value) + + +def make_boolean_flag_matching_segment(segment: dict) -> dict: + return make_boolean_flag_with_clauses(make_clause_matching_segment_key(segment['key'])) + def make_boolean_flag_with_clauses(*clauses: dict) -> dict: return make_boolean_flag_with_rules(FlagRuleBuilder().clauses(*clauses).variation(0).build()) @@ -85,6 +135,12 @@ def make_clause(context_kind: Optional[str], attr: str, op: str, *values: Any) - ret['contextKind'] = context_kind return ret +def make_clause_matching_context(context: Context) -> dict: + return {'contextKind': context.kind, 'attribute': 'key', 'op': 'in', 'values': [context.key]} + +def make_clause_matching_segment_key(*segment_keys: str) -> dict: + return {'attribute': '', 'op': 'segmentMatch', 'values': list(segment_keys)} + def negate_clause(clause: dict) -> dict: c = clause.copy() c['negate'] = not c.get('negate') diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index 99367ee7..19d600e3 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -82,13 +82,6 @@ def assert_match(evaluator: Evaluator, flag: dict, context: Context, expect_valu assert result.detail.value == expect_value -def make_boolean_flag_matching_segment(segment: dict) -> dict: - return make_boolean_flag_with_clauses({ - 'attribute': '', - 'op': 'segmentMatch', - 'values': [ segment['key'] ] - }) - def make_clause_matching_user(user: Union[Context, dict]) -> dict: key = user.key if isinstance(user, Context) else user['key'] return { 'attribute': 'key', 'op': 'in', 'values': [ key ] } diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py index 8f7b3c54..ddd77954 100644 --- a/testing/impl/test_evaluator.py +++ b/testing/impl/test_evaluator.py @@ -1,8 +1,6 @@ -import math -import pytest from ldclient.client import Context from ldclient.evaluation import EvaluationDetail -from ldclient.impl.evaluator import _bucket_context, _context_to_user_dict, _variation_index_for_context +from ldclient.impl.evaluator import _context_to_user_dict from testing.impl.evaluator_util import * @@ -274,102 +272,3 @@ def test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found( evaluator = EvaluatorBuilder().with_unknown_segment('segkey').build() assert evaluator.evaluate(flag, user, event_factory).detail.value == False - -def test_variation_index_is_returned_for_bucket(): - user = Context.create('userkey') - flag = { 'key': 'flagkey', 'salt': 'salt' } - - # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, - # so we can construct a rollout whose second bucket just barely contains that value - bucket_value = math.trunc(_bucket_context(None, user, flag['key'], flag['salt'], 'key') * 100000) - assert bucket_value > 0 and bucket_value < 100000 - - bad_variation_a = 0 - matched_variation = 1 - bad_variation_b = 2 - rule = { - 'rollout': { - 'variations': [ - { 'variation': bad_variation_a, 'weight': bucket_value }, # end of bucket range is not inclusive, so it will *not* match the target value - { 'variation': matched_variation, 'weight': 1 }, # size of this bucket is 1, so it only matches that specific 
value - { 'variation': bad_variation_b, 'weight': 100000 - (bucket_value + 1) } - ] - } - } - result_variation = _variation_index_for_context(flag, rule, user) - assert result_variation == (matched_variation, False) - -def test_last_bucket_is_used_if_bucket_value_equals_total_weight(): - user = Context.create('userkey') - flag = { 'key': 'flagkey', 'salt': 'salt' } - - # We'll construct a list of variations that stops right at the target bucket value - bucket_value = math.trunc(_bucket_context(None, user, flag['key'], flag['salt'], 'key') * 100000) - - rule = { - 'rollout': { - 'variations': [ - { 'variation': 0, 'weight': bucket_value } - ] - } - } - result_variation = _variation_index_for_context(flag, rule, user) - assert result_variation == (0, False) - -def test_bucket_by_user_key(): - user = Context.create('userKeyA') - bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') - assert bucket == pytest.approx(0.42157587) - - user = Context.create('userKeyB') - bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') - assert bucket == pytest.approx(0.6708485) - - user = Context.create('userKeyC') - bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'key') - assert bucket == pytest.approx(0.10343106) - -def test_bucket_by_user_key_with_seed(): - seed = 61 - user = Context.create('userKeyA') - point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') - assert point == pytest.approx(0.09801207) - - user = Context.create('userKeyB') - point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') - assert point == pytest.approx(0.14483777) - - user = Context.create('userKeyC') - point = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') - assert point == pytest.approx(0.9242641) - -def test_bucket_by_int_attr(): - user = Context.builder('userKey').set('intAttr', 33333).set('stringAttr', '33333').build() - bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'intAttr') - assert bucket == pytest.approx(0.54771423) - bucket2 = _bucket_context(None, user, 'hashKey', 'saltyA', 'stringAttr') - assert bucket2 == bucket - -def test_bucket_by_float_attr_not_allowed(): - user = Context.builder('userKey').set('floatAttr', 33.5).build() - bucket = _bucket_context(None, user, 'hashKey', 'saltyA', 'floatAttr') - assert bucket == 0.0 - -def test_seed_independent_of_salt_and_hashKey(): - seed = 61 - user = Context.create('userKeyA') - point1 = _bucket_context(seed, user, 'hashKey', 'saltyA', 'key') - point2 = _bucket_context(seed, user, 'hashKey', 'saltyB', 'key') - point3 = _bucket_context(seed, user, 'hashKey2', 'saltyA', 'key') - - assert point1 == point2 - assert point2 == point3 - -def test_seed_changes_hash_evaluation(): - seed1 = 61 - user = Context.create('userKeyA') - point1 = _bucket_context(seed1, user, 'hashKey', 'saltyA', 'key') - seed2 = 62 - point2 = _bucket_context(seed2, user, 'hashKey', 'saltyB', 'key') - - assert point1 != point2 diff --git a/testing/impl/test_evaluator_big_segment.py b/testing/impl/test_evaluator_big_segment.py index 8d7eb403..b9ff1e32 100644 --- a/testing/impl/test_evaluator_big_segment.py +++ b/testing/impl/test_evaluator_big_segment.py @@ -1,6 +1,7 @@ import pytest from ldclient.evaluation import BigSegmentsStatus +from testing.builders import * from testing.impl.evaluator_util import * diff --git a/testing/impl/test_evaluator_bucketing.py b/testing/impl/test_evaluator_bucketing.py new file mode 100644 index 00000000..9990b23a --- /dev/null +++ b/testing/impl/test_evaluator_bucketing.py @@ -0,0 +1,131 @@ +from ldclient.client 
import Context +from ldclient.impl.evaluator import _bucket_context, _variation_index_for_context + +from testing.builders import * +from testing.impl.evaluator_util import * + +import math +import pytest + + +def assert_match_clause(clause: dict, context: Context, should_match: bool): + assert_match(basic_evaluator, make_boolean_flag_with_clauses(clause), context, should_match) + + +class TestEvaluatorBucketing: + def test_variation_index_is_returned_for_bucket(self): + user = Context.create('userkey') + flag = { 'key': 'flagkey', 'salt': 'salt' } + + # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, + # so we can construct a rollout whose second bucket just barely contains that value + bucket_value = math.trunc(_bucket_context(None, user, None, flag['key'], flag['salt'], 'key') * 100000) + assert bucket_value > 0 and bucket_value < 100000 + + bad_variation_a = 0 + matched_variation = 1 + bad_variation_b = 2 + rule = { + 'rollout': { + 'variations': [ + { 'variation': bad_variation_a, 'weight': bucket_value }, # end of bucket range is not inclusive, so it will *not* match the target value + { 'variation': matched_variation, 'weight': 1 }, # size of this bucket is 1, so it only matches that specific value + { 'variation': bad_variation_b, 'weight': 100000 - (bucket_value + 1) } + ] + } + } + result_variation = _variation_index_for_context(flag, rule, user) + assert result_variation == (matched_variation, False) + + def test_last_bucket_is_used_if_bucket_value_equals_total_weight(self): + user = Context.create('userkey') + flag = { 'key': 'flagkey', 'salt': 'salt' } + + # We'll construct a list of variations that stops right at the target bucket value + bucket_value = math.trunc(_bucket_context(None, user, None, flag['key'], flag['salt'], 'key') * 100000) + + rule = { + 'rollout': { + 'variations': [ + { 'variation': 0, 'weight': bucket_value } + ] + } + } + result_variation = _variation_index_for_context(flag, rule, user) + assert result_variation == (0, False) + + def test_bucket_by_user_key(self): + user = Context.create('userKeyA') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'key') + assert bucket == pytest.approx(0.42157587) + + user = Context.create('userKeyB') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'key') + assert bucket == pytest.approx(0.6708485) + + user = Context.create('userKeyC') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'key') + assert bucket == pytest.approx(0.10343106) + + def test_bucket_by_user_key_with_seed(self): + seed = 61 + user = Context.create('userKeyA') + point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') + assert point == pytest.approx(0.09801207) + + user = Context.create('userKeyB') + point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') + assert point == pytest.approx(0.14483777) + + user = Context.create('userKeyC') + point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') + assert point == pytest.approx(0.9242641) + + def test_bucket_by_int_attr(self): + user = Context.builder('userKey').set('intAttr', 33333).set('stringAttr', '33333').build() + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'intAttr') + assert bucket == pytest.approx(0.54771423) + bucket2 = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'stringAttr') + assert bucket2 == bucket + + def test_bucket_by_float_attr_not_allowed(self): + user = Context.builder('userKey').set('floatAttr', 
33.5).build() + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'floatAttr') + assert bucket == 0.0 + + def test_seed_independent_of_salt_and_hashKey(self): + seed = 61 + user = Context.create('userKeyA') + point1 = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') + point2 = _bucket_context(seed, user, None, 'hashKey', 'saltyB', 'key') + point3 = _bucket_context(seed, user, None, 'hashKey2', 'saltyA', 'key') + + assert point1 == point2 + assert point2 == point3 + + def test_seed_changes_hash_evaluation(self): + seed1 = 61 + user = Context.create('userKeyA') + point1 = _bucket_context(seed1, user, None, 'hashKey', 'saltyA', 'key') + seed2 = 62 + point2 = _bucket_context(seed2, user, None, 'hashKey', 'saltyB', 'key') + + assert point1 != point2 + + def test_context_kind_selects_context(self): + seed = 357 + context1 = Context.create('key1') + context2 = Context.create('key2', 'kind2') + multi = Context.create_multi(context1, context2) + key = 'flag-key' + attr = 'key' + salt = 'testing123' + + assert _bucket_context(seed, context1, None, key, salt, attr) == \ + _bucket_context(seed, context1, 'user', key, salt, attr) + assert _bucket_context(seed, context1, None, key, salt, attr) == \ + _bucket_context(seed, multi, 'user', key, salt, attr) + assert _bucket_context(seed, context2, 'kind2', key, salt, attr) == \ + _bucket_context(seed, multi, 'kind2', key, salt, attr) + assert _bucket_context(seed, multi, 'user', key, salt, attr) != \ + _bucket_context(seed, multi, 'kind2', key, salt, attr) diff --git a/testing/impl/test_evaluator_clause.py b/testing/impl/test_evaluator_clause.py index 5b492f40..3a74d68e 100644 --- a/testing/impl/test_evaluator_clause.py +++ b/testing/impl/test_evaluator_clause.py @@ -1,8 +1,4 @@ -import math -import pytest from ldclient.client import Context -from ldclient.evaluation import EvaluationDetail -from ldclient.impl.evaluator import _bucket_context, _variation_index_for_context from testing.builders import * from testing.impl.evaluator_util import * diff --git a/testing/impl/test_evaluator_segment.py b/testing/impl/test_evaluator_segment.py index e61beb48..cc1536d7 100644 --- a/testing/impl/test_evaluator_segment.py +++ b/testing/impl/test_evaluator_segment.py @@ -1,15 +1,51 @@ import pytest from ldclient import Context +from ldclient.impl.evaluator import _bucket_context +from testing.builders import * from testing.impl.evaluator_util import * -def _segment_matches_user(segment: dict, context: Context) -> bool: +def _segment_matches_context(segment: dict, context: Context) -> bool: e = EvaluatorBuilder().with_segment(segment).build() flag = make_boolean_flag_matching_segment(segment) result = e.evaluate(flag, context, event_factory) return result.detail.value +def verify_rollout( + eval_context: Context, + match_context: Context, + expected_bucket_value: int, + segment_key: str, + salt: str, + bucket_by: Optional[str], + rollout_context_kind: Optional[str] +): + segment_should_match = SegmentBuilder(segment_key) \ + .salt(salt) \ + .rules( + SegmentRuleBuilder() \ + .clauses(make_clause_matching_context(match_context)) \ + .weight(expected_bucket_value + 1) \ + .bucket_by(bucket_by) \ + .rollout_context_kind(rollout_context_kind) \ + .build() + ) \ + .build() + segment_should_not_match = SegmentBuilder(segment_key) \ + .salt(salt) \ + .rules( + SegmentRuleBuilder() \ + .clauses(make_clause_matching_context(match_context)) \ + .weight(expected_bucket_value) \ + .bucket_by(bucket_by) \ + .rollout_context_kind(rollout_context_kind) \ + 
.build() + ) \ + .build() + assert _segment_matches_context(segment_should_match, eval_context) is True + assert _segment_matches_context(segment_should_not_match, eval_context) is False + def test_explicit_include_user(): s = { @@ -18,7 +54,7 @@ def test_explicit_include_user(): "version": 1 } u = Context.create('foo') - assert _segment_matches_user(s, u) is True + assert _segment_matches_context(s, u) is True def test_explicit_exclude_user(): s = { @@ -27,7 +63,7 @@ def test_explicit_exclude_user(): "version": 1 } u = Context.create('foo') - assert _segment_matches_user(s, u) is False + assert _segment_matches_context(s, u) is False def test_explicit_include_has_precedence(): s = { @@ -37,7 +73,7 @@ def test_explicit_include_has_precedence(): "version": 1 } u = Context.create('foo') - assert _segment_matches_user(s, u) is True + assert _segment_matches_context(s, u) is True def test_matching_rule_with_no_weight(): s = { @@ -55,7 +91,7 @@ def test_matching_rule_with_no_weight(): ] } u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_user(s, u) is True + assert _segment_matches_context(s, u) is True def test_matching_rule_with_none_weight(): s = { @@ -74,7 +110,7 @@ def test_matching_rule_with_none_weight(): ] } u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_user(s, u) is True + assert _segment_matches_context(s, u) is True def test_matching_rule_with_full_rollout(): s = { @@ -93,7 +129,7 @@ def test_matching_rule_with_full_rollout(): ] } u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_user(s, u) is True + assert _segment_matches_context(s, u) is True def test_matching_rule_with_zero_rollout(): s = { @@ -112,7 +148,18 @@ def test_matching_rule_with_zero_rollout(): ] } u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_user(s, u) is False + assert _segment_matches_context(s, u) is False + +def test_rollout_calculation_can_bucket_by_key(): + context = Context.builder('userkey').name('Bob').build() + verify_rollout(context, context, 12551, 'test', 'salt', None, None) + +def test_rollout_uses_context_kind(): + context1 = Context.create('key1', 'kind1') + context2 = Context.create('key2', 'kind2') + multi = Context.create_multi(context1, context2) + expected_bucket_value = int(100000 * _bucket_context(None, context2, 'kind2', 'test', 'salt', None)) + verify_rollout(multi, context2, expected_bucket_value, 'test', 'salt', None, 'kind2') def test_matching_rule_with_multiple_clauses(): s = { @@ -136,7 +183,7 @@ def test_matching_rule_with_multiple_clauses(): ] } u = Context.builder('foo').name('bob').set('email', 'test@example.com').build() - assert _segment_matches_user(s, u) is True + assert _segment_matches_context(s, u) is True def test_non_matching_rule_with_multiple_clauses(): s = { @@ -160,4 +207,4 @@ def test_non_matching_rule_with_multiple_clauses(): ] } u = Context.builder('foo').name('bob').set('email', 'test@example.com').build() - assert _segment_matches_user(s, u) is False + assert _segment_matches_context(s, u) is False diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 3f41e7e3..c58c04c7 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -11,7 +11,6 @@ from ldclient.versioned_data_kind import FEATURES, SEGMENTS from testing.builders import * -from testing.impl.evaluator_util import make_boolean_flag_matching_segment from 
testing.mock_components import MockBigSegmentStore from testing.stub_util import MockEventProcessor, MockUpdateProcessor from testing.test_ldclient import make_off_flag_with_value From 9a1f932dfb85aa2622aefd122b3ebb070e8a1a36 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Sat, 10 Dec 2022 12:41:02 -0800 Subject: [PATCH 313/356] support includedContexts/excludedContexts in segment --- Makefile | 4 - ldclient/impl/evaluator.py | 26 ++- testing/builders.py | 19 ++- testing/impl/test_evaluator_segment.py | 216 ++++++++++--------------- 4 files changed, 119 insertions(+), 146 deletions(-) diff --git a/Makefile b/Makefile index bfd82c2d..359c4338 100644 --- a/Makefile +++ b/Makefile @@ -29,10 +29,6 @@ TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ -skip 'evaluation/parameterized/attribute references' \ -skip 'evaluation/parameterized/bad attribute reference errors' \ -skip 'evaluation/parameterized/prerequisites' \ - -skip 'evaluation/parameterized/segment match/included list is specific to user kind' \ - -skip 'evaluation/parameterized/segment match/includedContexts' \ - -skip 'evaluation/parameterized/segment match/excluded list is specific to user kind' \ - -skip 'evaluation/parameterized/segment match/excludedContexts' \ -skip 'evaluation/parameterized/segment recursion' \ -skip 'events' diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 3d77bb11..a80cc2c1 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -206,16 +206,20 @@ def _segment_matches_context(self, segment: dict, context: Context, state: EvalR return self._simple_segment_match_context(segment, context, state, True) def _simple_segment_match_context(self, segment: dict, context: Context, state: EvalResult, use_includes_and_excludes: bool) -> bool: - key = context.key - if key is not None: - if use_includes_and_excludes: - if key in segment.get('included', []): + if use_includes_and_excludes: + if _context_key_is_in_target_list(context, None, segment.get('included')): + return True + for t in segment.get('includedContexts') or []: + if _context_key_is_in_target_list(context, t.get('contextKind'), t.get('values')): return True - if key in segment.get('excluded', []): + if _context_key_is_in_target_list(context, None, segment.get('excluded')): + return False + for t in segment.get('excludedContexts') or []: + if _context_key_is_in_target_list(context, t.get('contextKind'), t.get('values')): return False - for rule in segment.get('rules', []): - if self._segment_rule_matches_context(rule, context, state, segment['key'], segment.get('salt', '')): - return True + for rule in segment.get('rules', []): + if self._segment_rule_matches_context(rule, context, state, segment['key'], segment.get('salt', '')): + return True return False def _segment_rule_matches_context(self, rule: dict, context: Context, state: EvalResult, segment_key: str, salt: str) -> bool: @@ -352,6 +356,12 @@ def _bucketable_string_value(u_value) -> Optional[str]: return None +def _context_key_is_in_target_list(context: Context, context_kind: Optional[str], keys: Optional[List[str]]) -> bool: + if keys is None or len(keys) == 0: + return False + match_context = context.get_individual_context(context_kind or Context.DEFAULT_KIND) + return match_context is not None and match_context.key in keys + def _match_single_context_value(op: str, context_value: Any, values: List[Any]) -> bool: op_fn = operators.ops.get(op) if op_fn is None: diff --git a/testing/builders.py b/testing/builders.py index 43590e7e..f5d76c15 100644 --- 
a/testing/builders.py +++ b/testing/builders.py @@ -16,7 +16,7 @@ def _append(self, key: str, item: dict): self.data[key].append(item) return self - def _append_all(self, key: str, items: List[dict]): + def _append_all(self, key: str, items: List[Any]): self.data[key].extend(items) return self @@ -86,6 +86,8 @@ def __init__(self, key): 'version': 1, 'included': [], 'excluded': [], + 'includedContexts': [], + 'excludedContexts': [], 'rules': [], 'unbounded': False }) @@ -96,6 +98,18 @@ def key(self, key: str) -> SegmentBuilder: def version(self, version: int) -> SegmentBuilder: return self._set('key', version) + def excluded(self, *keys: str) -> SegmentBuilder: + return self._append_all('excluded', list(keys)) + + def excluded_contexts(self, context_kind: str, *keys: str) -> SegmentBuilder: + return self._append('excludedContexts', {'contextKind': context_kind, 'values': list(keys)}) + + def included(self, *keys: str) -> SegmentBuilder: + return self._append_all('included', list(keys)) + + def included_contexts(self, context_kind: str, *keys: str) -> SegmentBuilder: + return self._append('includedContexts', {'contextKind': context_kind, 'values': list(keys)}) + def salt(self, salt: str) -> SegmentBuilder: return self._set('salt', salt) @@ -141,6 +155,9 @@ def make_clause_matching_context(context: Context) -> dict: def make_clause_matching_segment_key(*segment_keys: str) -> dict: return {'attribute': '', 'op': 'segmentMatch', 'values': list(segment_keys)} +def make_segment_rule_matching_context(context: Context) -> dict: + return SegmentRuleBuilder().clauses(make_clause_matching_context(context)).build() + def negate_clause(clause: dict) -> dict: c = clause.copy() c['negate'] = not c.get('negate') diff --git a/testing/impl/test_evaluator_segment.py b/testing/impl/test_evaluator_segment.py index cc1536d7..e0471e1b 100644 --- a/testing/impl/test_evaluator_segment.py +++ b/testing/impl/test_evaluator_segment.py @@ -48,107 +48,81 @@ def verify_rollout( def test_explicit_include_user(): - s = { - "key": "test", - "included": [ "foo" ], - "version": 1 - } - u = Context.create('foo') - assert _segment_matches_context(s, u) is True + user = Context.create('foo') + segment = SegmentBuilder('test').included(user.key).build() + assert _segment_matches_context(segment, user) is True def test_explicit_exclude_user(): - s = { - "key": "test", - "excluded": [ "foo" ], - "version": 1 - } - u = Context.create('foo') - assert _segment_matches_context(s, u) is False + user = Context.create('foo') + segment = SegmentBuilder('test').excluded(user.key) \ + .rules(make_segment_rule_matching_context(user)) \ + .build() + assert _segment_matches_context(segment, user) is False def test_explicit_include_has_precedence(): - s = { - "key": "test", - "included": [ "foo" ], - "excluded": [ "foo" ], - "version": 1 - } - u = Context.create('foo') - assert _segment_matches_context(s, u) is True + user = Context.create('foo') + segment = SegmentBuilder('test').included(user.key).excluded(user.key).build() + assert _segment_matches_context(segment, user) is True + +def test_included_key_for_context_kind(): + c1 = Context.create('key1', 'kind1') + c2 = Context.create('key2', 'kind2') + multi = Context.create_multi(c1, c2) + segment = SegmentBuilder('test').included_contexts('kind1', 'key1').build() + assert _segment_matches_context(segment, c1) is True + assert _segment_matches_context(segment, c2) is False + assert _segment_matches_context(segment, multi) is True + +def test_excluded_key_for_context_kind(): + c1 = 
Context.create('key1', 'kind1') + c2 = Context.create('key2', 'kind2') + multi = Context.create_multi(c1, c2) + segment = SegmentBuilder('test') \ + .excluded_contexts('kind1', 'key1') \ + .rules( + make_segment_rule_matching_context(c1), + make_segment_rule_matching_context(c2) + ) \ + .build() + assert _segment_matches_context(segment, c1) is False + assert _segment_matches_context(segment, c2) is True + assert _segment_matches_context(segment, multi) is False def test_matching_rule_with_no_weight(): - s = { - "key": "test", - "rules": [ - { - "clauses": [ - { - "attribute": "email", - "op": "in", - "values": [ "test@example.com" ] - } - ] - } - ] - } - u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_context(s, u) is True + context = Context.create('foo') + segment = SegmentBuilder('test') \ + .rules( + SegmentRuleBuilder().clauses(make_clause_matching_context(context)).build() + ) \ + .build() + assert _segment_matches_context(segment, context) is True def test_matching_rule_with_none_weight(): - s = { - "key": "test", - "rules": [ - { - "clauses": [ - { - "attribute": "email", - "op": "in", - "values": [ "test@example.com" ] - } - ], - "weight": None - } - ] - } - u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_context(s, u) is True + context = Context.create('foo') + segment = SegmentBuilder('test') \ + .rules( + SegmentRuleBuilder().weight(None).clauses(make_clause_matching_context(context)).build() + ) \ + .build() + assert _segment_matches_context(segment, context) is True def test_matching_rule_with_full_rollout(): - s = { - "key": "test", - "rules": [ - { - "clauses": [ - { - "attribute": "email", - "op": "in", - "values": [ "test@example.com" ] - } - ], - "weight": 100000 - } - ] - } - u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_context(s, u) is True + context = Context.create('foo') + segment = SegmentBuilder('test') \ + .rules( + SegmentRuleBuilder().weight(100000).clauses(make_clause_matching_context(context)).build() + ) \ + .build() + assert _segment_matches_context(segment, context) is True def test_matching_rule_with_zero_rollout(): - s = { - "key": "test", - "rules": [ - { - "clauses": [ - { - "attribute": "email", - "op": "in", - "values": [ "test@example.com" ] - } - ], - "weight": 0 - } - ] - } - u = Context.builder('foo').set('email', 'test@example.com').build() - assert _segment_matches_context(s, u) is False + context = Context.create('foo') + segment = SegmentBuilder('test') \ + .rules( + SegmentRuleBuilder().weight(0).clauses(make_clause_matching_context(context)).build() + ) \ + .build() + assert _segment_matches_context(segment, context) is False def test_rollout_calculation_can_bucket_by_key(): context = Context.builder('userkey').name('Bob').build() @@ -162,49 +136,25 @@ def test_rollout_uses_context_kind(): verify_rollout(multi, context2, expected_bucket_value, 'test', 'salt', None, 'kind2') def test_matching_rule_with_multiple_clauses(): - s = { - "key": "test", - "rules": [ - { - "clauses": [ - { - "attribute": "email", - "op": "in", - "values": [ "test@example.com" ] - }, - { - "attribute": "name", - "op": "in", - "values": [ "bob" ] - } - ], - "weight": 100000 - } - ] - } - u = Context.builder('foo').name('bob').set('email', 'test@example.com').build() - assert _segment_matches_context(s, u) is True + context = Context.builder('foo').name('bob').set('email', 'test@example.com').build() + segment = 
SegmentBuilder('test') \ + .rules( + SegmentRuleBuilder().clauses( + make_clause(None, 'email', 'in', 'test@example.com'), + make_clause(None, 'name', 'in', 'bob') + ).build() + ) \ + .build() + assert _segment_matches_context(segment, context) is True def test_non_matching_rule_with_multiple_clauses(): - s = { - "key": "test", - "rules": [ - { - "clauses": [ - { - "attribute": "email", - "op": "in", - "values": [ "test@example.com" ] - }, - { - "attribute": "name", - "op": "in", - "values": [ "bill" ] - } - ], - "weight": 100000 - } - ] - } - u = Context.builder('foo').name('bob').set('email', 'test@example.com').build() - assert _segment_matches_context(s, u) is False + context = Context.builder('foo').name('bob').set('email', 'test@example.com').build() + segment = SegmentBuilder('test') \ + .rules( + SegmentRuleBuilder().clauses( + make_clause(None, 'email', 'in', 'test@example.com'), + make_clause(None, 'name', 'in', 'bill') + ).build() + ) \ + .build() + assert _segment_matches_context(segment, context) is False From 213f656ca4ad00850d66a39101813bc5498dbcb2 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 13:24:40 -0800 Subject: [PATCH 314/356] comment copyedit Co-authored-by: Matthew M. Keeler --- ldclient/context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/context.py b/ldclient/context.py index 40960124..3a65fb05 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -455,7 +455,7 @@ def custom_attributes(self) -> Iterable[str]: @property def _attributes(self) -> Optional[dict[str, Any]]: - # for internal use by ContextBuilder - we don't want to expose the original dict otherwise + # for internal use by ContextBuilder - we don't want to expose the original dict # since that would break immutability return self.__attributes From b07a8e46903a0afed8ce1e76191e677bfd7570af Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 13:32:14 -0800 Subject: [PATCH 315/356] comment fixes --- ldclient/context.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ldclient/context.py b/ldclient/context.py index 3a65fb05..40698063 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -202,7 +202,7 @@ def builder(cls, key: str) -> ContextBuilder: You may use :class:`ldclient.ContextBuilder` methods to set additional attributes and/or change the context kind before calling :func:`ldclient.ContextBuilder.build()`. If you - do not change any values, the defaults for the LDContext are that its `kind` is :const:`DEFAULT_KIND`, + do not change any values, the defaults for the Context are that its `kind` is :const:`DEFAULT_KIND`, its `key` is set to the key parameter specified here, `anonymous` is False, and it has no values for any other attributes. @@ -415,7 +415,7 @@ def get_individual_context(self, kind: Union[int, str]) -> Optional[Context]: If the method is called on a multi-context, and `kind` is a number, it must be a non-negative index that is less than the number of kinds (that is, less than the return value of :func:`individual_context_count`), and the return value on success is one of - the individual LDContexts within. Or, if `kind` is a string, it must match the context + the individual Contexts within. Or, if `kind` is a string, it must match the context kind of one of the individual contexts. If there is no context corresponding to `kind`, the method returns null. 
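For illustration, a minimal sketch of the lookup behavior described above (this example is not part of the patch; it assumes only the public Context API shown in these diffs):

    from ldclient.context import Context

    user = Context.create('u-key')            # individual context, default kind "user"
    org = Context.create('o-key', 'org')
    multi = Context.create_multi(user, org)

    multi.get_individual_context('org')       # returns the "org" context
    multi.get_individual_context(0)           # returns the first individual context by index
    multi.get_individual_context('device')    # returns None; no context of that kind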
@@ -698,7 +698,7 @@ def __init__(self, key: str, copy_from: Optional[Context] = None): def build(self) -> Context: """ - Creates a LDContext from the current builder properties. + Creates a Context from the current builder properties. The Context is immutable and will not be affected by any subsequent actions on the builder. @@ -706,7 +706,7 @@ def build(self) -> Context: Instead of throwing an exception, the ContextBuilder always returns an Context and you can check :func:`ldclient.Context.valid()` or :func:`ldclient.Context.error()` to see if it has an error. See :func:`ldclient.Context.valid()` for more information about invalid conditions. - If you pass an invalid LDContext to an SDK method, the SDK will detect this and will log a + If you pass an invalid Context to an SDK method, the SDK will detect this and will log a description of the error. :return: a new :class:`ldclient.Context` From 51e626d9ee0753255dd0c876317f51dc159d9991 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 13:32:59 -0800 Subject: [PATCH 316/356] rm unused Co-authored-by: Matthew M. Keeler --- ldclient/context.py | 1 - 1 file changed, 1 deletion(-) diff --git a/ldclient/context.py b/ldclient/context.py index 40698063..d24b3cfc 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -499,7 +499,6 @@ def to_dict(self) -> dict[str, Any]: """ if not self.valid: return {} - ret = {"kind": self.__kind} # type: dict[str, Any] if self.__multi is not None: ret = {"kind": "multi"} for c in self.__multi: From 1a6ed3077c47586dfc4473078b5dbe85d2146681 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 13:36:36 -0800 Subject: [PATCH 317/356] fix create_multi to support flattening --- ldclient/context.py | 6 +++++- testing/test_context.py | 8 ++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/ldclient/context.py b/ldclient/context.py index d24b3cfc..29f9eb8a 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -163,7 +163,11 @@ def create_multi(cls, *contexts: Context) -> Context: :param contexts: the individual contexts :return: a multi-context """ - return Context(None, '', None, False, None, None, list(contexts)) + # implementing this via multi_builder gives us the flattening behavior for free + builder = ContextMultiBuilder() + for c in contexts: + builder.add(c) + return builder.build() @classmethod def from_dict(cls, props: dict) -> Context: diff --git a/testing/test_context.py b/testing/test_context.py index 7281b3f3..0921881d 100644 --- a/testing/test_context.py +++ b/testing/test_context.py @@ -233,6 +233,14 @@ def test_create_multi(self): assert mc.get_individual_context(-1) is None assert mc.get_individual_context(2) is None + def test_create_multi_flattens_nested_multi_context(self): + c1 = Context.create('a', 'kind1') + c2 = Context.create('b', 'kind2') + c3 = Context.create('c', 'kind3') + c2plus3 = Context.create_multi(c2, c3) + mc = Context.create_multi(c1, c2plus3) + assert mc == Context.create_multi(c1, c2, c3) + def test_multi_builder(self): c1 = Context.create('a', 'kind1') c2 = Context.create('b', 'kind2') From 6e716c74a94be91880f54ae49d5b7f9a8761ebbf Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 13:41:19 -0800 Subject: [PATCH 318/356] lint --- ldclient/context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/context.py b/ldclient/context.py index 29f9eb8a..4be46f5e 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -504,7 +504,7 @@ def to_dict(self) -> dict[str, Any]: if not 
self.valid: return {} if self.__multi is not None: - ret = {"kind": "multi"} + ret = {"kind": "multi"} # type: dict[str, Any] for c in self.__multi: ret[c.kind] = c.__to_dict_single(False) return ret From 7ae9528832cd387ed5dd6e09ea1fd83f7d263fda Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 18:42:29 -0800 Subject: [PATCH 319/356] use custom classes for flag/segment data model --- ldclient/client.py | 2 + ldclient/feature_store.py | 11 +- ldclient/feature_store_helpers.py | 13 +- ldclient/impl/evaluator.py | 170 ++++++++--------- ldclient/impl/event_factory.py | 37 ++-- .../integrations/files/file_data_source.py | 15 +- ldclient/impl/model/__init__.py | 6 + ldclient/impl/model/clause.py | 33 ++++ ldclient/impl/model/encoder.py | 17 ++ ldclient/impl/model/entity.py | 104 ++++++++++ ldclient/impl/model/feature_flag.py | 158 ++++++++++++++++ ldclient/impl/model/segment.py | 115 +++++++++++ ldclient/impl/model/variation_or_rollout.py | 73 +++++++ ldclient/versioned_data_kind.py | 21 ++- testing/builders.py | 64 +++++-- testing/feature_store_test_base.py | 24 +-- testing/impl/evaluator_util.py | 17 +- testing/impl/test_evaluator.py | 178 +++--------------- testing/impl/test_evaluator_big_segment.py | 63 +++---- testing/impl/test_evaluator_bucketing.py | 17 +- testing/impl/test_evaluator_segment.py | 2 +- testing/impl/test_evaluator_target.py | 4 +- testing/impl/test_model_encoder.py | 17 ++ testing/integrations/test_test_data_source.py | 20 +- testing/stub_util.py | 11 +- testing/test_event_factory.py | 34 ++-- testing/test_ldclient_evaluation.py | 19 +- testing/test_polling_processor.py | 14 +- testing/test_streaming.py | 22 ++- 29 files changed, 864 insertions(+), 417 deletions(-) create mode 100644 ldclient/impl/model/__init__.py create mode 100644 ldclient/impl/model/clause.py create mode 100644 ldclient/impl/model/encoder.py create mode 100644 ldclient/impl/model/entity.py create mode 100644 ldclient/impl/model/feature_flag.py create mode 100644 ldclient/impl/model/segment.py create mode 100644 ldclient/impl/model/variation_or_rollout.py create mode 100644 testing/impl/test_model_encoder.py diff --git a/ldclient/client.py b/ldclient/client.py index 335cc265..17c365d7 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -310,6 +310,8 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A try: flag = self._store.get(FEATURES, key, lambda x: x) + if isinstance(flag, dict): # shouldn't happen if we're using our standard store implementation + flag = FEATURES.decode(flag) except Exception as e: log.error("Unexpected error while retrieving feature flag \"%s\": %s" % (key, repr(e))) log.debug(traceback.format_exc()) diff --git a/ldclient/feature_store.py b/ldclient/feature_store.py index dcac232d..062ed5fa 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -108,10 +108,16 @@ def all(self, kind, callback): def init(self, all_data): """ """ + all_decoded = {} + for kind, items in all_data.items(): + items_decoded = {} + for key, item in items.items(): + items_decoded[key] = kind.decode(item) + all_decoded[kind] = items_decoded try: self._lock.rlock() self._items.clear() - self._items.update(all_data) + self._items.update(all_decoded) self._initialized = True for k in all_data: log.debug("Initialized '%s' store with %d items", k.namespace, len(all_data[k])) @@ -135,13 +141,14 @@ def delete(self, kind, key: str, version: int): def upsert(self, kind, item): """ """ + decoded_item = kind.decode(item) key = item['key'] try: 
self._lock.rlock() itemsOfKind = self._items[kind] i = itemsOfKind.get(key) if i is None or i['version'] < item['version']: - itemsOfKind[key] = item + itemsOfKind[key] = decoded_item log.debug("Updated %s in '%s' to version %d", key, kind.namespace, item['version']) finally: self._lock.runlock() diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index 1904f59d..d1177c9d 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -53,7 +53,8 @@ def get(self, kind, key, callback=lambda x: x): # note, cached items are wrapped in an array so we can cache None values if cached_item is not None: return callback(self._item_if_not_deleted(cached_item[0])) - item = self._core.get_internal(kind, key) + item_as_dict = self._core.get_internal(kind, key) + item = None if item_as_dict is None else kind.decode(item_as_dict) if self._cache is not None: self._cache[cache_key] = [item] return callback(self._item_if_not_deleted(item)) @@ -66,7 +67,12 @@ def all(self, kind, callback=lambda x: x): cached_items = self._cache.get(cache_key) if cached_items is not None: return callback(cached_items) - items = self._items_if_not_deleted(self._core.get_all_internal(kind)) + items_as_dicts = self._core.get_all_internal(kind) + all_items = {} + if items_as_dicts is not None: + for key, item in items_as_dicts.items(): + all_items[key] = kind.decode(item) + items = self._items_if_not_deleted(all_items) if self._cache is not None: self._cache[cache_key] = items return callback(items) @@ -80,7 +86,8 @@ def delete(self, kind, key, version): def upsert(self, kind, item): """ """ - new_state = self._core.upsert_internal(kind, item) + item_as_dict = kind.encode(item) + new_state = self._core.upsert_internal(kind, item_as_dict) if self._cache is not None: self._cache[self._item_cache_key(kind, item.get('key'))] = [new_state] self._cache.pop(self._all_cache_key(kind), None) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index a80cc2c1..01dc2ed6 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -2,6 +2,7 @@ from ldclient.context import Context, _USER_STRING_ATTRS from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.impl.event_factory import _EventFactory +from ldclient.impl.model import * import hashlib import logging @@ -67,8 +68,8 @@ class Evaluator: """ def __init__( self, - get_flag: Callable[[str], Optional[dict]], - get_segment: Callable[[str], Optional[dict]], + get_flag: Callable[[str], Optional[FeatureFlag]], + get_segment: Callable[[str], Optional[Segment]], get_big_segments_membership: Callable[[str], Tuple[Optional[dict], str]] ): """ @@ -82,15 +83,15 @@ def __init__( self.__get_segment = get_segment self.__get_big_segments_membership = get_big_segments_membership - def evaluate(self, flag: dict, context: Context, event_factory: _EventFactory) -> EvalResult: + def evaluate(self, flag: FeatureFlag, context: Context, event_factory: _EventFactory) -> EvalResult: state = EvalResult() state.detail = self._evaluate(flag, context, state, event_factory) if state.big_segments_status is not None: state.detail.reason['bigSegmentsStatus'] = state.big_segments_status return state - def _evaluate(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory) -> EvaluationDetail: - if not flag.get('on', False): + def _evaluate(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: _EventFactory) -> EvaluationDetail: + if not flag.on: return 
_get_off_value(flag, {'kind': 'OFF'}) prereq_failure_reason = self._check_prerequisites(flag, context, state, event_factory) @@ -103,37 +104,37 @@ def _evaluate(self, flag: dict, context: Context, state: EvalResult, event_facto return target_result # Now walk through the rules to see if any match - for index, rule in enumerate(flag.get('rules') or []): + for index, rule in enumerate(flag.rules): if self._rule_matches_context(rule, context, state): - return _get_value_for_variation_or_rollout(flag, rule, context, - {'kind': 'RULE_MATCH', 'ruleIndex': index, 'ruleId': rule.get('id')}) + return _get_value_for_variation_or_rollout(flag, rule.variation_or_rollout, context, + {'kind': 'RULE_MATCH', 'ruleIndex': index, 'ruleId': rule.id}) # Walk through fallthrough and see if it matches - return _get_value_for_variation_or_rollout(flag, flag['fallthrough'] or {}, context, {'kind': 'FALLTHROUGH'}) + return _get_value_for_variation_or_rollout(flag, flag.fallthrough, context, {'kind': 'FALLTHROUGH'}) - def _check_prerequisites(self, flag: dict, context: Context, state: EvalResult, event_factory: _EventFactory) -> Optional[dict]: + def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: _EventFactory) -> Optional[dict]: failed_prereq = None prereq_res = None - for prereq in flag.get('prerequisites') or []: - prereq_flag = self.__get_flag(prereq.get('key')) + for prereq in flag.prerequisites: + prereq_flag = self.__get_flag(prereq.key) if prereq_flag is None: - log.warning("Missing prereq flag: " + prereq.get('key')) + log.warning("Missing prereq flag: " + prereq.key) failed_prereq = prereq else: prereq_res = self._evaluate(prereq_flag, context, state, event_factory) # Note that if the prerequisite flag is off, we don't consider it a match no matter what its # off variation was. But we still need to evaluate it in order to generate an event. 
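 # Illustrative example (not part of the patch): if flag A declares the prerequisite
 # {'key': 'B', 'variation': 1}, the prerequisite passes only when B is on and B's
 # evaluation returns variation index 1. Otherwise A short-circuits to its off variation
 # with reason {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'B'}, though the
 # evaluation event for B is still recorded.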
- if (not prereq_flag.get('on', False)) or prereq_res.variation_index != prereq.get('variation'): + if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation: failed_prereq = prereq event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag) state.add_event(event) if failed_prereq: - return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.get('key')} + return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key} return None - def _check_targets(self, flag: dict, context: Context) -> Optional[EvaluationDetail]: - user_targets = flag.get('targets') or [] - context_targets = flag.get('contextTargets') or [] + def _check_targets(self, flag: FeatureFlag, context: Context) -> Optional[EvaluationDetail]: + user_targets = flag.targets + context_targets = flag.context_targets if len(context_targets) == 0: # old-style data has only targets for users if len(user_targets) != 0: @@ -142,55 +143,54 @@ def _check_targets(self, flag: dict, context: Context) -> Optional[EvaluationDet return None key = user_context.key for t in user_targets: - if key in t['values']: - return _target_match_result(flag, t.get('variation')) + if key in t.values: + return _target_match_result(flag, t.variation) return None for t in context_targets: - kind = t.get('contextKind') or Context.DEFAULT_KIND - var = t['variation'] + kind = t.context_kind or Context.DEFAULT_KIND + var = t.variation actual_context = context.get_individual_context(kind) if actual_context is None: continue key = actual_context.key if kind == Context.DEFAULT_KIND: for ut in user_targets: - if ut['variation'] == var: - if key in ut['values']: + if ut.variation == var: + if key in ut.values: return _target_match_result(flag, var) break continue - if key in t['values']: + if key in t.values: return _target_match_result(flag, var) return None - def _rule_matches_context(self, rule: dict, context: Context, state: EvalResult) -> bool: - for clause in rule.get('clauses') or []: - if clause.get('attribute') is not None: - if not self._clause_matches_context(clause, context, state): - return False + def _rule_matches_context(self, rule: FlagRule, context: Context, state: EvalResult) -> bool: + for clause in rule.clauses: + if not self._clause_matches_context(clause, context, state): + return False return True - def _clause_matches_context(self, clause: dict, context: Context, state: EvalResult) -> bool: - op = clause['op'] + def _clause_matches_context(self, clause: Clause, context: Context, state: EvalResult) -> bool: + op = clause.op + clause_values = clause.values if op == 'segmentMatch': - for seg_key in clause.get('values') or []: + for seg_key in clause_values: segment = self.__get_segment(seg_key) if segment is not None and self._segment_matches_context(segment, context, state): return _maybe_negate(clause, True) return _maybe_negate(clause, False) - attr = clause.get('attribute') + attr = clause.attribute if attr is None: return False if attr == 'kind': return _maybe_negate(clause, _match_clause_by_kind(clause, context)) - actual_context = context.get_individual_context(clause.get('contextKind') or Context.DEFAULT_KIND) + actual_context = context.get_individual_context(clause.context_kind or Context.DEFAULT_KIND) if actual_context is None: return False context_value = actual_context.get(attr) if context_value is None: return False - clause_values = clause.get('values') or [] # is the attr an array? 
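 # Illustrative example (not part of the patch): a clause such as
 # {'attribute': 'groups', 'op': 'in', 'values': ['admins']} matches a context whose
 # 'groups' attribute is a list like ['admins', 'beta'], because a list-valued
 # attribute matches if any of its elements matches any clause value.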
if isinstance(context_value, (list, tuple)): @@ -200,45 +200,44 @@ def _clause_matches_context(self, clause: dict, context: Context, state: EvalRes return _maybe_negate(clause, False) return _maybe_negate(clause, _match_single_context_value(op, context_value, clause_values)) - def _segment_matches_context(self, segment: dict, context: Context, state: EvalResult) -> bool: - if segment.get('unbounded', False): + def _segment_matches_context(self, segment: Segment, context: Context, state: EvalResult) -> bool: + if segment.unbounded: return self._big_segment_match_context(segment, context, state) return self._simple_segment_match_context(segment, context, state, True) - def _simple_segment_match_context(self, segment: dict, context: Context, state: EvalResult, use_includes_and_excludes: bool) -> bool: + def _simple_segment_match_context(self, segment: Segment, context: Context, state: EvalResult, use_includes_and_excludes: bool) -> bool: if use_includes_and_excludes: - if _context_key_is_in_target_list(context, None, segment.get('included')): + if _context_key_is_in_target_list(context, None, segment.included): return True - for t in segment.get('includedContexts') or []: - if _context_key_is_in_target_list(context, t.get('contextKind'), t.get('values')): + for t in segment.included_contexts: + if _context_key_is_in_target_list(context, t.context_kind, t.values): return True - if _context_key_is_in_target_list(context, None, segment.get('excluded')): + if _context_key_is_in_target_list(context, None, segment.excluded): return False - for t in segment.get('excludedContexts') or []: - if _context_key_is_in_target_list(context, t.get('contextKind'), t.get('values')): + for t in segment.excluded_contexts: + if _context_key_is_in_target_list(context, t.context_kind, t.values): return False - for rule in segment.get('rules', []): - if self._segment_rule_matches_context(rule, context, state, segment['key'], segment.get('salt', '')): + for rule in segment.rules: + if self._segment_rule_matches_context(rule, context, state, segment.key, segment.salt): return True return False - def _segment_rule_matches_context(self, rule: dict, context: Context, state: EvalResult, segment_key: str, salt: str) -> bool: - for clause in rule.get('clauses') or []: + def _segment_rule_matches_context(self, rule: SegmentRule, context: Context, state: EvalResult, segment_key: str, salt: str) -> bool: + for clause in rule.clauses: if not self._clause_matches_context(clause, context, state): return False # If the weight is absent, this rule matches - if 'weight' not in rule or rule['weight'] is None: + if rule.weight is None: return True # All of the clauses are met. See if the context buckets in - bucket_by = 'key' if rule.get('bucketBy') is None else rule['bucketBy'] - bucket = _bucket_context(None, context, rule.get('rolloutContextKind'), segment_key, salt, bucket_by) - weight = rule['weight'] / 100000.0 + bucket = _bucket_context(None, context, rule.rollout_context_kind, segment_key, salt, rule.bucket_by) + weight = rule.weight / 100000.0 return bucket < weight - def _big_segment_match_context(self, segment: dict, context: Context, state: EvalResult) -> bool: - generation = segment.get('generation', None) + def _big_segment_match_context(self, segment: Segment, context: Context, state: EvalResult) -> bool: + generation = segment.generation if generation is None: # Big segment queries can only be done if the generation is known. 
If it's unset, # that probably means the data store was populated by an older SDK that doesn't know @@ -260,19 +259,19 @@ def _big_segment_match_context(self, segment: dict, context: Context, state: Eva # The following functions are declared outside Evaluator because they do not depend on any # of Evaluator's state. -def _get_variation(flag: dict, variation: int, reason: dict) -> EvaluationDetail: - vars = flag.get('variations') or [] +def _get_variation(flag: FeatureFlag, variation: int, reason: dict) -> EvaluationDetail: + vars = flag.variations if variation < 0 or variation >= len(vars): return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) return EvaluationDetail(vars[variation], variation, reason) -def _get_off_value(flag: dict, reason: dict) -> EvaluationDetail: - off_var = flag.get('offVariation') +def _get_off_value(flag: FeatureFlag, reason: dict) -> EvaluationDetail: + off_var = flag.off_variation if off_var is None: return EvaluationDetail(None, None, reason) return _get_variation(flag, off_var, reason) -def _get_value_for_variation_or_rollout(flag: dict, vr: dict, context: Context, reason: dict) -> EvaluationDetail: +def _get_value_for_variation_or_rollout(flag: FeatureFlag, vr: VariationOrRollout, context: Context, reason: dict) -> EvaluationDetail: index, inExperiment = _variation_index_for_context(flag, vr, context) if index is None: return EvaluationDetail(None, None, error_reason('MALFORMED_FLAG')) @@ -280,46 +279,45 @@ def _get_value_for_variation_or_rollout(flag: dict, vr: dict, context: Context, reason['inExperiment'] = inExperiment return _get_variation(flag, index, reason) -def _variation_index_for_context(feature: dict, rule: dict, context: Context) -> Tuple[Optional[int], bool]: - var = rule.get('variation') +def _variation_index_for_context(flag: FeatureFlag, vr: VariationOrRollout, context: Context) -> Tuple[Optional[int], bool]: + var = vr.variation if var is not None: return (var, False) - rollout = rule.get('rollout') + rollout = vr.rollout if rollout is None: return (None, False) - variations = rollout.get('variations') - if variations is None or len(variations) == 0: + variations = rollout.variations + if len(variations) == 0: return (None, False) - rollout_is_experiment = rollout.get('kind') == 'experiment' - bucket_by = None if rollout_is_experiment else rollout.get('bucketBy') + bucket_by = None if rollout.is_experiment else rollout.bucket_by bucket = _bucket_context( - rollout.get('seed'), + rollout.seed, context, - rollout.get('contextKind'), - feature['key'], - feature['salt'], + rollout.context_kind, + flag.key, + flag.salt, bucket_by ) - is_experiment = rollout_is_experiment and bucket >= 0 + is_experiment = rollout.is_experiment and bucket >= 0 # _bucket_context returns a negative value if the context didn't exist, in which case we # still end up returning the first bucket, but we will force the "in experiment" state to be false. sum = 0.0 for wv in variations: - sum += wv.get('weight', 0.0) / 100000.0 + sum += wv.weight / 100000.0 if bucket < sum: - is_experiment_partition = is_experiment and not wv.get('untracked') - return (wv.get('variation'), is_experiment_partition) + is_experiment_partition = is_experiment and not wv.untracked + return (wv.variation, is_experiment_partition) # The context's bucket value was greater than or equal to the end of the last bucket. 
This could happen due # to a rounding error, or due to the fact that we are scaling to 100000 rather than 99999, or the flag # data could contain buckets that don't actually add up to 100000. Rather than returning an error in # this case (or changing the scaling, which would potentially change the results for *all* contexts), we # will simply put the context in the last bucket. - is_experiment_partition = is_experiment and not variations[-1].get('untracked') - return (variations[-1].get('variation'), is_experiment_partition) + is_experiment_partition = is_experiment and not variations[-1].untracked + return (variations[-1].variation, is_experiment_partition) def _bucket_context( seed: Optional[int], @@ -371,29 +369,27 @@ def _match_single_context_value(op: str, context_value: Any, values: List[Any]) return True return False -def _match_clause_by_kind(clause: dict, context: Context) -> bool: +def _match_clause_by_kind(clause: Clause, context: Context) -> bool: # If attribute is "kind", then we treat operator and values as a match expression against a list # of all individual kinds in the context. That is, for a multi-kind context with kinds of "org" # and "user", it is a match if either of those strings is a match with Operator and Values. - op = clause['op'] + op = clause.op for i in range(context.individual_context_count): c = context.get_individual_context(i) - if c is not None and _match_single_context_value(op, c.kind, clause.get('values') or []): + if c is not None and _match_single_context_value(op, c.kind, clause.values): return True return False -def _maybe_negate(clause: dict, val: bool) -> bool: - if clause.get('negate', False) is True: - return not val - return val +def _maybe_negate(clause: Clause, val: bool) -> bool: + return not val if clause.negate else val -def _make_big_segment_ref(segment: dict) -> str: +def _make_big_segment_ref(segment: Segment) -> str: # The format of Big Segment references is independent of what store implementation is being # used; the store implementation receives only this string and does not know the details of # the data model. The Relay Proxy will use the same format when writing to the store. - return "%s.g%d" % (segment.get('key', ''), segment.get('generation', 0)) + return "%s.g%d" % (segment.key, segment.generation or 0) -def _target_match_result(flag: dict, var: int) -> EvaluationDetail: +def _target_match_result(flag: FeatureFlag, var: int) -> EvaluationDetail: return _get_variation(flag, var, {'kind': 'TARGET_MATCH'}) def error_reason(error_kind: str) -> dict: diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py index d9ba5925..d32291ea 100644 --- a/ldclient/impl/event_factory.py +++ b/ldclient/impl/event_factory.py @@ -1,3 +1,6 @@ +from ldclient.impl.model import * + +from typing import Optional # Event constructors are centralized here to avoid mistakes and repetitive logic. 
# The LDClient owns two instances of _EventFactory: one that always embeds evaluation reasons @@ -10,44 +13,44 @@ class _EventFactory: def __init__(self, with_reasons): self._with_reasons = with_reasons - def new_eval_event(self, flag, user, detail, default_value, prereq_of_flag = None): + def new_eval_event(self, flag: FeatureFlag, user, detail, default_value, prereq_of_flag: Optional[FeatureFlag] = None) -> dict: add_experiment_data = self.is_experiment(flag, detail.reason) e = { 'kind': 'feature', - 'key': flag.get('key'), + 'key': flag.key, 'user': user, 'value': detail.value, 'variation': detail.variation_index, 'default': default_value, - 'version': flag.get('version') + 'version': flag.version } # the following properties are handled separately so we don't waste bandwidth on unused keys - if add_experiment_data or flag.get('trackEvents', False): + if add_experiment_data or flag.track_events: e['trackEvents'] = True - if flag.get('debugEventsUntilDate', None): - e['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') + if flag.debug_events_until_date: + e['debugEventsUntilDate'] = flag.debug_events_until_date if prereq_of_flag is not None: - e['prereqOf'] = prereq_of_flag.get('key') + e['prereqOf'] = prereq_of_flag.key if add_experiment_data or self._with_reasons: e['reason'] = detail.reason if user is not None and user.get('anonymous'): e['contextKind'] = self._user_to_context_kind(user) return e - def new_default_event(self, flag, user, default_value, reason): + def new_default_event(self, flag: FeatureFlag, user, default_value, reason) -> dict: e = { 'kind': 'feature', - 'key': flag.get('key'), + 'key': flag.key, 'user': user, 'value': default_value, 'default': default_value, - 'version': flag.get('version') + 'version': flag.version } # the following properties are handled separately so we don't waste bandwidth on unused keys - if flag.get('trackEvents', False): + if flag.track_events: e['trackEvents'] = True - if flag.get('debugEventsUntilDate', None): - e['debugEventsUntilDate'] = flag.get('debugEventsUntilDate') + if flag.debug_events_until_date: + e['debugEventsUntilDate'] = flag.debug_events_until_date if self._with_reasons: e['reason'] = reason if user is not None and user.get('anonymous'): @@ -96,15 +99,15 @@ def _user_to_context_kind(self, user): return "user" @staticmethod - def is_experiment(flag, reason): + def is_experiment(flag: FeatureFlag, reason): if reason is not None: if reason.get('inExperiment'): return True kind = reason['kind'] if kind == 'RULE_MATCH': index = reason['ruleIndex'] - rules = flag.get('rules') or [] - return index >= 0 and index < len(rules) and rules[index].get('trackEvents', False) + rules = flag.rules + return index >= 0 and index < len(rules) and rules[index].track_events elif kind == 'FALLTHROUGH': - return flag.get('trackEventsFallthrough', False) + return flag.track_events_fallthrough return False diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py index f25eecae..34e69f26 100644 --- a/ldclient/impl/integrations/files/file_data_source.py +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -23,6 +23,10 @@ from ldclient.util import log from ldclient.versioned_data_kind import FEATURES, SEGMENTS +def _sanitize_json_item(item): + if not ('version' in item): + item['version'] = 1 + class _FileDataSource(UpdateProcessor): def __init__(self, store, ready, paths, auto_update, poll_interval, force_polling): self._store = store @@ -62,8 +66,12 @@ def 
_load_all(self): log.error('Unable to load flag data from "%s": %s' % (path, repr(e))) traceback.print_exc() return - self._store.init(all_data) - self._inited = True + try: + self._store.init(all_data) + self._inited = True + except Exception as e: + log.error('Unable to store data: %s' % repr(e)) + traceback.print_exc() def _load_file(self, path, all_data): content = None @@ -71,10 +79,12 @@ def _load_file(self, path, all_data): content = f.read() parsed = self._parse_content(content) for key, flag in parsed.get('flags', {}).items(): + _sanitize_json_item(flag) self._add_item(all_data, FEATURES, flag) for key, value in parsed.get('flagValues', {}).items(): self._add_item(all_data, FEATURES, self._make_flag_with_value(key, value)) for key, segment in parsed.get('segments', {}).items(): + _sanitize_json_item(segment) self._add_item(all_data, SEGMENTS, segment) def _parse_content(self, content): @@ -93,6 +103,7 @@ def _add_item(self, all_data, kind, item): def _make_flag_with_value(self, key, value): return { 'key': key, + 'version': 1, 'on': True, 'fallthrough': { 'variation': 0 diff --git a/ldclient/impl/model/__init__.py b/ldclient/impl/model/__init__.py new file mode 100644 index 00000000..b485d21f --- /dev/null +++ b/ldclient/impl/model/__init__.py @@ -0,0 +1,6 @@ +from .clause import * +from .encoder import * +from .entity import * +from .feature_flag import * +from .segment import * +from .variation_or_rollout import * diff --git a/ldclient/impl/model/clause.py b/ldclient/impl/model/clause.py new file mode 100644 index 00000000..e601be4a --- /dev/null +++ b/ldclient/impl/model/clause.py @@ -0,0 +1,33 @@ +from typing import Any, List, Optional + +from ldclient.impl.model.entity import * + +class Clause: + __slots__ = ['_context_kind', '_attribute', '_op', '_values', '_negate'] + + def __init__(self, data: dict): + self._attribute = req_str(data, 'attribute') + self._context_kind = opt_str(data, 'contextKind') + self._negate = opt_bool(data, 'negate') + self._op = req_str(data, 'op') + self._values = req_list(data, 'values') + + @property + def attribute(self) -> str: + return self._attribute + + @property + def context_kind(self) -> Optional[str]: + return self._context_kind + + @property + def negate(self) -> bool: + return self._negate + + @property + def op(self) -> str: + return self._op + + @property + def values(self) -> List[Any]: + return self._values diff --git a/ldclient/impl/model/encoder.py b/ldclient/impl/model/encoder.py new file mode 100644 index 00000000..af6ad258 --- /dev/null +++ b/ldclient/impl/model/encoder.py @@ -0,0 +1,17 @@ +from ldclient.impl.model.entity import ModelEntity + +import json + +class ModelEncoder(json.JSONEncoder): + """ + A JSON encoder customized to serialize our data model types correctly. We should + use this whenever we are writing flag data to a persistent store. + """ + + def __init__(self): + super().__init__(separators=(',',':')) + + def default(self, obj): + if isinstance(obj, ModelEntity): + return obj.to_json_dict() + return json.JSONEncoder.default(self, obj) diff --git a/ldclient/impl/model/entity.py b/ldclient/impl/model/entity.py new file mode 100644 index 00000000..42b2a4b0 --- /dev/null +++ b/ldclient/impl/model/entity.py @@ -0,0 +1,104 @@ +import json + +from typing import Any, List, Optional, Union + +# This file provides support for our data model classes. +# +# Top-level data model classes (FeatureFlag, Segment) should subclass ModelEntity. 
This +# provides a standard behavior where we decode the entity from a dict that corresponds to +# the JSON representation, and the constructor for each class does any necessary capturing +# and validation of individual properties, while the ModelEntity constructor also stores +# the original data as a dict so we can easily re-serialize it or inspect it as a dict. +# +# Lower-level classes such as Clause are not derived from ModelEntity because we don't +# need to serialize them outside of the enclosing FeatureFlag/Segment. +# +# All data model classes should use the opt_ and req_ functions so that any JSON values +# of invalid types will cause immediate rejection of the data set, rather than allowing +# invalid types to get into the evaluation/event logic where they would cause errors that +# are harder to diagnose. + +def opt_type(data: dict, name: str, desired_type) -> Any: + value = data.get(name) + if value is not None and not isinstance(value, desired_type): + raise ValueError('error in flag/segment data: property "%s" should be type %s but was %s"' % \ + (name, desired_type, value.__class__)) + return value + +def opt_bool(data: dict, name: str) -> bool: + return opt_type(data, name, bool) is True + +def opt_dict(data: dict, name: str) -> Optional[dict]: + return opt_type(data, name, dict) + +def opt_dict_list(data: dict, name: str) -> list: + return validate_list_type(opt_list(data, name), name, dict) + +def opt_int(data: dict, name: str) -> Optional[int]: + return opt_type(data, name, int) + +def opt_number(data: dict, name: str) -> Optional[Union[int, float]]: + value = data.get(name) + if value is not None and not isinstance(value, int) and not isinstance(value, float): + raise ValueError('error in flag/segment data: property "%s" should be a number but was %s"' % \ + (name, value.__class__)) + return value + +def opt_list(data: dict, name: str) -> list: + return opt_type(data, name, list) or [] + +def opt_str(data: dict, name: str) -> Optional[str]: + return opt_type(data, name, str) + +def opt_str_list(data: dict, name: str) -> List[str]: + return validate_list_type(opt_list(data, name), name, str) + +def req_type(data: dict, name: str, desired_type) -> Any: + value = opt_type(data, name, desired_type) + if value is None: + raise ValueError('error in flag/segment data: required property "%s" is missing' % name) + return value + +def req_dict_list(data: dict, name: str) -> list: + return validate_list_type(req_list(data, name), name, dict) + +def req_int(data: dict, name: str) -> int: + return req_type(data, name, int) + +def req_list(data: dict, name: str) -> list: + return req_type(data, name, list) + +def req_str(data: dict, name: str) -> str: + return req_type(data, name, str) + +def req_str_list(data: dict, name: str) -> List[str]: + return validate_list_type(req_list(data, name), name, str) + +def validate_list_type(items: list, name: str, desired_type) -> list: + for item in items: + if not isinstance(item, desired_type): + raise ValueError('error in flag/segment data: property %s should be an array of %s but an item was %s' % \ + (name, desired_type, item.__class__)) + return items + +class ModelEntity: + def __init__(self, data: dict): + self._data = data + + def to_json_dict(self): + return self._data + + def get(self, attribute, default = None) -> Any: + return self._data.get(attribute, default) + + def __getitem__(self, attribute) -> Any: + return self._data[attribute] + + def __contains__(self, attribute) -> bool: + return attribute in self._data + + def 
__eq__(self, other) -> bool: + return self.__class__ == other.__class__ and self._data == other._data + + def __repr__(self) -> str: + return json.dumps(self._data, separators=(',',':')) diff --git a/ldclient/impl/model/feature_flag.py b/ldclient/impl/model/feature_flag.py new file mode 100644 index 00000000..7fb83f99 --- /dev/null +++ b/ldclient/impl/model/feature_flag.py @@ -0,0 +1,158 @@ +from typing import Any, List, Optional + +from ldclient.impl.model.clause import Clause +from ldclient.impl.model.entity import * +from ldclient.impl.model.variation_or_rollout import VariationOrRollout + + +class Prerequisite: + __slots__ = ['_key', '_variation'] + + def __init__(self, data: dict): + self._key = req_str(data, 'key') + self._variation = req_int(data, 'variation') + + @property + def key(self) -> str: + return self._key + + @property + def variation(self) -> int: + return self._variation + + +class Target: + __slots__ = ['_data', '_context_kind', '_variation', '_values'] + + def __init__(self, data: dict): + self._context_kind = opt_str(data, 'contextKind') + self._variation = req_int(data, 'variation') + self._values = req_str_list(data, 'values') + + @property + def context_kind(self) -> Optional[str]: + return self._context_kind + + @property + def variation(self) -> int: + return self._variation + + @property + def values(self) -> List[str]: + return self._values + + +class FlagRule: + __slots__ = ['_data', '_id', '_clauses', '_track_events', '_variation_or_rollout'] + + def __init__(self, data: dict): + self._id = opt_str(data, 'id') + self._variation_or_rollout = VariationOrRollout(data) + self._clauses = list(Clause(item) for item in req_dict_list(data, 'clauses')) + self._track_events = opt_bool(data, 'trackEvents') + + @property + def id(self) -> Optional[str]: + return self._id + + @property + def clauses(self) -> List[Clause]: + return self._clauses + + @property + def track_events(self) -> bool: + return self._track_events + + @property + def variation_or_rollout(self) -> VariationOrRollout: + return self._variation_or_rollout + + +class FeatureFlag(ModelEntity): + __slots__ = ['_data', '_key', '_version', '_deleted', '_variations', '_on', + '_off_variation', '_fallthrough', '_prerequisites', '_targets', '_context_targets', '_rules', + '_salt', '_track_events', '_debug_events_until_date'] + + def __init__(self, data: dict): + super().__init__(data) + # In the following logic, we're being somewhat lenient in terms of allowing most properties to + # be absent even if they are really required in the schema. That's for backward compatibility + # with test logic that constructed incomplete JSON, and also with the file data source which + # previously allowed users to get away with leaving out a lot of properties in the JSON. 
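+        # Illustrative example (not part of the patch): with data = {'key': 'f', 'version': 1},
+        # req_str(data, 'key') returns 'f'; opt_bool(data, 'deleted') returns False because the
+        # property is absent; and req_int(data, 'version') would raise ValueError if 'version'
+        # held a string, so a bad type rejects the whole data set at decode time instead of
+        # surfacing later during evaluation.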
+ self._key = req_str(data, 'key') + self._version = req_int(data, 'version') + self._deleted = opt_bool(data, 'deleted') + if self._deleted: + return + self._variations = opt_list(data, 'variations') + self._on = opt_bool(data, 'on') + self._off_variation = opt_int(data, 'offVariation') + self._fallthrough = VariationOrRollout(opt_dict(data, 'fallthrough')) + self._prerequisites = list(Prerequisite(item) for item in opt_dict_list(data, 'prerequisites')) + self._rules = list(FlagRule(item) for item in opt_dict_list(data, 'rules')) + self._targets = list(Target(item) for item in opt_dict_list(data, 'targets')) + self._context_targets = list(Target(item) for item in opt_dict_list(data, 'contextTargets')) + self._salt = opt_str(data, 'salt') or '' + self._track_events = opt_bool(data, 'trackEvents') + self._track_events_fallthrough = opt_bool(data, 'trackEventsFallthrough') + self._debug_events_until_date = opt_number(data, 'debugEventsUntilDate') + + @property + def key(self) -> str: + return self._key + + @property + def version(self) -> int: + return self._version + + @property + def deleted(self) -> bool: + return self._deleted + + @property + def variations(self) -> List[Any]: + return self._variations + + @property + def on(self) -> bool: + return self._on + + @property + def off_variation(self) -> Optional[int]: + return self._off_variation + + @property + def fallthrough(self) -> VariationOrRollout: + return self._fallthrough + + @property + def prerequisites(self) -> list[Prerequisite]: + return self._prerequisites + + @property + def targets(self) -> list[Target]: + return self._targets + + @property + def context_targets(self) -> list[Target]: + return self._context_targets + + @property + def rules(self) -> list[FlagRule]: + return self._rules + + @property + def salt(self) -> str: + return self._salt + + @property + def track_events(self) -> bool: + return self._track_events + + @property + def track_events_fallthrough(self) -> bool: + return self._track_events_fallthrough + + @property + def debug_events_until_date(self) -> Optional[Union[int, float]]: + return self._debug_events_until_date diff --git a/ldclient/impl/model/segment.py b/ldclient/impl/model/segment.py new file mode 100644 index 00000000..93c06371 --- /dev/null +++ b/ldclient/impl/model/segment.py @@ -0,0 +1,115 @@ +from typing import Any, List, Optional + +from ldclient.impl.model.clause import Clause +from ldclient.impl.model.entity import * + + +class SegmentTarget: + __slots__ = ['_context_kind', '_values'] + + def __init__(self, data: dict, logger = None): + self._context_kind = opt_str(data, 'contextKind') + self._values = req_str_list(data, 'values') + + @property + def context_kind(self) -> Optional[str]: + return self._context_kind + + @property + def values(self) -> List[str]: + return self._values + + +class SegmentRule: + __slots__ = ['_bucket_by', '_clauses', '_rollout_context_kind', '_weight'] + + def __init__(self, data: dict): + self._bucket_by = opt_str(data, 'bucketBy') + self._clauses = list(Clause(item) for item in req_dict_list(data, 'clauses')) + self._rollout_context_kind = opt_str(data, 'rolloutContextKind') + self._weight = opt_int(data, 'weight') + + @property + def bucket_by(self) -> Optional[str]: + return self._bucket_by + + @property + def clauses(self) -> List[Clause]: + return self._clauses + + @property + def rollout_context_kind(self) -> Optional[str]: + return self._rollout_context_kind + + @property + def weight(self) -> Optional[int]: + return self._weight + + +class 
Segment(ModelEntity): + __slots__ = ['_data', '_key', '_version', '_deleted', '_included', '_excluded', + '_included_contexts', '_excluded_contexts', '_rules', '_salt', '_unbounded', '_generation'] + + def __init__(self, data: dict): + super().__init__(data) + # In the following logic, we're being somewhat lenient in terms of allowing most properties to + # be absent even if they are really required in the schema. That's for backward compatibility + # with test logic that constructed incomplete JSON, and also with the file data source which + # previously allowed users to get away with leaving out a lot of properties in the JSON. + self._key = req_str(data, 'key') + self._version = req_int(data, 'version') + self._deleted = opt_bool(data, 'deleted') + if self._deleted: + return + self._included = opt_str_list(data, 'included') + self._excluded = opt_str_list(data, 'excluded') + self._included_contexts = list(SegmentTarget(item) for item in opt_dict_list(data, 'includedContexts')) + self._excluded_contexts = list(SegmentTarget(item) for item in opt_dict_list(data, 'excludedContexts')) + self._rules = list(SegmentRule(item) for item in opt_dict_list(data, 'rules')) + self._salt = opt_str(data, 'salt') or '' + self._unbounded = opt_bool(data, 'unbounded') + self._generation = opt_int(data, 'generation') + + @property + def key(self) -> str: + return self._key + + @property + def version(self) -> int: + return self._version + + @property + def deleted(self) -> bool: + return self._deleted + + @property + def included(self) -> List[str]: + return self._included + + @property + def excluded(self) -> List[str]: + return self._excluded + + @property + def included_contexts(self) -> List[SegmentTarget]: + return self._included_contexts + + @property + def excluded_contexts(self) -> List[SegmentTarget]: + return self._excluded_contexts + + @property + def rules(self) -> List[Any]: + return self._rules + + @property + def salt(self) -> str: + return self._salt + + @property + def unbounded(self) -> bool: + return self._unbounded + + @property + def generation(self) -> Optional[int]: + return self._generation diff --git a/ldclient/impl/model/variation_or_rollout.py b/ldclient/impl/model/variation_or_rollout.py new file mode 100644 index 00000000..79134c3d --- /dev/null +++ b/ldclient/impl/model/variation_or_rollout.py @@ -0,0 +1,73 @@ +from typing import Any, List, Optional + +from ldclient.impl.model.entity import * + + +class WeightedVariation: + __slots__ = ['_variation', '_weight', '_untracked'] + + def __init__(self, data: dict): + self._variation = req_int(data, 'variation') + self._weight = req_int(data, 'weight') + self._untracked = opt_bool(data, 'untracked') + + @property + def variation(self) -> int: + return self._variation + + @property + def weight(self) -> int: + return self._weight + + @property + def untracked(self) -> int: + return self._untracked + + +class Rollout: + __slots__ = ['_bucket_by', '_context_kind', '_is_experiment', '_seed', '_variations'] + + def __init__(self, data: dict): + self._bucket_by = opt_str(data, 'bucketBy') + self._context_kind = opt_str(data, 'contextKind') + self._is_experiment = opt_str(data, 'kind') == 'experiment' + self._seed = opt_int(data, 'seed') + self._variations = list(WeightedVariation(item) for item in req_dict_list(data, 'variations')) + + @property + def bucket_by(self) -> Optional[str]: + return self._bucket_by + + @property + def context_kind(self) -> Optional[str]: + return self._context_kind + + @property + def is_experiment(self) -> 
bool: + return self._is_experiment + + @property + def seed(self) -> Optional[int]: + return self._seed + + @property + def variations(self) -> list[WeightedVariation]: + return self._variations + + +class VariationOrRollout: + __slots__ = ['_variation', '_rollout'] + + def __init__(self, data): + data = {} if data is None else data + self._variation = opt_int(data, 'variation') + rollout = opt_dict(data, 'rollout') + self._rollout = None if rollout is None else Rollout(rollout) + + @property + def variation(self) -> Optional[int]: + return self._variation + + @property + def rollout(self) -> Optional[Rollout]: + return self._rollout diff --git a/ldclient/versioned_data_kind.py b/ldclient/versioned_data_kind.py index 910618e6..93d3ca83 100644 --- a/ldclient/versioned_data_kind.py +++ b/ldclient/versioned_data_kind.py @@ -9,16 +9,20 @@ for features or segments. """ +from ldclient.impl.model import FeatureFlag, ModelEntity, Segment + from collections import namedtuple -from typing import Callable, Iterable, Optional +from typing import Any, Callable, Iterable, Optional # Note that VersionedDataKind without the extra attributes is no longer used in the SDK, # but it's preserved here for backward compatibility just in case someone else used it class VersionedDataKind: - def __init__(self, namespace: str, request_api_path: str, stream_api_path: str): + def __init__(self, namespace: str, request_api_path: str, stream_api_path: str, + decoder: Optional[Callable[[dict], Any]] = None): self._namespace = namespace self._request_api_path = request_api_path self._stream_api_path = stream_api_path + self._decoder = decoder @property def namespace(self) -> str: @@ -31,11 +35,20 @@ def request_api_path(self) -> str: @property def stream_api_path(self) -> str: return self._stream_api_path + + def decode(self, data: Any) -> Any: + if self._decoder is None or isinstance(data, ModelEntity): + return data + return self._decoder(data) + + def encode(self, item: Any) -> dict: + return item.to_json_dict() if isinstance(item, ModelEntity) else item class VersionedDataKindWithOrdering(VersionedDataKind): def __init__(self, namespace: str, request_api_path: str, stream_api_path: str, + decoder: Optional[Callable[[dict], Any]], priority: int, get_dependency_keys: Optional[Callable[[dict], Iterable[str]]]): - super().__init__(namespace, request_api_path, stream_api_path) + super().__init__(namespace, request_api_path, stream_api_path, decoder) self._priority = priority self._get_dependency_keys = get_dependency_keys @@ -50,11 +63,13 @@ def get_dependency_keys(self) -> Optional[Callable[[dict], Iterable[str]]]: FEATURES = VersionedDataKindWithOrdering(namespace = "features", request_api_path = "/sdk/latest-flags", stream_api_path = "/flags/", + decoder = FeatureFlag, priority = 1, get_dependency_keys = lambda flag: (p.get('key') for p in flag.get('prerequisites', []))) SEGMENTS = VersionedDataKindWithOrdering(namespace = "segments", request_api_path = "/sdk/latest-segments", stream_api_path = "/segments/", + decoder = Segment, priority = 0, get_dependency_keys = None) diff --git a/testing/builders.py b/testing/builders.py index f5d76c15..3111016a 100644 --- a/testing/builders.py +++ b/testing/builders.py @@ -2,6 +2,7 @@ from typing import Any, List ,Optional from ldclient.context import Context +from ldclient.impl.model import * class BaseBuilder: @@ -36,14 +37,18 @@ def __init__(self, key): 'prerequisites': [], 'targets': [], 'contextTargets': [], - 'rules': [] + 'rules': [], + 'salt': '' }) + def build(self): + 
return FeatureFlag(self.data.copy()) + def key(self, key: str) -> FlagBuilder: return self._set('key', key) def version(self, version: int) -> FlagBuilder: - return self._set('key', version) + return self._set('version', version) def on(self, on: bool) -> FlagBuilder: return self._set('on', on) @@ -57,6 +62,12 @@ def off_variation(self, value: Optional[int]) -> FlagBuilder: def fallthrough_variation(self, index: int) -> FlagBuilder: return self._set('fallthrough', {'variation': index}) + def fallthrough_rollout(self, rollout: dict) -> FlagBuilder: + return self._set('fallthrough', {'rollout': rollout}) + + def prerequisite(self, key: str, variation: int) -> FlagBuilder: + return self._append('prerequisites', {'key': key, 'variation': variation}) + def target(self, variation: int, *keys: str) -> FlagBuilder: return self._append('targets', {'variation': variation, 'values': list(keys)}) @@ -66,18 +77,39 @@ def context_target(self, context_kind: str, variation: int, *keys: str) -> FlagB def rules(self, *rules: dict) -> FlagBuilder: return self._append_all('rules', list(rules)) + + def salt(self, value: str) -> FlagBuilder: + return self._set('salt', value) + + def track_events(self, value: bool) -> FlagBuilder: + return self._set('trackEvents', value) + + def track_events_fallthrough(self, value: bool) -> FlagBuilder: + return self._set('trackEventsFallthrough', value) + + def debug_events_until_date(self, value: Optional[int]) -> FlagBuilder: + return self._set('debugEventsUntilDate', value) class FlagRuleBuilder(BaseBuilder): def __init__(self): super().__init__({'clauses': []}) - def variation(self, variation: int) -> FlagRuleBuilder: - return self._set('variation', variation) - def clauses(self, *clauses: dict) -> FlagRuleBuilder: return self._append_all('clauses', list(clauses)) + def id(self, value: str) -> FlagRuleBuilder: + return self._set('id', value) + + def rollout(self, rollout: Optional[dict]) -> FlagRuleBuilder: + return self._set('rollout', rollout) + + def track_events(self, value: bool) -> FlagRuleBuilder: + return self._set('trackEvents', value) + + def variation(self, variation: int) -> FlagRuleBuilder: + return self._set('variation', variation) + class SegmentBuilder(BaseBuilder): def __init__(self, key): @@ -89,14 +121,18 @@ def __init__(self, key): 'includedContexts': [], 'excludedContexts': [], 'rules': [], - 'unbounded': False + 'unbounded': False, + 'salt': '' }) + def build(self): + return Segment(self.data.copy()) + def key(self, key: str) -> SegmentBuilder: return self._set('key', key) def version(self, version: int) -> SegmentBuilder: - return self._set('key', version) + return self._set('version', version) def excluded(self, *keys: str) -> SegmentBuilder: return self._append_all('excluded', list(keys)) @@ -115,6 +151,12 @@ def salt(self, salt: str) -> SegmentBuilder: def rules(self, *rules: dict) -> SegmentBuilder: return self._append_all('rules', list(rules)) + + def unbounded(self, value: bool) -> SegmentBuilder: + return self._set('unbounded', value) + + def generation(self, value: Optional[int]) -> SegmentBuilder: + return self._set('generation', value) class SegmentRuleBuilder(BaseBuilder): @@ -134,13 +176,13 @@ def weight(self, value: Optional[int]) -> SegmentRuleBuilder: return self._set('weight', value) -def make_boolean_flag_matching_segment(segment: dict) -> dict: - return make_boolean_flag_with_clauses(make_clause_matching_segment_key(segment['key'])) +def make_boolean_flag_matching_segment(segment: Segment) -> FeatureFlag: + return 
make_boolean_flag_with_clauses(make_clause_matching_segment_key(segment.key)) -def make_boolean_flag_with_clauses(*clauses: dict) -> dict: +def make_boolean_flag_with_clauses(*clauses: dict) -> FeatureFlag: return make_boolean_flag_with_rules(FlagRuleBuilder().clauses(*clauses).variation(0).build()) -def make_boolean_flag_with_rules(*rules: dict) -> dict: +def make_boolean_flag_with_rules(*rules: dict) -> FeatureFlag: return FlagBuilder('flagkey').on(True).variations(True, False).fallthrough_variation(1).rules(*rules).build() def make_clause(context_kind: Optional[str], attr: str, op: str, *values: Any) -> dict: diff --git a/testing/feature_store_test_base.py b/testing/feature_store_test_base.py index e622b62d..8f5eff4c 100644 --- a/testing/feature_store_test_base.py +++ b/testing/feature_store_test_base.py @@ -1,6 +1,8 @@ from ldclient.interfaces import FeatureStore from ldclient.versioned_data_kind import FEATURES +from testing.builders import * + from abc import abstractmethod import pytest @@ -60,24 +62,7 @@ def inited_store(self, tester): @staticmethod def make_feature(key, ver): - return { - u'key': key, - u'version': ver, - u'salt': u'abc', - u'on': True, - u'variations': [ - { - u'value': True, - u'weight': 100, - u'targets': [] - }, - { - u'value': False, - u'weight': 0, - u'targets': [] - } - ] - } + return FlagBuilder(key).version(ver).on(True).variations(True, False).salt('abc').build() def test_not_initialized_before_init(self, tester): with self.store(tester) as store: @@ -90,7 +75,8 @@ def test_initialized(self, tester): def test_get_existing_feature(self, tester): with self.inited_store(tester) as store: expected = self.make_feature('foo', 10) - assert store.get(FEATURES, 'foo', lambda x: x) == expected + flag = store.get(FEATURES, 'foo', lambda x: x) + assert flag == expected def test_get_nonexisting_feature(self, tester): with self.inited_store(tester) as store: diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index 19d600e3..e72f29cd 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -2,6 +2,7 @@ from ldclient.evaluation import BigSegmentsStatus from ldclient.impl.evaluator import Evaluator, _make_big_segment_ref from ldclient.impl.event_factory import _EventFactory +from ldclient.impl.model import * from testing.builders import * from typing import Any, Optional, Tuple, Union @@ -23,23 +24,23 @@ def build(self) -> Evaluator: self._get_big_segments_membership ) - def with_flag(self, flag: dict) -> 'EvaluatorBuilder': - self.__flags[flag['key']] = flag + def with_flag(self, flag: FeatureFlag) -> 'EvaluatorBuilder': + self.__flags[flag.key] = flag return self def with_unknown_flag(self, key) -> 'EvaluatorBuilder': self.__flags[key] = None return self - def with_segment(self, segment: dict) -> 'EvaluatorBuilder': - self.__segments[segment['key']] = segment + def with_segment(self, segment: Segment) -> 'EvaluatorBuilder': + self.__segments[segment.key] = segment return self def with_unknown_segment(self, key) -> 'EvaluatorBuilder': self.__segments[key] = None return self - def with_big_segment_for_user(self, user: dict, segment: dict, included: bool) -> 'EvaluatorBuilder': + def with_big_segment_for_user(self, user: dict, segment: Segment, included: bool) -> 'EvaluatorBuilder': user_key = user['key'] if user_key not in self.__big_segments: self.__big_segments[user_key] = {} @@ -54,12 +55,12 @@ def with_big_segments_status(self, status: str) -> 'EvaluatorBuilder': self.__big_segments_status = status return self - 
def _get_flag(self, key: str) -> Optional[dict]: + def _get_flag(self, key: str) -> Optional[FeatureFlag]: if key not in self.__flags: raise Exception("test made unexpected request for flag '%s'" % key) return self.__flags[key] - def _get_segment(self, key: str) -> Optional[dict]: + def _get_segment(self, key: str) -> Optional[Segment]: if key not in self.__segments: raise Exception("test made unexpected request for segment '%s'" % key) return self.__segments[key] @@ -77,7 +78,7 @@ def assert_eval_result(result, expected_detail, expected_events): assert result.events == expected_events -def assert_match(evaluator: Evaluator, flag: dict, context: Context, expect_value: Any): +def assert_match(evaluator: Evaluator, flag: FeatureFlag, context: Context, expect_value: Any): result = evaluator.evaluate(flag, context, event_factory) assert result.detail.value == expect_value diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py index ddd77954..17b3827a 100644 --- a/testing/impl/test_evaluator.py +++ b/testing/impl/test_evaluator.py @@ -1,86 +1,48 @@ from ldclient.client import Context from ldclient.evaluation import EvaluationDetail from ldclient.impl.evaluator import _context_to_user_dict +from testing.builders import * from testing.impl.evaluator_util import * def test_flag_returns_off_variation_if_flag_is_off(): - flag = { - 'key': 'feature', - 'on': False, - 'offVariation': 1, - 'variations': ['a', 'b', 'c'] - } + flag = FlagBuilder('feature').on(False).off_variation(1).variations('a', 'b', 'c').build() user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'OFF'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_none_if_flag_is_off_and_off_variation_is_unspecified(): - flag = { - 'key': 'feature', - 'on': False, - 'variations': ['a', 'b', 'c'] - } + flag = FlagBuilder('feature').on(False).variations('a', 'b', 'c').build() user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'OFF'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_off_variation_is_too_high(): - flag = { - 'key': 'feature', - 'on': False, - 'offVariation': 999, - 'variations': ['a', 'b', 'c'] - } + flag = FlagBuilder('feature').on(False).off_variation(999).variations('a', 'b', 'c').build() user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_error_if_off_variation_is_negative(): - flag = { - 'key': 'feature', - 'on': False, - 'offVariation': -1, - 'variations': ['a', 'b', 'c'] - } + flag = FlagBuilder('feature').on(False).off_variation(-1).variations('a', 'b', 'c').build() user = Context.create('x') detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_off_variation_if_prerequisite_not_found(): - flag = { - 'key': 'feature0', - 'on': True, - 'prerequisites': [{'key': 'badfeature', 'variation': 1}], - 'fallthrough': { 'variation': 0 }, - 'offVariation': 1, - 'variations': ['a', 'b', 'c'] - } + flag = FlagBuilder('feature').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('badfeature', 1).build() evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build() user = Context.create('x') 
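     # A brief note on what this exercises: when a prerequisite flag cannot be found at all
     # (here, the hypothetical flag 'badfeature'), the evaluator is expected to fall back to
     # the off variation and report a reason of the form shown in the assertion below
     # (shape taken from that assertion, not an exhaustive schema):
     #
     #     {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'}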
detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'}) assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None) def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): - flag = { - 'key': 'feature0', - 'on': True, - 'prerequisites': [{'key': 'feature1', 'variation': 1}], - 'fallthrough': { 'variation': 0 }, - 'offVariation': 1, - 'variations': ['a', 'b', 'c'], - 'version': 1 - } - flag1 = { - 'key': 'feature1', - 'off': False, - 'offVariation': 1, - # note that even though it returns the desired variation, it is still off and therefore not a match - 'fallthrough': { 'variation': 0 }, - 'variations': ['d', 'e'], - 'version': 2, - 'trackEvents': False - } + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(False).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ + .build() + # note that even though flag1 returns the desired variation, it is still off and therefore not a match evaluator = EvaluatorBuilder().with_flag(flag1).build() user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) @@ -89,23 +51,10 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): - flag = { - 'key': 'feature0', - 'on': True, - 'prerequisites': [{'key': 'feature1', 'variation': 1}], - 'fallthrough': { 'variation': 0 }, - 'offVariation': 1, - 'variations': ['a', 'b', 'c'], - 'version': 1 - } - flag1 = { - 'key': 'feature1', - 'on': True, - 'fallthrough': { 'variation': 0 }, - 'variations': ['d', 'e'], - 'version': 2, - 'trackEvents': False - } + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(0) \ + .build() evaluator = EvaluatorBuilder().with_flag(flag1).build() user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) @@ -114,23 +63,10 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): - flag = { - 'key': 'feature0', - 'on': True, - 'prerequisites': [{ 'key': 'feature1', 'variation': 1 }], - 'fallthrough': { 'variation': 0 }, - 'offVariation': 1, - 'variations': ['a', 'b', 'c'], - 'version': 1 - } - flag1 = { - 'key': 'feature1', - 'on': True, - 'fallthrough': { 'variation': 1 }, - 'variations': ['d', 'e'], - 'version': 2, - 'trackEvents': False - } + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(0) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ + .build() evaluator = EvaluatorBuilder().with_flag(flag1).build() user = Context.create('x') detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) @@ -139,46 +75,25 @@ def 
test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_ru
     assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be)
 
 def test_flag_returns_error_if_fallthrough_variation_is_too_high():
-    flag = {
-        'key': 'feature',
-        'on': True,
-        'fallthrough': {'variation': 999},
-        'variations': ['a', 'b', 'c']
-    }
+    flag = FlagBuilder('feature').on(True).variations('a', 'b', 'c').fallthrough_variation(999).build()
     user = Context.create('x')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_flag_returns_error_if_fallthrough_variation_is_negative():
-    flag = {
-        'key': 'feature',
-        'on': True,
-        'fallthrough': {'variation': -1},
-        'variations': ['a', 'b', 'c']
-    }
+    flag = FlagBuilder('feature').on(True).variations('a', 'b', 'c').fallthrough_variation(-1).build()
     user = Context.create('x')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_flag_returns_error_if_fallthrough_has_no_variation_or_rollout():
-    flag = {
-        'key': 'feature',
-        'on': True,
-        'fallthrough': {},
-        'variations': ['a', 'b', 'c']
-    }
+    flag = FlagBuilder('feature').on(True).variations('a', 'b', 'c').build()
     user = Context.create('x')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_flag_returns_error_if_fallthrough_has_rollout_with_no_variations():
-    flag = {
-        'key': 'feature',
-        'on': True,
-        'fallthrough': {'rollout': {'variations': []}},
-        'variations': ['a', 'b', 'c'],
-        'salt': ''
-    }
+    flag = FlagBuilder('feature').on(True).variations('a', 'b', 'c').fallthrough_rollout({'variations': []}).build()
     user = Context.create('x')
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
@@ -220,55 +135,17 @@ def test_flag_returns_error_if_rule_has_rollout_with_no_variations():
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)
 
 def test_segment_match_clause_retrieves_segment_from_store():
-    segment = {
-        "key": "segkey",
-        "included": [ "foo" ],
-        "version": 1
-    }
+    segment = SegmentBuilder('segkey').included('foo').build()
     evaluator = EvaluatorBuilder().with_segment(segment).build()
     user = Context.create('foo')
-    flag = {
-        "key": "test",
-        "variations": [ False, True ],
-        "fallthrough": { "variation": 0 },
-        "on": True,
-        "rules": [
-            {
-                "clauses": [
-                    {
-                        "attribute": "",
-                        "op": "segmentMatch",
-                        "values": [ "segkey" ]
-                    }
-                ],
-                "variation": 1
-            }
-        ]
-    }
+    flag = make_boolean_flag_matching_segment(segment)
     assert evaluator.evaluate(flag, user, event_factory).detail.value == True
 
 def test_segment_match_clause_falls_through_with_no_errors_if_segment_not_found():
     user = Context.create('foo')
-    flag = {
-        "key": "test",
-        "variations": [ False, True ],
-        "fallthrough": { "variation": 0 },
-        "on": True,
-        "rules": [
-            {
-                "clauses": [
-                    {
-                        "attribute": "",
-                        "op": "segmentMatch",
-                        "values": [ "segkey" ]
-                    }
-                ],
-                "variation": 1
-            }
-        ]
-    }
+    flag = make_boolean_flag_with_clauses(make_clause_matching_segment_key('segkey'))
     evaluator = EvaluatorBuilder().with_unknown_segment('segkey').build()
     assert evaluator.evaluate(flag, user, 
event_factory).detail.value == False diff --git a/testing/impl/test_evaluator_big_segment.py b/testing/impl/test_evaluator_big_segment.py index b9ff1e32..3b63d0ce 100644 --- a/testing/impl/test_evaluator_big_segment.py +++ b/testing/impl/test_evaluator_big_segment.py @@ -6,12 +6,11 @@ def test_big_segment_with_no_generation_is_not_matched(): - segment = { - 'key': 'test', - 'included': [ basic_user.key ], # included should be ignored for a big segment - 'version': 1, - 'unbounded': True - } + segment = SegmentBuilder('key').version(1) \ + .included(basic_user.key) \ + .unbounded(True) \ + .build() + # included should be ignored for a big segment evaluator = EvaluatorBuilder().with_segment(segment).build() flag = make_boolean_flag_matching_segment(segment) result = evaluator.evaluate(flag, basic_user, event_factory) @@ -19,12 +18,10 @@ def test_big_segment_with_no_generation_is_not_matched(): assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.NOT_CONFIGURED def test_big_segment_matched_with_include(): - segment = { - 'key': 'test', - 'version': 1, - 'unbounded': True, - 'generation': 2 - } + segment = SegmentBuilder('key').version(1) \ + .unbounded(True) \ + .generation(2) \ + .build() evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_user(basic_user, segment, True).build() flag = make_boolean_flag_matching_segment(segment) result = evaluator.evaluate(flag, basic_user, event_factory) @@ -32,15 +29,13 @@ def test_big_segment_matched_with_include(): assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY def test_big_segment_matched_with_rule(): - segment = { - 'key': 'test', - 'version': 1, - 'unbounded': True, - 'generation': 2, - 'rules': [ - { 'clauses': [ make_clause_matching_user(basic_user) ] } - ] - } + segment = SegmentBuilder('key').version(1) \ + .unbounded(True) \ + .generation(2) \ + .rules( + make_segment_rule_matching_context(basic_user) + ) \ + .build() evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_user(basic_user).build() flag = make_boolean_flag_matching_segment(segment) result = evaluator.evaluate(flag, basic_user, event_factory) @@ -48,15 +43,13 @@ def test_big_segment_matched_with_rule(): assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY def test_big_segment_unmatched_by_exclude_regardless_of_rule(): - segment = { - 'key': 'test', - 'version': 1, - 'unbounded': True, - 'generation': 2, - 'rules': [ - { 'clauses': make_clause_matching_user(basic_user) } - ] - } + segment = SegmentBuilder('key').version(1) \ + .unbounded(True) \ + .generation(2) \ + .rules( + make_segment_rule_matching_context(basic_user) + ) \ + .build() evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_user(basic_user, segment, False).build() flag = make_boolean_flag_matching_segment(segment) result = evaluator.evaluate(flag, basic_user, event_factory) @@ -64,12 +57,10 @@ def test_big_segment_unmatched_by_exclude_regardless_of_rule(): assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY def test_big_segment_status_is_returned_by_provider(): - segment = { - 'key': 'test', - 'version': 1, - 'unbounded': True, - 'generation': 1 - } + segment = SegmentBuilder('key').version(1) \ + .unbounded(True) \ + .generation(1) \ + .build() evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_user(basic_user). 
\ with_big_segments_status(BigSegmentsStatus.NOT_CONFIGURED).build() flag = make_boolean_flag_matching_segment(segment) diff --git a/testing/impl/test_evaluator_bucketing.py b/testing/impl/test_evaluator_bucketing.py index 9990b23a..c53adf6a 100644 --- a/testing/impl/test_evaluator_bucketing.py +++ b/testing/impl/test_evaluator_bucketing.py @@ -1,5 +1,6 @@ from ldclient.client import Context from ldclient.impl.evaluator import _bucket_context, _variation_index_for_context +from ldclient.impl.model import * from testing.builders import * from testing.impl.evaluator_util import * @@ -15,17 +16,17 @@ def assert_match_clause(clause: dict, context: Context, should_match: bool): class TestEvaluatorBucketing: def test_variation_index_is_returned_for_bucket(self): user = Context.create('userkey') - flag = { 'key': 'flagkey', 'salt': 'salt' } + flag = FlagBuilder('key').salt('salt').build() # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, # so we can construct a rollout whose second bucket just barely contains that value - bucket_value = math.trunc(_bucket_context(None, user, None, flag['key'], flag['salt'], 'key') * 100000) + bucket_value = math.trunc(_bucket_context(None, user, None, flag.key, flag.salt, 'key') * 100000) assert bucket_value > 0 and bucket_value < 100000 bad_variation_a = 0 matched_variation = 1 bad_variation_b = 2 - rule = { + rule = VariationOrRollout({ 'rollout': { 'variations': [ { 'variation': bad_variation_a, 'weight': bucket_value }, # end of bucket range is not inclusive, so it will *not* match the target value @@ -33,24 +34,24 @@ def test_variation_index_is_returned_for_bucket(self): { 'variation': bad_variation_b, 'weight': 100000 - (bucket_value + 1) } ] } - } + }) result_variation = _variation_index_for_context(flag, rule, user) assert result_variation == (matched_variation, False) def test_last_bucket_is_used_if_bucket_value_equals_total_weight(self): user = Context.create('userkey') - flag = { 'key': 'flagkey', 'salt': 'salt' } + flag = FlagBuilder('key').salt('salt').build() # We'll construct a list of variations that stops right at the target bucket value - bucket_value = math.trunc(_bucket_context(None, user, None, flag['key'], flag['salt'], 'key') * 100000) + bucket_value = math.trunc(_bucket_context(None, user, None, flag.key, flag.salt, 'key') * 100000) - rule = { + rule = VariationOrRollout({ 'rollout': { 'variations': [ { 'variation': 0, 'weight': bucket_value } ] } - } + }) result_variation = _variation_index_for_context(flag, rule, user) assert result_variation == (0, False) diff --git a/testing/impl/test_evaluator_segment.py b/testing/impl/test_evaluator_segment.py index e0471e1b..3cd12390 100644 --- a/testing/impl/test_evaluator_segment.py +++ b/testing/impl/test_evaluator_segment.py @@ -6,7 +6,7 @@ from testing.impl.evaluator_util import * -def _segment_matches_context(segment: dict, context: Context) -> bool: +def _segment_matches_context(segment: Segment, context: Context) -> bool: e = EvaluatorBuilder().with_segment(segment).build() flag = make_boolean_flag_matching_segment(segment) result = e.evaluate(flag, context, event_factory) diff --git a/testing/impl/test_evaluator_target.py b/testing/impl/test_evaluator_target.py index db0e755f..c31f7420 100644 --- a/testing/impl/test_evaluator_target.py +++ b/testing/impl/test_evaluator_target.py @@ -15,13 +15,13 @@ def base_flag_builder() -> FlagBuilder: return FlagBuilder('feature').on(True).variations(*VARIATIONS) \ 
.fallthrough_variation(FALLTHROUGH_VAR).off_variation(FALLTHROUGH_VAR) -def expect_match(flag: dict, context: Context, variation: int): +def expect_match(flag: FeatureFlag, context: Context, variation: int): result = basic_evaluator.evaluate(flag, context, event_factory) assert result.detail.variation_index == variation assert result.detail.value == VARIATIONS[variation] assert result.detail.reason == {'kind': 'TARGET_MATCH'} -def expect_fallthrough(flag: dict, context: Context): +def expect_fallthrough(flag: FeatureFlag, context: Context): result = basic_evaluator.evaluate(flag, context, event_factory) assert result.detail.variation_index == FALLTHROUGH_VAR assert result.detail.value == VARIATIONS[FALLTHROUGH_VAR] diff --git a/testing/impl/test_model_encoder.py b/testing/impl/test_model_encoder.py new file mode 100644 index 00000000..c1d164f5 --- /dev/null +++ b/testing/impl/test_model_encoder.py @@ -0,0 +1,17 @@ +from ldclient.impl.model import * + +import json + + +class MyTestEntity(ModelEntity): + def __init__(self, value): + self._value = value + + def to_json_dict(self) -> dict: + return {'magicValue': self._value} + + +def test_model_encoder(): + data = [MyTestEntity(1), MyTestEntity('x')] + output = ModelEncoder().encode(data) + assert output == '[{"magicValue":1},{"magicValue":"x"}]' diff --git a/testing/integrations/test_test_data_source.py b/testing/integrations/test_test_data_source.py index 47f0d025..f4e06d8e 100644 --- a/testing/integrations/test_test_data_source.py +++ b/testing/integrations/test_test_data_source.py @@ -39,7 +39,7 @@ def test_can_retrieve_flag_from_store(): client = LDClient(config=Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store)) - assert store.get(FEATURES, 'some-flag') == td.flag('some-flag')._build(1) + assert store.get(FEATURES, 'some-flag') == FEATURES.decode(td.flag('some-flag')._build(1)) client.close() @@ -52,7 +52,7 @@ def test_updates_to_flags_are_reflected_in_store(): td.update(td.flag('some-flag')) - assert store.get(FEATURES, 'some-flag') == td.flag('some-flag')._build(1) + assert store.get(FEATURES, 'some-flag') == FEATURES.decode(td.flag('some-flag')._build(1)) client.close() @@ -82,7 +82,7 @@ def test_can_handle_multiple_clients(): config2 = Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store2) client2 = LDClient(config=config2) - assert store.get(FEATURES, 'flag') == { + assert store.get(FEATURES, 'flag') == FEATURES.decode({ 'fallthrough': { 'variation': 0, }, @@ -93,9 +93,9 @@ def test_can_handle_multiple_clients(): 'targets': [], 'variations': [True, False], 'version': 1 - } + }) - assert store2.get(FEATURES, 'flag') == { + assert store2.get(FEATURES, 'flag') == FEATURES.decode({ 'fallthrough': { 'variation': 0, }, @@ -106,11 +106,11 @@ def test_can_handle_multiple_clients(): 'targets': [], 'variations': [True, False], 'version': 1 - } + }) td.update(td.flag('flag').variation_for_all_users(False)) - assert store.get(FEATURES, 'flag') == { + assert store.get(FEATURES, 'flag') == FEATURES.decode({ 'fallthrough': { 'variation': 1, }, @@ -121,9 +121,9 @@ def test_can_handle_multiple_clients(): 'targets': [], 'variations': [True, False], 'version': 2 - } + }) - assert store2.get(FEATURES, 'flag') == { + assert store2.get(FEATURES, 'flag') == FEATURES.decode({ 'fallthrough': { 'variation': 1, }, @@ -134,7 +134,7 @@ def test_can_handle_multiple_clients(): 'targets': [], 'variations': [True, False], 'version': 2 - } + }) 
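     # Note on the FEATURES.decode wrapping above: the store now hands back FeatureFlag model
     # objects rather than raw dicts, so each expected dict is decoded into the same model type
     # before comparison. A minimal sketch of the pattern, assuming flag JSON like the above:
     #
     #     expected = FEATURES.decode({'key': 'flag', 'on': True, 'version': 2, ...})
     #     assert store.get(FEATURES, 'flag') == expected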
client.close() client2.close() diff --git a/testing/stub_util.py b/testing/stub_util.py index 5a7e99ad..834f0e71 100644 --- a/testing/stub_util.py +++ b/testing/stub_util.py @@ -1,14 +1,19 @@ from email.utils import formatdate import json -from testing.http_util import ChunkedResponse, JsonResponse +from ldclient.impl.model import ModelEntity from ldclient.interfaces import EventProcessor, FeatureRequester, FeatureStore, UpdateProcessor +from testing.http_util import ChunkedResponse, JsonResponse + + +def item_as_json(item): + return item.to_json_dict() if isinstance(item, ModelEntity) else item def make_items_map(items = []): ret = {} for item in items: - ret[item['key']] = item + ret[item['key']] = item_as_json(item) return ret def make_put_event(flags = [], segments = []): @@ -17,7 +22,7 @@ def make_put_event(flags = [], segments = []): def make_patch_event(kind, item): path = '%s%s' % (kind.stream_api_path, item['key']) - data = { "path": path, "data": item } + data = { "path": path, "data": item_as_json(item) } return 'event:patch\ndata: %s\n\n' % json.dumps(data) def make_delete_event(kind, key, version): diff --git a/testing/test_event_factory.py b/testing/test_event_factory.py index e039c6c7..f00bc7ad 100644 --- a/testing/test_event_factory.py +++ b/testing/test_event_factory.py @@ -2,32 +2,26 @@ from ldclient.evaluation import EvaluationDetail from ldclient.impl.event_factory import _EventFactory +from testing.builders import * + _event_factory_default = _EventFactory(False) _user = { 'key': 'x' } def make_basic_flag_with_rules(kind, should_track_events): - rule = { - 'rollout': { - 'variations': [ - { 'variation': 0, 'weight': 50000 }, - { 'variation': 1, 'weight': 50000 } - ] - } - } + rule_builder = FlagRuleBuilder().rollout({ + 'variations': [ + { 'variation': 0, 'weight': 50000 }, + { 'variation': 1, 'weight': 50000 } + ] + }) if kind == 'rulematch': - rule.update({'trackEvents': should_track_events}) - - flag = { - 'key': 'feature', - 'on': True, - 'rules': [rule], - 'fallthrough': { 'variation': 0 }, - 'variations': [ False, True ], - 'salt': '' - } + rule_builder.track_events(should_track_events) + + flag_builder = FlagBuilder('feature').on(True).fallthrough_variation(0).variations(False, True) \ + .rules(rule_builder.build()) if kind == 'fallthrough': - flag.update({'trackEventsFallthrough': should_track_events}) - return flag + flag_builder.track_events_fallthrough(should_track_events) + return flag_builder.build() def test_fallthrough_track_event_false(): flag = make_basic_flag_with_rules('fallthrough', False) diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index c58c04c7..84986f7c 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -121,11 +121,7 @@ def test_variation_for_invalid_context(): assert 'default' == client.variation('feature.key', c, default='default') def test_variation_for_flag_that_evaluates_to_none(): - empty_flag = { - 'key': 'feature.key', - 'on': False, - 'offVariation': None - } + empty_flag = FlagBuilder('feature.key').on(False).build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': empty_flag}}) client = make_client(store) @@ -162,11 +158,7 @@ def test_variation_detail_when_user_has_no_key(): assert expected == client.variation_detail('feature.key', { }, default='default') def test_variation_detail_for_flag_that_evaluates_to_none(): - empty_flag = { - 'key': 'feature.key', - 'on': False, - 'offVariation': None - } + empty_flag = 
FlagBuilder('feature.key').on(False).build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': empty_flag}}) client = make_client(store) @@ -193,12 +185,7 @@ def test_variation_detail_when_feature_store_throws_error(caplog): assert errlog == [ 'Unexpected error while retrieving feature flag "feature.key": NotImplementedError()' ] def test_flag_using_big_segment(): - segment = { - 'key': 'segkey', - 'version': 1, - 'generation': 1, - 'unbounded': True - } + segment = SegmentBuilder('segkey').unbounded(True).generation(1).build() flag = make_boolean_flag_matching_segment(segment) store = InMemoryFeatureStore() store.init({ FEATURES: { flag['key']: flag }, SEGMENTS: { segment['key']: segment } }) diff --git a/testing/test_polling_processor.py b/testing/test_polling_processor.py index e4a4dcd0..c34f9721 100644 --- a/testing/test_polling_processor.py +++ b/testing/test_polling_processor.py @@ -9,6 +9,8 @@ from ldclient.polling import PollingUpdateProcessor from ldclient.util import UnsuccessfulResponseException from ldclient.versioned_data_kind import FEATURES, SEGMENTS + +from testing.builders import * from testing.stub_util import MockFeatureRequester, MockResponse pp = None @@ -33,18 +35,14 @@ def setup_processor(config): pp.start() def test_successful_request_puts_feature_data_in_store(): - flag = { - "key": "flagkey" - } - segment = { - "key": "segkey" - } + flag = FlagBuilder('flagkey').build() + segment = SegmentBuilder('segkey').build() mock_requester.all_data = { FEATURES: { - "flagkey": flag + "flagkey": flag.to_json_dict() }, SEGMENTS: { - "segkey": segment + "segkey": segment.to_json_dict() } } setup_processor(Config("SDK_KEY")) diff --git a/testing/test_streaming.py b/testing/test_streaming.py index 1838e500..adc1cf44 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -9,6 +9,8 @@ from ldclient.streaming import StreamingUpdateProcessor from ldclient.version import VERSION from ldclient.versioned_data_kind import FEATURES, SEGMENTS + +from testing.builders import * from testing.http_util import start_server, BasicResponse, CauseNetworkError, SequentialHandler from testing.proxy_test_util import do_proxy_tests from testing.stub_util import make_delete_event, make_patch_event, make_put_event, stream_content @@ -72,8 +74,8 @@ def test_sends_wrapper_header_without_version(): def test_receives_put_event(): store = InMemoryFeatureStore() ready = Event() - flag = { 'key': 'flagkey', 'version': 1 } - segment = { 'key': 'segkey', 'version': 1 } + flag = FlagBuilder('flagkey').version(1).build() + segment = SegmentBuilder('segkey').version(1).build() with start_server() as server: with stream_content(make_put_event([ flag ], [ segment ])) as stream: @@ -90,10 +92,10 @@ def test_receives_put_event(): def test_receives_patch_events(): store = InMemoryFeatureStore() ready = Event() - flagv1 = { 'key': 'flagkey', 'version': 1 } - flagv2 = { 'key': 'flagkey', 'version': 2 } - segmentv1 = { 'key': 'segkey', 'version': 1 } - segmentv2 = { 'key': 'segkey', 'version': 1 } + flagv1 = FlagBuilder('flagkey').version(1).build() + flagv2 = FlagBuilder('flagkey').version(2).build() + segmentv1 = SegmentBuilder('segkey').version(1).build() + segmentv2 = SegmentBuilder('segkey').version(2).build() with start_server() as server: with stream_content(make_put_event([ flagv1 ], [ segmentv1 ])) as stream: @@ -116,8 +118,8 @@ def test_receives_patch_events(): def test_receives_delete_events(): store = InMemoryFeatureStore() ready = Event() - flagv1 = { 'key': 'flagkey', 
'version': 1 } - segmentv1 = { 'key': 'segkey', 'version': 1 } + flagv1 = FlagBuilder('flagkey').version(1).build() + segmentv1 = SegmentBuilder('segkey').version(1).build() with start_server() as server: with stream_content(make_put_event([ flagv1 ], [ segmentv1 ])) as stream: @@ -140,8 +142,8 @@ def test_receives_delete_events(): def test_reconnects_if_stream_is_broken(): store = InMemoryFeatureStore() ready = Event() - flagv1 = { 'key': 'flagkey', 'version': 1 } - flagv2 = { 'key': 'flagkey', 'version': 2 } + flagv1 = FlagBuilder('flagkey').version(1).build() + flagv2 = FlagBuilder('flagkey').version(2).build() with start_server() as server: with stream_content(make_put_event([ flagv1 ])) as stream1: From 2b01775f4f26343909e8dc07ff21b8cb845f7d89 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 18:50:44 -0800 Subject: [PATCH 320/356] use store adapter for safety --- ldclient/client.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 17c365d7..d429c2e0 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -63,6 +63,14 @@ def initialized(self) -> bool: return self.store.initialized +def _get_store_item(store, kind: VersionedDataKind, key: str) -> Any: + # This decorator around store.get provides backward compatibility with any custom data + # store implementation that might still be returning a dict, instead of our data model + # classes like FeatureFlag. + item = store.get(kind, key, lambda x: x) + return kind.decode(item) if isinstance(item, dict) else item + + class LDClient: """The LaunchDarkly SDK client object. @@ -96,8 +104,8 @@ def __init__(self, config: Config, start_wait: float=5): self.__big_segment_store_manager = big_segment_store_manager self._evaluator = Evaluator( - lambda key: store.get(FEATURES, key, lambda x: x), - lambda key: store.get(SEGMENTS, key, lambda x: x), + lambda key: _get_store_item(store, FEATURES, key), + lambda key: _get_store_item(store, SEGMENTS, key), lambda key: big_segment_store_manager.get_user_membership(key) ) @@ -309,9 +317,7 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A return EvaluationDetail(default, None, error_reason('USER_NOT_SPECIFIED')) try: - flag = self._store.get(FEATURES, key, lambda x: x) - if isinstance(flag, dict): # shouldn't happen if we're using our standard store implementation - flag = FEATURES.decode(flag) + flag = _get_store_item(self._store, FEATURES, key) except Exception as e: log.error("Unexpected error while retrieving feature flag \"%s\": %s" % (key, repr(e))) log.debug(traceback.format_exc()) From 329e33d52e4d4355841772e7d1377681d2f6c110 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 19:03:08 -0800 Subject: [PATCH 321/356] misc cleanup --- ldclient/impl/model/feature_flag.py | 12 ++++++------ ldclient/impl/model/variation_or_rollout.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ldclient/impl/model/feature_flag.py b/ldclient/impl/model/feature_flag.py index 7fb83f99..b282e4b9 100644 --- a/ldclient/impl/model/feature_flag.py +++ b/ldclient/impl/model/feature_flag.py @@ -22,7 +22,7 @@ def variation(self) -> int: class Target: - __slots__ = ['_data', '_context_kind', '_variation', '_values'] + __slots__ = ['_context_kind', '_variation', '_values'] def __init__(self, data: dict): self._context_kind = opt_str(data, 'contextKind') @@ -43,7 +43,7 @@ def values(self) -> List[str]: class FlagRule: - __slots__ = ['_data', '_id', '_clauses', 
'_track_events', '_variation_or_rollout'] + __slots__ = ['_id', '_clauses', '_track_events', '_variation_or_rollout'] def __init__(self, data: dict): self._id = opt_str(data, 'id') @@ -126,19 +126,19 @@ def fallthrough(self) -> VariationOrRollout: return self._fallthrough @property - def prerequisites(self) -> list[Prerequisite]: + def prerequisites(self) -> List[Prerequisite]: return self._prerequisites @property - def targets(self) -> list[Target]: + def targets(self) -> List[Target]: return self._targets @property - def context_targets(self) -> list[Target]: + def context_targets(self) -> List[Target]: return self._context_targets @property - def rules(self) -> list[FlagRule]: + def rules(self) -> List[FlagRule]: return self._rules @property diff --git a/ldclient/impl/model/variation_or_rollout.py b/ldclient/impl/model/variation_or_rollout.py index 79134c3d..e2dfd414 100644 --- a/ldclient/impl/model/variation_or_rollout.py +++ b/ldclient/impl/model/variation_or_rollout.py @@ -51,7 +51,7 @@ def seed(self) -> Optional[int]: return self._seed @property - def variations(self) -> list[WeightedVariation]: + def variations(self) -> List[WeightedVariation]: return self._variations From e0f414ce403cfadf3920c8fc530a8ed2cdf6b983 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 19:22:39 -0800 Subject: [PATCH 322/356] misc fixes for persistent stores --- ldclient/feature_store_helpers.py | 50 +++++++++++-------- .../consul/consul_feature_store.py | 6 +-- .../integrations/redis/redis_feature_store.py | 2 +- testing/feature_store_test_base.py | 4 +- .../persistent_feature_store_test_base.py | 8 +-- 5 files changed, 38 insertions(+), 32 deletions(-) diff --git a/ldclient/feature_store_helpers.py b/ldclient/feature_store_helpers.py index d1177c9d..2ed911f9 100644 --- a/ldclient/feature_store_helpers.py +++ b/ldclient/feature_store_helpers.py @@ -9,6 +9,12 @@ from ldclient.versioned_data_kind import VersionedDataKind from ldclient.feature_store import CacheConfig +def _ensure_encoded(kind, item): + return item if isinstance(item, dict) else kind.encode(item) + +def _is_deleted(item): + return item is not None and item.get('deleted') is True + class CachingStoreWrapper(DiagnosticDescription, FeatureStore): """A partial implementation of :class:`ldclient.interfaces.FeatureStore`. 
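# Taken together, the hunks below give the wrapper one consistent convention: everything that
# crosses the FeatureStoreCore boundary is an encoded dict, and everything that lands in the
# cache is a decoded model object (FeatureFlag or Segment). A rough sketch of the resulting
# get() dataflow, as illustrative pseudocode only (names abbreviated from the real methods):
#
#     def get(kind, key):
#         if key in cache:
#             item = cache[key][0]                    # cache holds decoded objects, array-wrapped
#         else:
#             encoded = core.get_internal(kind, key)  # persistent stores still deal in dicts
#             item = None if encoded is None else kind.decode(encoded)
#             cache[key] = [item]                     # array wrapper so None is cacheable
#         return None if _is_deleted(item) else item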
@@ -32,16 +38,20 @@ def __init__(self, core: FeatureStoreCore, cache_config: CacheConfig): self._cache = None self._inited = False - def init(self, all_data: Mapping[VersionedDataKind, Mapping[str, Dict[Any, Any]]]): + def init(self, all_encoded_data: Mapping[VersionedDataKind, Mapping[str, Dict[Any, Any]]]): """ """ - self._core.init_internal(all_data) + self._core.init_internal(all_encoded_data) # currently FeatureStoreCore expects to receive dicts if self._cache is not None: self._cache.clear() - for kind, items in all_data.items(): - self._cache[self._all_cache_key(kind)] = self._items_if_not_deleted(items) + for kind, items in all_encoded_data.items(): + decoded_items = {} # we don't want to cache dicts, we want to cache FeatureFlags/Segments for key, item in items.items(): - self._cache[self._item_cache_key(kind, key)] = [item] # note array wrapper + decoded_item = kind.decode(item) + self._cache[self._item_cache_key(kind, key)] = [decoded_item] # note array wrapper + if not _is_deleted(decoded_item): + decoded_items[key] = decoded_item + self._cache[self._all_cache_key(kind)] = decoded_items self._inited = True def get(self, kind, key, callback=lambda x: x): @@ -52,12 +62,13 @@ def get(self, kind, key, callback=lambda x: x): cached_item = self._cache.get(cache_key) # note, cached items are wrapped in an array so we can cache None values if cached_item is not None: - return callback(self._item_if_not_deleted(cached_item[0])) - item_as_dict = self._core.get_internal(kind, key) - item = None if item_as_dict is None else kind.decode(item_as_dict) + item = cached_item[0] + return callback(None if _is_deleted(item) else item) + encoded_item = self._core.get_internal(kind, key) # currently FeatureStoreCore returns dicts + item = None if encoded_item is None else kind.decode(encoded_item) if self._cache is not None: self._cache[cache_key] = [item] - return callback(self._item_if_not_deleted(item)) + return callback(None if _is_deleted(item) else item) def all(self, kind, callback=lambda x: x): """ @@ -67,10 +78,10 @@ def all(self, kind, callback=lambda x: x): cached_items = self._cache.get(cache_key) if cached_items is not None: return callback(cached_items) - items_as_dicts = self._core.get_all_internal(kind) + encoded_items = self._core.get_all_internal(kind) all_items = {} - if items_as_dicts is not None: - for key, item in items_as_dicts.items(): + if encoded_items is not None: + for key, item in encoded_items.items(): all_items[key] = kind.decode(item) items = self._items_if_not_deleted(all_items) if self._cache is not None: @@ -83,13 +94,14 @@ def delete(self, kind, key, version): deleted_item = { "key": key, "version": version, "deleted": True } self.upsert(kind, deleted_item) - def upsert(self, kind, item): + def upsert(self, kind, encoded_item): """ """ - item_as_dict = kind.encode(item) - new_state = self._core.upsert_internal(kind, item_as_dict) + encoded_item = _ensure_encoded(kind, encoded_item) + new_state = self._core.upsert_internal(kind, encoded_item) + new_decoded_item = kind.decode(new_state) if self._cache is not None: - self._cache[self._item_cache_key(kind, item.get('key'))] = [new_state] + self._cache[self._item_cache_key(kind, new_decoded_item.get('key'))] = [new_decoded_item] self._cache.pop(self._all_cache_key(kind), None) @property @@ -122,12 +134,6 @@ def _item_cache_key(kind, key): def _all_cache_key(kind): return kind.namespace - @staticmethod - def _item_if_not_deleted(item): - if item is not None and item.get('deleted', False): - return None - return item - 
@staticmethod def _items_if_not_deleted(items): results = {} diff --git a/ldclient/impl/integrations/consul/consul_feature_store.py b/ldclient/impl/integrations/consul/consul_feature_store.py index 497828a3..8fc91036 100644 --- a/ldclient/impl/integrations/consul/consul_feature_store.py +++ b/ldclient/impl/integrations/consul/consul_feature_store.py @@ -87,7 +87,7 @@ def get_all_internal(self, kind): def upsert_internal(self, kind, new_item): key = self._item_key(kind, new_item['key']) - encoded_item = json.dumps(new_item) + encoded_item = json.dumps(kind.encode(new_item)) # We will potentially keep retrying indefinitely until someone's write succeeds while True: @@ -95,10 +95,10 @@ def upsert_internal(self, kind, new_item): if old_value is None: mod_index = 0 else: - old_item = json.loads(old_value['Value'].decode('utf-8')) + old_item = kind.decode(json.loads(old_value['Value'].decode('utf-8'))) # Check whether the item is stale. If so, don't do the update (and return the existing item to # CachingStoreWrapper so it can be cached) - if old_item['version'] >= new_item['version']: + if old_item.version >= new_item.version: return old_item mod_index = old_value['ModifyIndex'] diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py index 9bc5d13b..f5657d17 100644 --- a/ldclient/impl/integrations/redis/redis_feature_store.py +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -35,7 +35,7 @@ def init_internal(self, all_data): base_key = self._items_key(kind) pipe.delete(base_key) for key, item in items.items(): - item_json = json.dumps(item) + item_json = json.dumps(kind.encode(item)) pipe.hset(base_key, key, item_json) all_count = all_count + len(items) pipe.execute() diff --git a/testing/feature_store_test_base.py b/testing/feature_store_test_base.py index 8f5eff4c..ab24f5a2 100644 --- a/testing/feature_store_test_base.py +++ b/testing/feature_store_test_base.py @@ -54,8 +54,8 @@ def inited_store(self, tester): scope = StoreTestScope(tester.create_feature_store()) scope.store.init({ FEATURES: { - 'foo': self.make_feature('foo', 10), - 'bar': self.make_feature('bar', 10), + 'foo': self.make_feature('foo', 10).to_json_dict(), + 'bar': self.make_feature('bar', 10).to_json_dict(), } }) return scope diff --git a/testing/integrations/persistent_feature_store_test_base.py b/testing/integrations/persistent_feature_store_test_base.py index be473e3d..bb02bda0 100644 --- a/testing/integrations/persistent_feature_store_test_base.py +++ b/testing/integrations/persistent_feature_store_test_base.py @@ -91,15 +91,15 @@ def test_stores_with_different_prefixes_are_independent(self): store_b.upsert(FEATURES, flag_b2) item = store_a.get(FEATURES, 'flagA1', lambda x: x) - assert item == flag_a1 + assert item == FEATURES.decode(flag_a1) item = store_a.get(FEATURES, 'flagB1', lambda x: x) assert item is None items = store_a.all(FEATURES, lambda x: x) - assert items == { 'flagA1': flag_a1, 'flagA2': flag_a2 } + assert items == { 'flagA1': FEATURES.decode(flag_a1), 'flagA2': FEATURES.decode(flag_a2) } item = store_b.get(FEATURES, 'flagB1', lambda x: x) - assert item == flag_b1 + assert item == FEATURES.decode(flag_b1) item = store_b.get(FEATURES, 'flagA1', lambda x: x) assert item is None items = store_b.all(FEATURES, lambda x: x) - assert items == { 'flagB1': flag_b1, 'flagB2': flag_b2 } + assert items == { 'flagB1': FEATURES.decode(flag_b1), 'flagB2': FEATURES.decode(flag_b2) } From e4a478cd6e72899178a4be975796285b7379d186 Mon Sep 
17 00:00:00 2001 From: Eli Bishop Date: Mon, 12 Dec 2022 19:27:20 -0800 Subject: [PATCH 323/356] more database store fixes --- ldclient/impl/integrations/consul/consul_feature_store.py | 6 +++--- ldclient/impl/integrations/redis/redis_feature_store.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ldclient/impl/integrations/consul/consul_feature_store.py b/ldclient/impl/integrations/consul/consul_feature_store.py index 8fc91036..497828a3 100644 --- a/ldclient/impl/integrations/consul/consul_feature_store.py +++ b/ldclient/impl/integrations/consul/consul_feature_store.py @@ -87,7 +87,7 @@ def get_all_internal(self, kind): def upsert_internal(self, kind, new_item): key = self._item_key(kind, new_item['key']) - encoded_item = json.dumps(kind.encode(new_item)) + encoded_item = json.dumps(new_item) # We will potentially keep retrying indefinitely until someone's write succeeds while True: @@ -95,10 +95,10 @@ def upsert_internal(self, kind, new_item): if old_value is None: mod_index = 0 else: - old_item = kind.decode(json.loads(old_value['Value'].decode('utf-8'))) + old_item = json.loads(old_value['Value'].decode('utf-8')) # Check whether the item is stale. If so, don't do the update (and return the existing item to # CachingStoreWrapper so it can be cached) - if old_item.version >= new_item.version: + if old_item['version'] >= new_item['version']: return old_item mod_index = old_value['ModifyIndex'] diff --git a/ldclient/impl/integrations/redis/redis_feature_store.py b/ldclient/impl/integrations/redis/redis_feature_store.py index f5657d17..9bc5d13b 100644 --- a/ldclient/impl/integrations/redis/redis_feature_store.py +++ b/ldclient/impl/integrations/redis/redis_feature_store.py @@ -35,7 +35,7 @@ def init_internal(self, all_data): base_key = self._items_key(kind) pipe.delete(base_key) for key, item in items.items(): - item_json = json.dumps(kind.encode(item)) + item_json = json.dumps(item) pipe.hset(base_key, key, item_json) all_count = all_count + len(items) pipe.execute() From 56d1c6f772007949176ee05e020dc89ad21c0689 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 13 Dec 2022 10:50:41 -0800 Subject: [PATCH 324/356] support attribute reference lookups in evaluations --- Makefile | 6 +- ldclient/impl/evaluator.py | 52 ++++++++++-- ldclient/impl/model/attribute_ref.py | 94 +++++++++++++++++++++ ldclient/impl/model/clause.py | 5 +- ldclient/impl/model/entity.py | 1 + ldclient/impl/model/segment.py | 5 +- ldclient/impl/model/variation_or_rollout.py | 5 +- testing/impl/test_attribute_ref.py | 54 ++++++++++++ testing/impl/test_evaluator_bucketing.py | 49 ++++++----- 9 files changed, 229 insertions(+), 42 deletions(-) create mode 100644 ldclient/impl/model/attribute_ref.py create mode 100644 testing/impl/test_attribute_ref.py diff --git a/Makefile b/Makefile index 359c4338..cb0d12f2 100644 --- a/Makefile +++ b/Makefile @@ -20,14 +20,10 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass # Explanation of current skips: -# - "evaluation" subtests involving attribute references: Haven't yet implemented attribute references. # - "evaluation/parameterized/prerequisites": Can't pass yet because prerequisite cycle detection is not implemented. -# - various other "evaluation" subtests: These tests require attribute reference support or targeting by kind. +# - "evaluation/parameterized/segment recursion": Segment recursion is not yet implemented. 
# - "events": These test suites will be unavailable until more of the U2C implementation is done. TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ - -skip 'evaluation/bucketing/bucket by non-key attribute/in rollouts/string value/complex attribute reference' \ - -skip 'evaluation/parameterized/attribute references' \ - -skip 'evaluation/parameterized/bad attribute reference errors' \ -skip 'evaluation/parameterized/prerequisites' \ -skip 'evaluation/parameterized/segment recursion' \ -skip 'events' diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 01dc2ed6..b12511ba 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -59,6 +59,20 @@ def add_event(self, event): self.events.append(event) +class EvaluationException(Exception): + def __init__(self, message: str, error_kind: str = 'MALFORMED_FLAG'): + self._message = message + self._error_kind = error_kind + + @property + def message(self) -> str: + return self._message + + @property + def error_kind(self) -> str: + return self._error_kind + + class Evaluator: """ Encapsulates the feature flag evaluation logic. The Evaluator has no knowledge of the rest of the SDK environment; @@ -70,7 +84,8 @@ def __init__( self, get_flag: Callable[[str], Optional[FeatureFlag]], get_segment: Callable[[str], Optional[Segment]], - get_big_segments_membership: Callable[[str], Tuple[Optional[dict], str]] + get_big_segments_membership: Callable[[str], Tuple[Optional[dict], str]], + logger: Optional[logging.Logger] = None ): """ :param get_flag: function provided by LDClient that takes a flag key and returns either the flag or None @@ -82,10 +97,17 @@ def __init__( self.__get_flag = get_flag self.__get_segment = get_segment self.__get_big_segments_membership = get_big_segments_membership + self.__logger = logger def evaluate(self, flag: FeatureFlag, context: Context, event_factory: _EventFactory) -> EvalResult: state = EvalResult() - state.detail = self._evaluate(flag, context, state, event_factory) + try: + state.detail = self._evaluate(flag, context, state, event_factory) + except EvaluationException as e: + if self.__logger is not None: + self.__logger.error('Could not evaluate flag "%s": %s' % (flag.key, e.message)) + state.detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': e.error_kind}) + return state if state.big_segments_status is not None: state.detail.reason['bigSegmentsStatus'] = state.big_segments_status return state @@ -183,12 +205,12 @@ def _clause_matches_context(self, clause: Clause, context: Context, state: EvalR attr = clause.attribute if attr is None: return False - if attr == 'kind': + if attr.depth == 1 and attr[0] == 'kind': return _maybe_negate(clause, _match_clause_by_kind(clause, context)) actual_context = context.get_individual_context(clause.context_kind or Context.DEFAULT_KIND) if actual_context is None: return False - context_value = actual_context.get(attr) + context_value = _get_context_value_by_attr_ref(actual_context, attr) if context_value is None: return False @@ -325,12 +347,12 @@ def _bucket_context( context_kind: Optional[str], key: str, salt: str, - bucket_by: Optional[str] + bucket_by: Optional[AttributeRef] ) -> float: match_context = context.get_individual_context(context_kind or Context.DEFAULT_KIND) if match_context is None: return -1 - clause_value = match_context.get(bucket_by or 'key') + clause_value = match_context.key if bucket_by is None else _get_context_value_by_attr_ref(match_context, bucket_by) if clause_value is None: return 0.0 bucket_by_value = 
_bucketable_string_value(clause_value) @@ -360,6 +382,24 @@ def _context_key_is_in_target_list(context: Context, context_kind: Optional[str] match_context = context.get_individual_context(context_kind or Context.DEFAULT_KIND) return match_context is not None and match_context.key in keys +def _get_context_value_by_attr_ref(context: Context, attr: AttributeRef) -> Any: + if attr is None: + raise EvaluationException("rule clause did not specify an attribute") + if attr.error is not None: + raise EvaluationException("invalid attribute reference: " + attr.error) + name = attr[0] + if name is None: + return None + value = context.get(name) + depth = attr.depth + i = 1 + while i < depth: + if not isinstance(value, dict): + return None # can't get subproperty if we're not in a JSON object + value = value.get(attr[i]) + i += 1 + return value + def _match_single_context_value(op: str, context_value: Any, values: List[Any]) -> bool: op_fn = operators.ops.get(op) if op_fn is None: diff --git a/ldclient/impl/model/attribute_ref.py b/ldclient/impl/model/attribute_ref.py new file mode 100644 index 00000000..e1b76251 --- /dev/null +++ b/ldclient/impl/model/attribute_ref.py @@ -0,0 +1,94 @@ +from __future__ import annotations +import re +from typing import List, Optional + + +def req_attr_ref_with_opt_context_kind(attr_ref_str: str, context_kind: Optional[str]) -> AttributeRef: + if context_kind is None or context_kind == '': + return AttributeRef.from_literal(attr_ref_str) + return AttributeRef.from_path(attr_ref_str) + +def opt_attr_ref_with_opt_context_kind(attr_ref_str: Optional[str], context_kind: Optional[str]) -> Optional[AttributeRef]: + if attr_ref_str is None or attr_ref_str == '': + return None + return req_attr_ref_with_opt_context_kind(attr_ref_str, context_kind) + + +_INVALID_ATTR_ESCAPE_REGEX = re.compile('(~[^01]|~$)') + + +class AttributeRef: + __slots__ = ['_raw', '_single_component', '_components', '_error'] + + _ERR_EMPTY = 'attribute reference cannot be empty' + + def __init__( + self, + raw: str, + single_component: Optional[str], + components: Optional[List[str]], + error: Optional[str] + ): + self._raw = raw + self._single_component = single_component + self._components = components + self._error = error + + @property + def valid(self) -> bool: + return self._error is None + + @property + def error(self) -> Optional[str]: + return self._error + + @property + def depth(self) -> int: + if self._error is not None: + return 0 + if self._components is not None: + return len(self._components) + return 1 + + def __getitem__(self, index) -> Optional[str]: + if self._error is not None: + return None + if self._components is not None: + return None if index < 0 or index >= len(self._components) else self._components[index] + return self._single_component if index == 0 else None + + @staticmethod + def from_path(path: str) -> AttributeRef: + if path == '' or path == '/': + return AttributeRef._from_error(AttributeRef._ERR_EMPTY) + if path[0] != '/': + return AttributeRef(path, path, None, None) + components = path[1:].split('/') + for i, c in enumerate(components): + if c == '': + return AttributeRef._from_error('attribute reference contained a double slash or a trailing slash') + unescaped = AttributeRef._unescape(c) + if unescaped is None: + return AttributeRef._from_error('attribute reference contained an escape character (~) that was not followed by 0 or 1') + components[i] = unescaped + return AttributeRef(path, None, components, None) + + @staticmethod + def from_literal(name: str) -> 
AttributeRef: + if name == '': + return AttributeRef._from_error(AttributeRef._ERR_EMPTY) + return AttributeRef(AttributeRef._escape(name), name, None, None) + + @staticmethod + def _from_error(error: str) -> AttributeRef: + return AttributeRef('', None, None, error) + + @staticmethod + def _unescape(s: str) -> Optional[str]: + if _INVALID_ATTR_ESCAPE_REGEX.search(s): + return None + return s.replace("~1", "/").replace("~0", "~") + + @staticmethod + def _escape(s: str) -> str: + return s.replace("~", "~0").replace("/", "~1") diff --git a/ldclient/impl/model/clause.py b/ldclient/impl/model/clause.py index e601be4a..8c476f13 100644 --- a/ldclient/impl/model/clause.py +++ b/ldclient/impl/model/clause.py @@ -1,19 +1,20 @@ from typing import Any, List, Optional +from ldclient.impl.model.attribute_ref import AttributeRef, req_attr_ref_with_opt_context_kind from ldclient.impl.model.entity import * class Clause: __slots__ = ['_context_kind', '_attribute', '_op', '_values', '_negate'] def __init__(self, data: dict): - self._attribute = req_str(data, 'attribute') self._context_kind = opt_str(data, 'contextKind') + self._attribute = req_attr_ref_with_opt_context_kind(req_str(data, 'attribute'), self._context_kind) self._negate = opt_bool(data, 'negate') self._op = req_str(data, 'op') self._values = req_list(data, 'values') @property - def attribute(self) -> str: + def attribute(self) -> AttributeRef: return self._attribute @property diff --git a/ldclient/impl/model/entity.py b/ldclient/impl/model/entity.py index 42b2a4b0..03cf1085 100644 --- a/ldclient/impl/model/entity.py +++ b/ldclient/impl/model/entity.py @@ -81,6 +81,7 @@ def validate_list_type(items: list, name: str, desired_type) -> list: (name, desired_type, item.__class__)) return items + class ModelEntity: def __init__(self, data: dict): self._data = data diff --git a/ldclient/impl/model/segment.py b/ldclient/impl/model/segment.py index 93c06371..92cfd14b 100644 --- a/ldclient/impl/model/segment.py +++ b/ldclient/impl/model/segment.py @@ -1,5 +1,6 @@ from typing import Any, List, Optional +from ldclient.impl.model.attribute_ref import AttributeRef, opt_attr_ref_with_opt_context_kind from ldclient.impl.model.clause import Clause from ldclient.impl.model.entity import * @@ -24,13 +25,13 @@ class SegmentRule: __slots__ = ['_bucket_by', '_clauses', '_rollout_context_kind', '_weight'] def __init__(self, data: dict): - self._bucket_by = opt_str(data, 'bucketBy') self._clauses = list(Clause(item) for item in req_dict_list(data, 'clauses')) self._rollout_context_kind = opt_str(data, 'rolloutContextKind') + self._bucket_by = opt_attr_ref_with_opt_context_kind(opt_str(data, 'bucketBy'), self._rollout_context_kind) self._weight = opt_int(data, 'weight') @property - def bucket_by(self) -> Optional[str]: + def bucket_by(self) -> Optional[AttributeRef]: return self._bucket_by @property diff --git a/ldclient/impl/model/variation_or_rollout.py b/ldclient/impl/model/variation_or_rollout.py index e2dfd414..476d1df3 100644 --- a/ldclient/impl/model/variation_or_rollout.py +++ b/ldclient/impl/model/variation_or_rollout.py @@ -1,5 +1,6 @@ from typing import Any, List, Optional +from ldclient.impl.model.attribute_ref import AttributeRef, opt_attr_ref_with_opt_context_kind from ldclient.impl.model.entity import * @@ -28,14 +29,14 @@ class Rollout: __slots__ = ['_bucket_by', '_context_kind', '_is_experiment', '_seed', '_variations'] def __init__(self, data: dict): - self._bucket_by = opt_str(data, 'bucketBy') self._context_kind = opt_str(data, 'contextKind') + 
self._bucket_by = opt_attr_ref_with_opt_context_kind(opt_str(data, 'bucketBy'), self._context_kind) self._is_experiment = opt_str(data, 'kind') == 'experiment' self._seed = opt_int(data, 'seed') self._variations = list(WeightedVariation(item) for item in req_dict_list(data, 'variations')) @property - def bucket_by(self) -> Optional[str]: + def bucket_by(self) -> Optional[AttributeRef]: return self._bucket_by @property diff --git a/testing/impl/test_attribute_ref.py b/testing/impl/test_attribute_ref.py new file mode 100644 index 00000000..4370e98f --- /dev/null +++ b/testing/impl/test_attribute_ref.py @@ -0,0 +1,54 @@ +from ldclient.impl.model.attribute_ref import * + +import pytest + + +class TestAttributeRef: + @pytest.mark.parametrize("input", ["", "/"]) + def test_invalid_attr_ref_from_path(self, input: str): + a = AttributeRef.from_path(input) + assert a.valid is False + assert a.error is not None + assert a.depth == 0 + + @pytest.mark.parametrize("input", [""]) + def test_invalid_attr_ref_from_literal(self, input: str): + a = AttributeRef.from_literal(input) + assert a.valid is False + assert a.error is not None + assert a.depth == 0 + + @pytest.mark.parametrize("input", ["name", "name/with/slashes", "name~0~1with-what-looks-like-escape-sequences"]) + def test_ref_with_no_leading_slash(self, input: str): + a = AttributeRef.from_path(input) + assert a.valid is True + assert a.error is None + assert a.depth == 1 + assert a[0] == input + + @pytest.mark.parametrize("input,unescaped", [ + ("/name", "name"), + ("/0", "0"), + ("/name~1with~1slashes~0and~0tildes", "name/with/slashes~and~tildes") + ]) + def test_ref_simple_with_leading_slash(self, input: str, unescaped: str): + a = AttributeRef.from_path(input) + assert a.valid is True + assert a.error is None + assert a.depth == 1 + assert a[0] == unescaped + + @pytest.mark.parametrize("input", []) + def test_literal(self, input: str): + a = AttributeRef.from_literal(input) + assert a.valid is True + assert a.error is None + assert a.depth == 1 + assert a[0] == input + + def test_get_component(self): + a = AttributeRef.from_path("/first/sec~1ond/third") + assert a.depth == 3 + assert a[0] == "first" + assert a[1] == "sec/ond" + assert a[2] == "third" diff --git a/testing/impl/test_evaluator_bucketing.py b/testing/impl/test_evaluator_bucketing.py index c53adf6a..767668b5 100644 --- a/testing/impl/test_evaluator_bucketing.py +++ b/testing/impl/test_evaluator_bucketing.py @@ -20,7 +20,7 @@ def test_variation_index_is_returned_for_bucket(self): # First verify that with our test inputs, the bucket value will be greater than zero and less than 100000, # so we can construct a rollout whose second bucket just barely contains that value - bucket_value = math.trunc(_bucket_context(None, user, None, flag.key, flag.salt, 'key') * 100000) + bucket_value = math.trunc(_bucket_context(None, user, None, flag.key, flag.salt, None) * 100000) assert bucket_value > 0 and bucket_value < 100000 bad_variation_a = 0 @@ -43,7 +43,7 @@ def test_last_bucket_is_used_if_bucket_value_equals_total_weight(self): flag = FlagBuilder('key').salt('salt').build() # We'll construct a list of variations that stops right at the target bucket value - bucket_value = math.trunc(_bucket_context(None, user, None, flag.key, flag.salt, 'key') * 100000) + bucket_value = math.trunc(_bucket_context(None, user, None, flag.key, flag.salt, None) * 100000) rule = VariationOrRollout({ 'rollout': { @@ -57,49 +57,49 @@ def test_last_bucket_is_used_if_bucket_value_equals_total_weight(self): def 
test_bucket_by_user_key(self): user = Context.create('userKeyA') - bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'key') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', None) assert bucket == pytest.approx(0.42157587) user = Context.create('userKeyB') - bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'key') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', None) assert bucket == pytest.approx(0.6708485) user = Context.create('userKeyC') - bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'key') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', None) assert bucket == pytest.approx(0.10343106) def test_bucket_by_user_key_with_seed(self): seed = 61 user = Context.create('userKeyA') - point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') + point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', None) assert point == pytest.approx(0.09801207) user = Context.create('userKeyB') - point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') + point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', None) assert point == pytest.approx(0.14483777) user = Context.create('userKeyC') - point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') + point = _bucket_context(seed, user, None, 'hashKey', 'saltyA', None) assert point == pytest.approx(0.9242641) def test_bucket_by_int_attr(self): user = Context.builder('userKey').set('intAttr', 33333).set('stringAttr', '33333').build() - bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'intAttr') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', AttributeRef.from_literal('intAttr')) assert bucket == pytest.approx(0.54771423) - bucket2 = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'stringAttr') + bucket2 = _bucket_context(None, user, None, 'hashKey', 'saltyA', AttributeRef.from_literal('stringAttr')) assert bucket2 == bucket def test_bucket_by_float_attr_not_allowed(self): user = Context.builder('userKey').set('floatAttr', 33.5).build() - bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', 'floatAttr') + bucket = _bucket_context(None, user, None, 'hashKey', 'saltyA', AttributeRef.from_literal('floatAttr')) assert bucket == 0.0 def test_seed_independent_of_salt_and_hashKey(self): seed = 61 user = Context.create('userKeyA') - point1 = _bucket_context(seed, user, None, 'hashKey', 'saltyA', 'key') - point2 = _bucket_context(seed, user, None, 'hashKey', 'saltyB', 'key') - point3 = _bucket_context(seed, user, None, 'hashKey2', 'saltyA', 'key') + point1 = _bucket_context(seed, user, None, 'hashKey', 'saltyA', None) + point2 = _bucket_context(seed, user, None, 'hashKey', 'saltyB', None) + point3 = _bucket_context(seed, user, None, 'hashKey2', 'saltyA', None) assert point1 == point2 assert point2 == point3 @@ -107,9 +107,9 @@ def test_seed_independent_of_salt_and_hashKey(self): def test_seed_changes_hash_evaluation(self): seed1 = 61 user = Context.create('userKeyA') - point1 = _bucket_context(seed1, user, None, 'hashKey', 'saltyA', 'key') + point1 = _bucket_context(seed1, user, None, 'hashKey', 'saltyA', None) seed2 = 62 - point2 = _bucket_context(seed2, user, None, 'hashKey', 'saltyB', 'key') + point2 = _bucket_context(seed2, user, None, 'hashKey', 'saltyB', None) assert point1 != point2 @@ -119,14 +119,13 @@ def test_context_kind_selects_context(self): context2 = Context.create('key2', 'kind2') multi = Context.create_multi(context1, context2) key = 'flag-key' - attr = 
'key' salt = 'testing123' - assert _bucket_context(seed, context1, None, key, salt, attr) == \ - _bucket_context(seed, context1, 'user', key, salt, attr) - assert _bucket_context(seed, context1, None, key, salt, attr) == \ - _bucket_context(seed, multi, 'user', key, salt, attr) - assert _bucket_context(seed, context2, 'kind2', key, salt, attr) == \ - _bucket_context(seed, multi, 'kind2', key, salt, attr) - assert _bucket_context(seed, multi, 'user', key, salt, attr) != \ - _bucket_context(seed, multi, 'kind2', key, salt, attr) + assert _bucket_context(seed, context1, None, key, salt, None) == \ + _bucket_context(seed, context1, 'user', key, salt, None) + assert _bucket_context(seed, context1, None, key, salt, None) == \ + _bucket_context(seed, multi, 'user', key, salt, None) + assert _bucket_context(seed, context2, 'kind2', key, salt, None) == \ + _bucket_context(seed, multi, 'kind2', key, salt, None) + assert _bucket_context(seed, multi, 'user', key, salt, None) != \ + _bucket_context(seed, multi, 'kind2', key, salt, None) From 396e914fe3061832243a853b8d7f5d7eea75bc5a Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 13 Dec 2022 11:01:07 -0800 Subject: [PATCH 325/356] pass logger from client --- ldclient/client.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ldclient/client.py b/ldclient/client.py index d429c2e0..812f70f9 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -106,7 +106,8 @@ def __init__(self, config: Config, start_wait: float=5): self._evaluator = Evaluator( lambda key: _get_store_item(store, FEATURES, key), lambda key: _get_store_item(store, SEGMENTS, key), - lambda key: big_segment_store_manager.get_user_membership(key) + lambda key: big_segment_store_manager.get_user_membership(key), + log ) if self._config.offline: From 4f8b4357a21fe3bf216e0db63abf36e3331b2735 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 20 Dec 2022 19:41:06 -0800 Subject: [PATCH 326/356] context kind logic for big segments + enable big segment contract tests --- contract-tests/big_segment_store_fixture.py | 37 ++++++++++++++++++ contract-tests/client_entity.py | 35 +++++++++++++++-- contract-tests/service.py | 3 ++ ldclient/impl/big_segments.py | 1 + ldclient/impl/evaluator.py | 42 +++++++++++++++++---- ldclient/impl/model/segment.py | 8 +++- ldclient/interfaces.py | 20 +++++----- testing/builders.py | 3 ++ testing/impl/evaluator_util.py | 8 +++- testing/impl/test_evaluator_big_segment.py | 21 +++++++++-- 10 files changed, 151 insertions(+), 27 deletions(-) create mode 100644 contract-tests/big_segment_store_fixture.py diff --git a/contract-tests/big_segment_store_fixture.py b/contract-tests/big_segment_store_fixture.py new file mode 100644 index 00000000..14907573 --- /dev/null +++ b/contract-tests/big_segment_store_fixture.py @@ -0,0 +1,37 @@ +import json +import os +import sys +from typing import Optional +import urllib3 + +# Import ldclient from parent directory +sys.path.insert(1, os.path.join(sys.path[0], '..')) +from ldclient.interfaces import BigSegmentStore, BigSegmentStoreMetadata + + +http = urllib3.PoolManager() + + +class BigSegmentStoreFixture(BigSegmentStore): + def __init__(self, callback_uri: str): + self._callback_uri = callback_uri + + def get_metadata(self) -> BigSegmentStoreMetadata: + resp_data = self._post_callback('/getMetadata', None) + return BigSegmentStoreMetadata(resp_data.get("lastUpToDate")) + + def get_membership(self, context_hash: str) -> Optional[dict]: + resp_data = self._post_callback('/getMembership', {'contextHash': 
context_hash}) + return resp_data.get("values") + + def _post_callback(self, path: str, params: Optional[dict]) -> dict: + url = self._callback_uri + path + resp = http.request('POST', url, + body=None if params is None else json.dumps(params), + headers=None if params is None else {'Content-Type': 'application/json'}) + if resp.status != 200: + raise Exception("HTTP error %d from callback to %s" % (resp.status, url)) + return json.loads(resp.data.decode('utf-8')) + + def stop(self): + pass diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index 5f94c75d..e461a6e4 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -2,6 +2,11 @@ import logging import os import sys +from typing import Optional + +from big_segment_store_fixture import BigSegmentStoreFixture + +from ldclient.config import BigSegmentsConfig # Import ldclient from parent directory sys.path.insert(1, os.path.join(sys.path[0], '..')) @@ -17,8 +22,7 @@ def __init__(self, tag, config): streaming = config["streaming"] if streaming.get("baseUri") is not None: opts["stream_uri"] = streaming["baseUri"] - if streaming.get("initialRetryDelayMs") is not None: - opts["initial_reconnect_delay"] = streaming["initialRetryDelayMs"] / 1000.0 + _set_optional_time_prop(streaming, "initialRetryDelayMs", opts, "initial_reconnect_delay") if config.get("events") is not None: events = config["events"] @@ -29,11 +33,22 @@ def __init__(self, tag, config): opts["diagnostic_opt_out"] = not events.get("enableDiagnostics", False) opts["all_attributes_private"] = events.get("allAttributesPrivate", False) opts["private_attribute_names"] = events.get("globalPrivateAttributes", {}) - if events.get("flushIntervalMs") is not None: - opts["flush_interval"] = events["flushIntervalMs"] / 1000.0 + _set_optional_time_prop(events, "flushIntervalMs", opts, "flush_interval") else: opts["send_events"] = False + if config.get("bigSegments") is not None: + big_params = config["bigSegments"] + big_config = { + "store": BigSegmentStoreFixture(big_params["callbackUri"]) + } + if big_params.get("userCacheSize") is not None: + big_config["user_cache_size"] = big_params["userCacheSize"] + _set_optional_time_prop(big_params, "userCacheTimeMs", big_config, "user_cache_time") + _set_optional_time_prop(big_params, "statusPollIntervalMs", big_config, "status_poll_interval") + _set_optional_time_prop(big_params, "staleAfterMs", big_config, "stale_after") + opts["big_segments"] = BigSegmentsConfig(**big_config) + start_wait = config.get("startWaitTimeMs") or 5000 config = Config(**opts) @@ -114,6 +129,18 @@ def _context_response(self, c: Context) -> dict: return {"output": c.to_json_string()} return {"error": c.error} + def get_big_segment_store_status(self) -> dict: + status = self.client.big_segment_store_status_provider.status + return { + "available": status.available, + "stale": status.stale + } + def close(self): self.client.close() self.log.info('Test ended') + +def _set_optional_time_prop(params_in: dict, name_in: str, params_out: dict, name_out: str): + if params_in.get(name_in) is not None: + params_out[name_out] = params_in[name_in] / 1000.0 + return None diff --git a/contract-tests/service.py b/contract-tests/service.py index 70c923e7..c83a4b8e 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -64,6 +64,7 @@ def status(): 'all-flags-with-reasons', 'all-flags-client-side-only', 'all-flags-details-only-for-tracked-flags', + 'big-segments', 'context-type', 'secure-mode-hash', ] @@ -126,6 
+127,8 @@ def post_client_command(id): response = client.context_build(sub_params) elif command == "contextConvert": response = client.context_convert(sub_params) + elif command == "getBigSegmentStoreStatus": + response = client.get_big_segment_store_status() else: return ('', 400) diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index bcd6e2b8..0ec1da43 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -88,6 +88,7 @@ def get_user_membership(self, user_key: str) -> Tuple[Optional[dict], str]: self.__cache[user_key] = membership except Exception as e: log.exception("Big Segment store membership query returned error: %s" % e) + return (None, BigSegmentsStatus.STORE_ERROR) status = self.__last_status if not status: status = self.poll_store_and_update_status() diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index b12511ba..9e0fa3ba 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -3,6 +3,7 @@ from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.impl.event_factory import _EventFactory from ldclient.impl.model import * +from ldclient.interfaces import BigSegmentStoreStatus import hashlib import logging @@ -50,8 +51,8 @@ class EvalResult: def __init__(self): self.detail = None self.events = None - self.big_segments_status = None - self.big_segments_membership = None + self.big_segments_status = None # type: Optional[str] + self.big_segments_membership = None # type: Optional[Dict[str, Optional[dict]]] def add_event(self, event): if self.events is None: @@ -267,12 +268,37 @@ def _big_segment_match_context(self, segment: Segment, context: Context, state: # that as a "not configured" condition. state.big_segments_status = BigSegmentsStatus.NOT_CONFIGURED return False - if state.big_segments_status is None: - result = self.__get_big_segments_membership(context.key) - state.big_segments_membership, state.big_segments_status = result - segment_ref = _make_big_segment_ref(segment) - membership = state.big_segments_membership - included = None if membership is None else membership.get(segment_ref, None) + + # A big segment can only apply to one context kind, so if we don't have a key for that kind, + # we don't need to bother querying the data. + match_context = context.get_individual_context(segment.unbounded_context_kind or Context.DEFAULT_KIND) + if match_context is None: + return False + key = match_context.key + + membership = None + has_cached_membership = False + if state.big_segments_membership is not None: + if key in state.big_segments_membership: + has_cached_membership = True + membership = state.big_segments_membership[key] + # Note that we could have cached a None result from a query, in which case membership + # will be None but has_cached_membership will be True. + if not has_cached_membership: + if self.__get_big_segments_membership is None: + state.big_segments_status = BigSegmentsStatus.NOT_CONFIGURED + return False + result = self.__get_big_segments_membership(key) + # Note that this query is just by key; the context kind doesn't matter because any given + # Big Segment can only reference one context kind. So if segment A for the "user" kind + # includes a "user" context with key X, and segment B for the "org" kind includes an "org" + # context with the same key X, it is fine to say that the membership for key X is + # segment A and segment B-- there is no ambiguity. 
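
The membership cache that this hunk builds up is keyed by context key and lives only for the duration of one evaluation on the EvalResult. Reduced to a standalone sketch (illustrative only; `query` stands in for the injected `self.__get_big_segments_membership` callback, which returns a `(membership, status)` tuple):

```python
from typing import Callable, Dict, Optional, Tuple

def membership_for_key(
    key: str,
    cache: Dict[str, Optional[dict]],
    query: Callable[[str], Tuple[Optional[dict], str]],
) -> Optional[dict]:
    # A cached value may itself be None (the store had no data for this key);
    # "key in cache" still counts that as a hit, so the store is not queried again.
    if key in cache:
        return cache[key]
    membership, _status = query(key)
    cache[key] = membership
    return membership
```
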
+ membership, state.big_segments_status = result + if state.big_segments_membership is None: + state.big_segments_membership = {} + state.big_segments_membership[key] = membership + included = None if membership is None else membership.get(_make_big_segment_ref(segment), None) if included is not None: return included return self._simple_segment_match_context(segment, context, state, False) diff --git a/ldclient/impl/model/segment.py b/ldclient/impl/model/segment.py index 92cfd14b..f3ee1afd 100644 --- a/ldclient/impl/model/segment.py +++ b/ldclient/impl/model/segment.py @@ -49,7 +49,8 @@ def weight(self) -> Optional[int]: class Segment(ModelEntity): __slots__ = ['_data', '_key', '_version', '_deleted', '_included', '_excluded', - '_included_contexts', '_excluded_contexts', '_rules', '_salt', '_unbounded', '_generation'] + '_included_contexts', '_excluded_contexts', '_rules', '_salt', '_unbounded', + '_unbounded_context_kind', '_generation'] def __init__(self, data: dict): super().__init__(data) @@ -69,6 +70,7 @@ def __init__(self, data: dict): self._rules = list(SegmentRule(item) for item in opt_dict_list(data, 'rules')) self._salt = opt_str(data, 'salt') or '' self._unbounded = opt_bool(data, 'unbounded') + self._unbounded_context_kind = opt_str(data, 'unboundedContextKind') self._generation = opt_int(data, 'generation') @property @@ -110,6 +112,10 @@ def salt(self) -> str: @property def unbounded(self) -> bool: return self._unbounded + + @property + def unbounded_context_kind(self) -> Optional[str]: + return self._unbounded_context_kind @property def generation(self) -> Optional[int]: diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 84fe92d7..05f210ab 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -288,30 +288,30 @@ def get_metadata(self) -> BigSegmentStoreMetadata: pass @abstractmethod - def get_membership(self, user_hash: str) -> Optional[dict]: + def get_membership(self, context_hash: str) -> Optional[dict]: """ - Queries the store for a snapshot of the current segment state for a specific user. + Queries the store for a snapshot of the current segment state for a specific context. - The user_hash is a base64-encoded string produced by hashing the user key as defined by - the Big Segments specification; the store implementation does not need to know the details + The context_hash is a base64-encoded string produced by hashing the context key as defined + by the Big Segments specification; the store implementation does not need to know the details of how this is done, because it deals only with already-hashed keys, but the string can be assumed to only contain characters that are valid in base64. - The return value should be either a `dict`, or None if the user is not referenced in any big + The return value should be either a `dict`, or None if the context is not referenced in any big segments. Each key in the dictionary is a "segment reference", which is how segments are identified in Big Segment data. This string is not identical to the segment key-- the SDK will add other information. The store implementation should not be concerned with the - format of the string. Each value in the dictionary is True if the user is explicitly included - in the segment, False if the user is explicitly excluded from the segment-- and is not also + format of the string. 
Each value in the dictionary is True if the context is explicitly included + in the segment, False if the context is explicitly excluded from the segment-- and is not also explicitly included (that is, if both an include and an exclude existed in the data, the - include would take precedence). If the user's status in a particular segment is undefined, + include would take precedence). If the context's status in a particular segment is undefined, there should be no key or value for that segment. This dictionary may be cached by the SDK, so it should not be modified after it is created. It is a snapshot of the segment membership state at one point in time. - :param user_hash: the hashed user key - :return: True/False values for Big Segments that reference this user + :param context_hash: the hashed context key + :return: True/False values for Big Segments that reference this context """ pass diff --git a/testing/builders.py b/testing/builders.py index 3111016a..5caf4c6f 100644 --- a/testing/builders.py +++ b/testing/builders.py @@ -155,6 +155,9 @@ def rules(self, *rules: dict) -> SegmentBuilder: def unbounded(self, value: bool) -> SegmentBuilder: return self._set('unbounded', value) + def unbounded_context_kind(self, value: Optional[str]) -> SegmentBuilder: + return self._set('unboundedContextKind', value) + def generation(self, value: Optional[int]) -> SegmentBuilder: return self._set('generation', value) diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index e72f29cd..f20caacd 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -40,6 +40,12 @@ def with_unknown_segment(self, key) -> 'EvaluatorBuilder': self.__segments[key] = None return self + def with_big_segment_for_key(self, key: str, segment: Segment, included: bool) -> 'EvaluatorBuilder': + if key not in self.__big_segments: + self.__big_segments[key] = {} + self.__big_segments[key][_make_big_segment_ref(segment)] = included + return self + def with_big_segment_for_user(self, user: dict, segment: Segment, included: bool) -> 'EvaluatorBuilder': user_key = user['key'] if user_key not in self.__big_segments: @@ -67,7 +73,7 @@ def _get_segment(self, key: str) -> Optional[Segment]: def _get_big_segments_membership(self, key: str) -> Tuple[Optional[dict], str]: if key not in self.__big_segments: - raise Exception("test made unexpected request for big segments for user key '%s'" % key) + raise Exception("test made unexpected request for big segments for context key '%s'" % key) return (self.__big_segments[key], self.__big_segments_status) basic_evaluator = EvaluatorBuilder().build() diff --git a/testing/impl/test_evaluator_big_segment.py b/testing/impl/test_evaluator_big_segment.py index 3b63d0ce..71f5d06e 100644 --- a/testing/impl/test_evaluator_big_segment.py +++ b/testing/impl/test_evaluator_big_segment.py @@ -17,14 +17,29 @@ def test_big_segment_with_no_generation_is_not_matched(): assert result.detail.value == False assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.NOT_CONFIGURED -def test_big_segment_matched_with_include(): +def test_big_segment_matched_with_include_for_default_kind(): + _test_matched_with_include(False, False) + _test_matched_with_include(False, True) + +def test_big_segment_matched_with_include_for_non_default_kind(): + _test_matched_with_include(True, False) + _test_matched_with_include(True, True) + +def _test_matched_with_include(non_default_kind: bool, multi_kind_context: bool): + target_key = 'contextkey' + single_kind_context = 
Context.create(target_key, 'kind1') if non_default_kind else Context.create(target_key) + eval_context = Context.create_multi(single_kind_context, Context.create('key2', 'kind2')) if multi_kind_context \ + else single_kind_context + segment = SegmentBuilder('key').version(1) \ .unbounded(True) \ + .unbounded_context_kind('kind1' if non_default_kind else None) \ .generation(2) \ .build() - evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_user(basic_user, segment, True).build() flag = make_boolean_flag_matching_segment(segment) - result = evaluator.evaluate(flag, basic_user, event_factory) + evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_key(target_key, segment, True).build() + + result = evaluator.evaluate(flag, eval_context, event_factory) assert result.detail.value == True assert result.detail.reason['bigSegmentsStatus'] == BigSegmentsStatus.HEALTHY From ed7655184ec9ac3a53e370dc62b62144a3f2f0ea Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 20 Dec 2022 19:44:25 -0800 Subject: [PATCH 327/356] formatting fixes + test cleanup --- ldclient/impl/evaluator.py | 10 +++++----- testing/impl/evaluator_util.py | 11 ++--------- testing/impl/test_evaluator_big_segment.py | 6 +++--- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 9e0fa3ba..78492184 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -270,7 +270,7 @@ def _big_segment_match_context(self, segment: Segment, context: Context, state: return False # A big segment can only apply to one context kind, so if we don't have a key for that kind, - # we don't need to bother querying the data. + # we don't need to bother querying the data. match_context = context.get_individual_context(segment.unbounded_context_kind or Context.DEFAULT_KIND) if match_context is None: return False @@ -290,10 +290,10 @@ def _big_segment_match_context(self, segment: Segment, context: Context, state: return False result = self.__get_big_segments_membership(key) # Note that this query is just by key; the context kind doesn't matter because any given - # Big Segment can only reference one context kind. So if segment A for the "user" kind - # includes a "user" context with key X, and segment B for the "org" kind includes an "org" - # context with the same key X, it is fine to say that the membership for key X is - # segment A and segment B-- there is no ambiguity. + # Big Segment can only reference one context kind. So if segment A for the "user" kind + # includes a "user" context with key X, and segment B for the "org" kind includes an "org" + # context with the same key X, it is fine to say that the membership for key X is + # segment A and segment B-- there is no ambiguity. 
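
For reference, the single- and multi-kind contexts that the test above constructs are built with the SDK's public Context API (keys and kinds below are illustrative):

```python
from ldclient.client import Context

org = Context.create("org-key-123", "org")   # single-kind context of kind "org"
user = Context.create("user-key-456")        # the default kind is "user"
multi = Context.create_multi(org, user)      # multi-kind context containing both

# A Big Segment whose unboundedContextKind is "org" is matched against key
# "org-key-123"; one with no unboundedContextKind falls back to the "user" kind.
```
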
membership, state.big_segments_status = result if state.big_segments_membership is None: state.big_segments_membership = {} diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index f20caacd..a6bbad3d 100644 --- a/testing/impl/evaluator_util.py +++ b/testing/impl/evaluator_util.py @@ -46,15 +46,8 @@ def with_big_segment_for_key(self, key: str, segment: Segment, included: bool) - self.__big_segments[key][_make_big_segment_ref(segment)] = included return self - def with_big_segment_for_user(self, user: dict, segment: Segment, included: bool) -> 'EvaluatorBuilder': - user_key = user['key'] - if user_key not in self.__big_segments: - self.__big_segments[user_key] = {} - self.__big_segments[user_key][_make_big_segment_ref(segment)] = included - return self - - def with_no_big_segments_for_user(self, user: dict) -> 'EvaluatorBuilder': - self.__big_segments[user['key']] = {} + def with_no_big_segments_for_key(self, key: str) -> 'EvaluatorBuilder': + self.__big_segments[key] = {} return self def with_big_segments_status(self, status: str) -> 'EvaluatorBuilder': diff --git a/testing/impl/test_evaluator_big_segment.py b/testing/impl/test_evaluator_big_segment.py index 71f5d06e..959728d6 100644 --- a/testing/impl/test_evaluator_big_segment.py +++ b/testing/impl/test_evaluator_big_segment.py @@ -51,7 +51,7 @@ def test_big_segment_matched_with_rule(): make_segment_rule_matching_context(basic_user) ) \ .build() - evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_user(basic_user).build() + evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_key(basic_user.key).build() flag = make_boolean_flag_matching_segment(segment) result = evaluator.evaluate(flag, basic_user, event_factory) assert result.detail.value == True @@ -65,7 +65,7 @@ def test_big_segment_unmatched_by_exclude_regardless_of_rule(): make_segment_rule_matching_context(basic_user) ) \ .build() - evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_user(basic_user, segment, False).build() + evaluator = EvaluatorBuilder().with_segment(segment).with_big_segment_for_key(basic_user.key, segment, False).build() flag = make_boolean_flag_matching_segment(segment) result = evaluator.evaluate(flag, basic_user, event_factory) assert result.detail.value == False @@ -76,7 +76,7 @@ def test_big_segment_status_is_returned_by_provider(): .unbounded(True) \ .generation(1) \ .build() - evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_user(basic_user). \ + evaluator = EvaluatorBuilder().with_segment(segment).with_no_big_segments_for_key(basic_user.key). 
\
          with_big_segments_status(BigSegmentsStatus.NOT_CONFIGURED).build()
     flag = make_boolean_flag_matching_segment(segment)
     result = evaluator.evaluate(flag, basic_user, event_factory)

From 4a53780b64d126a36527adde531853f9ab1dfb86 Mon Sep 17 00:00:00 2001
From: Eli Bishop
Date: Tue, 20 Dec 2022 20:05:33 -0800
Subject: [PATCH 328/356] prerequisite cycle detection

---
 Makefile                                     |  1 -
 ldclient/impl/evaluator.py                   | 63 ++++++++++++-----
 testing/impl/test_evaluator.py               | 45 ------------
 testing/impl/test_evaluator_prerequisites.py | 72 ++++++++++++++++++++
 4 files changed, 118 insertions(+), 63 deletions(-)
 create mode 100644 testing/impl/test_evaluator_prerequisites.py

diff --git a/Makefile b/Makefile
index cb0d12f2..9c511c49 100644
--- a/Makefile
+++ b/Makefile
@@ -24,7 +24,6 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log
 # - "evaluation/parameterized/segment recursion": Segment recursion is not yet implemented.
 # - "events": These test suites will be unavailable until more of the U2C implementation is done.
 TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \
	-skip 'evaluation/parameterized/prerequisites' \
 	-skip 'evaluation/parameterized/segment recursion' \
 	-skip 'events'

diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py
index 78492184..fc1a6d1f 100644
--- a/ldclient/impl/evaluator.py
+++ b/ldclient/impl/evaluator.py
@@ -48,13 +48,18 @@ def _context_to_user_dict(context: Context) -> dict:
 # prerequisite evaluations, and the cached state of any Big Segments query that we may have
 # ended up having to do for the context.
 class EvalResult:
+    __slots__ = ['detail', 'events', 'big_segments_status', 'big_segments_membership',
+        'original_flag_key', 'prereq_stack']
+
     def __init__(self):
         self.detail = None
-        self.events = None
+        self.events = None  # type: Optional[List[dict]]
         self.big_segments_status = None  # type: Optional[str]
         self.big_segments_membership = None  # type: Optional[Dict[str, Optional[dict]]]
+        self.original_flag_key = None  # type: Optional[str]
+        self.prereq_stack = None  # type: Optional[List[str]]

-    def add_event(self, event):
+    def add_event(self, event: dict):
         if self.events is None:
             self.events = []
         self.events.append(event)
@@ -138,22 +143,46 @@ def _evaluate(self, flag: FeatureFlag, context: Context, state: EvalResult, even
     def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: _EventFactory) -> Optional[dict]:
         failed_prereq = None
         prereq_res = None
+        if len(flag.prerequisites) == 0:
+            return None
+
+        try:
+            # We use the state object to guard against circular references in prerequisites. To avoid
+            # the overhead of creating the state.prereq_stack list in the most common case where
+            # there's only a single level of prerequisites, we treat state.original_flag_key as the first
+            # element in the stack.
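
The cycle check that opens the loop below can be read as a single predicate; restated here as a standalone sketch for clarity (the names mirror the patch, but this paraphrase is not part of the commit):

```python
from typing import List, Optional

def prereq_would_cycle(prereq_key: str, original_flag_key: str, flag_key: str,
                       prereq_stack: Optional[List[str]]) -> bool:
    # A prerequisite is circular if it points back at the flag that started the
    # evaluation, at the flag currently being evaluated, or at any flag already
    # on the in-progress prerequisite stack.
    return (prereq_key == original_flag_key
            or (flag_key != original_flag_key and prereq_key == flag_key)
            or (prereq_stack is not None and prereq_key in prereq_stack))
```
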
+            flag_key = flag.key
+            if flag_key != state.original_flag_key:
+                if state.prereq_stack is None:
+                    state.prereq_stack = []
+                state.prereq_stack.append(flag_key)
+
+            for prereq in flag.prerequisites:
+                prereq_key = prereq.key
+                if (prereq_key == state.original_flag_key or
+                        (flag_key != state.original_flag_key and prereq_key == flag_key) or
+                        (state.prereq_stack is not None and prereq.key in state.prereq_stack)):
+                    raise EvaluationException(('prerequisite relationship to "%s" caused a circular reference;' +
+                        ' this is probably a temporary condition due to an incomplete update') % prereq_key)
+
+                prereq_flag = self.__get_flag(prereq_key)
+                if prereq_flag is None:
+                    log.warning("Missing prereq flag: " + prereq_key)
                     failed_prereq = prereq
-            else:
-                prereq_res = self._evaluate(prereq_flag, context, state, event_factory)
-                # Note that if the prerequisite flag is off, we don't consider it a match no matter what its
-                # off variation was. But we still need to evaluate it in order to generate an event.
-                if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation:
                     failed_prereq = prereq
-                event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag)
-                state.add_event(event)
-        if failed_prereq:
-            return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key}
-        return None
+                else:
+                    prereq_res = self._evaluate(prereq_flag, context, state, event_factory)
+                    # Note that if the prerequisite flag is off, we don't consider it a match no matter what its
+                    # off variation was. But we still need to evaluate it in order to generate an event.
+                    if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation:
+                        failed_prereq = prereq
+                    event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag)
+                    state.add_event(event)
+            if failed_prereq:
+                return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key}
+            return None
+        finally:
+            if state.prereq_stack is not None and len(state.prereq_stack) != 0:
+                state.prereq_stack.pop()

     def _check_targets(self, flag: FeatureFlag, context: Context) -> Optional[EvaluationDetail]:
         user_targets = flag.targets

diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py
index 17b3827a..bfd39b6c 100644
--- a/testing/impl/test_evaluator.py
+++ b/testing/impl/test_evaluator.py
@@ -29,51 +29,6 @@ def test_flag_returns_error_if_off_variation_is_negative():
     detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'})
     assert_eval_result(basic_evaluator.evaluate(flag, user, event_factory), detail, None)

-def test_flag_returns_off_variation_if_prerequisite_not_found():
-    flag = FlagBuilder('feature').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
-        .prerequisite('badfeature', 1).build()
-    evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build()
-    user = Context.create('x')
-    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'})
-    assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None)
-
-def test_flag_returns_off_variation_and_event_if_prerequisite_is_off():
-    flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \
-        .prerequisite('feature1', 1).build()
-    flag1 = FlagBuilder('feature1').version(2).on(False).off_variation(1).variations('d', 'e').fallthrough_variation(1) \
-        .build()
-    # note that even though flag1 returns the desired variation, it is still off and therefore not a match
-    evaluator = EvaluatorBuilder().with_flag(flag1).build()
-    user = Context.create('x')
-    detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'})
-    events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None,
-                         'version': 2, 'user':
_context_to_user_dict(user), 'prereqOf': 'feature0'}] - assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) - -def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): - flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ - .prerequisite('feature1', 1).build() - flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(0) \ - .build() - evaluator = EvaluatorBuilder().with_flag(flag1).build() - user = Context.create('x') - detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] - assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) - -def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): - flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(0) \ - .prerequisite('feature1', 1).build() - flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ - .build() - evaluator = EvaluatorBuilder().with_flag(flag1).build() - user = Context.create('x') - detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] - assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) - def test_flag_returns_error_if_fallthrough_variation_is_too_high(): flag = FlagBuilder('feature').on(True).variations('a', 'b', 'c').fallthrough_variation(999).build() user = Context.create('x') diff --git a/testing/impl/test_evaluator_prerequisites.py b/testing/impl/test_evaluator_prerequisites.py new file mode 100644 index 00000000..1cb99f70 --- /dev/null +++ b/testing/impl/test_evaluator_prerequisites.py @@ -0,0 +1,72 @@ +import pytest + +from testing.builders import * + +from ldclient.client import Context +from ldclient.evaluation import EvaluationDetail +from ldclient.impl.evaluator import _context_to_user_dict +from testing.builders import * +from testing.impl.evaluator_util import * + + +def test_flag_returns_off_variation_if_prerequisite_not_found(): + flag = FlagBuilder('feature').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('badfeature', 1).build() + evaluator = EvaluatorBuilder().with_unknown_flag('badfeature').build() + user = Context.create('x') + detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'badfeature'}) + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, None) + +def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(False).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ + .build() + # note that even though flag1 returns the desired variation, it is still off and therefore not a match + evaluator = EvaluatorBuilder().with_flag(flag1).build() + user = Context.create('x') + detail = 
EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) + events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) + +def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(1) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(0) \ + .build() + evaluator = EvaluatorBuilder().with_flag(flag1).build() + user = Context.create('x') + detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) + events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) + +def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): + flag = FlagBuilder('feature0').on(True).off_variation(1).variations('a', 'b', 'c').fallthrough_variation(0) \ + .prerequisite('feature1', 1).build() + flag1 = FlagBuilder('feature1').version(2).on(True).off_variation(1).variations('d', 'e').fallthrough_variation(1) \ + .build() + evaluator = EvaluatorBuilder().with_flag(flag1).build() + user = Context.create('x') + detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) + events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, + 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) + +@pytest.mark.parametrize("depth", [1, 2, 3, 4]) +def test_prerequisite_cycle_detection(depth: int): + flag_keys = list("flagkey%d" % i for i in range(depth)) + flags = [] + for i in range(depth): + flags.append( + FlagBuilder(flag_keys[i]).on(True).variations(False, True).off_variation(0) \ + .prerequisite(flag_keys[(i + 1) % depth], 0) \ + .build()) + evaluator_builder = EvaluatorBuilder() + for f in flags: + evaluator_builder.with_flag(f) + evaluator = evaluator_builder.build() + context = Context.create('x') + detail = EvaluationDetail(None, None, {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'}) + assert_eval_result(evaluator.evaluate(flags[0], context, event_factory), detail, None) From 2f2363ce86dc9ff4adf8734f40460f4a7d9c9ba1 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Tue, 20 Dec 2022 20:15:54 -0800 Subject: [PATCH 329/356] segment recursion --- Makefile | 3 --- ldclient/impl/evaluator.py | 22 ++++++++++++++++++---- testing/impl/test_evaluator_segment.py | 24 ++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 9c511c49..ce9186b3 100644 --- a/Makefile +++ b/Makefile @@ -20,11 +20,8 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass # Explanation of current skips: -# - "evaluation/parameterized/prerequisites": Can't pass yet because prerequisite cycle detection is not implemented. 
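
At depth 2, the parameterized cycle test added above amounts to the following dependency loop, spelled out with the same test builders (illustrative keys):

```python
flag0 = FlagBuilder('flagkey0').on(True).variations(False, True).off_variation(0) \
    .prerequisite('flagkey1', 0).build()
flag1 = FlagBuilder('flagkey1').on(True).variations(False, True).off_variation(0) \
    .prerequisite('flagkey0', 0).build()

# Evaluating flag0 reaches flag1, whose prerequisite points back at flag0; the
# evaluator now reports {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'} instead
# of recursing without bound.
```
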
-# - "evaluation/parameterized/segment recursion": Segment recursion is not yet implemented.
 # - "events": These test suites will be unavailable until more of the U2C implementation is done.
 TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \
-	-skip 'evaluation/parameterized/segment recursion' \
 	-skip 'events'

diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py
index fc1a6d1f..64833837 100644
--- a/ldclient/impl/evaluator.py
+++ b/ldclient/impl/evaluator.py
@@ -49,7 +49,7 @@ def _context_to_user_dict(context: Context) -> dict:
 # ended up having to do for the context.
 class EvalResult:
     __slots__ = ['detail', 'events', 'big_segments_status', 'big_segments_membership',
-        'original_flag_key', 'prereq_stack']
+        'original_flag_key', 'prereq_stack', 'segment_stack']

     def __init__(self):
         self.detail = None
@@ -58,6 +58,7 @@ def __init__(self):
         self.big_segments_membership = None  # type: Optional[Dict[str, Optional[dict]]]
         self.original_flag_key = None  # type: Optional[str]
         self.prereq_stack = None  # type: Optional[List[str]]
+        self.segment_stack = None  # type: Optional[List[str]]

     def add_event(self, event: dict):
         if self.events is None:
@@ -253,6 +254,9 @@ def _clause_matches_context(self, clause: Clause, context: Context, state: EvalR
         return _maybe_negate(clause, _match_single_context_value(op, context_value, clause_values))

     def _segment_matches_context(self, segment: Segment, context: Context, state: EvalResult) -> bool:
+        if state.segment_stack is not None and segment.key in state.segment_stack:
+            raise EvaluationException(('segment rule referencing segment "%s" caused a circular reference;' +
+                ' this is probably a temporary condition due to an incomplete update') % segment.key)
         if segment.unbounded:
             return self._big_segment_match_context(segment, context, state)
         return self._simple_segment_match_context(segment, context, state, True)
@@ -269,9 +273,19 @@ def _simple_segment_match_context(self, segment: Segment, context: Context, stat
         for t in segment.excluded_contexts:
             if _context_key_is_in_target_list(context, t.context_kind, t.values):
                 return False
-        for rule in segment.rules:
-            if self._segment_rule_matches_context(rule, context, state, segment.key, segment.salt):
-                return True
+        if len(segment.rules) != 0:
+            # Evaluating rules means we might be doing recursive segment matches, so we'll push the current
+            # segment key onto the stack for cycle detection.
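
The recursion this stack guards against comes from segment rules whose clauses match on membership in another segment. A sketch of the circular case, using the test helpers from testing/builders.py that the new test below also uses (illustrative keys):

```python
segment_a = SegmentBuilder('segment-a').rules(
    SegmentRuleBuilder().clauses(make_clause_matching_segment_key('segment-b')).build()
).build()
segment_b = SegmentBuilder('segment-b').rules(
    SegmentRuleBuilder().clauses(make_clause_matching_segment_key('segment-a')).build()
).build()

# Matching either segment would previously have recursed without bound; with the
# stack check above it raises EvaluationException, which surfaces as an
# ERROR/MALFORMED_FLAG evaluation result.
```
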
+ if state.segment_stack is None: + state.segment_stack = [] + state.segment_stack.append(segment.key) + try: + for rule in segment.rules: + if self._segment_rule_matches_context(rule, context, state, segment.key, segment.salt): + return True + return False + finally: + state.segment_stack.pop() return False def _segment_rule_matches_context(self, rule: SegmentRule, context: Context, state: EvalResult, segment_key: str, salt: str) -> bool: diff --git a/testing/impl/test_evaluator_segment.py b/testing/impl/test_evaluator_segment.py index 3cd12390..2dddc34d 100644 --- a/testing/impl/test_evaluator_segment.py +++ b/testing/impl/test_evaluator_segment.py @@ -158,3 +158,27 @@ def test_non_matching_rule_with_multiple_clauses(): ) \ .build() assert _segment_matches_context(segment, context) is False + +@pytest.mark.parametrize("depth", [1, 2, 3, 4]) +def test_segment_cycle_detection(depth: int): + segment_keys = list("segmentkey%d" % i for i in range(depth)) + segments = [] + for i in range(depth): + segments.append( + SegmentBuilder(segment_keys[i]) \ + .rules( + SegmentRuleBuilder().clauses( + make_clause_matching_segment_key(segment_keys[(i + 1) % depth]) + ) + .build() + ) + .build()) + evaluator_builder = EvaluatorBuilder() + for s in segments: + evaluator_builder.with_segment(s) + evaluator = evaluator_builder.build() + flag = make_boolean_flag_matching_segment(segments[0]) + context = Context.create('x') + result = evaluator.evaluate(flag, context, event_factory) + assert result.detail.value is None + assert result.detail.reason == {'kind': 'ERROR', 'errorKind': 'MALFORMED_FLAG'} From 8e0108d1ce100b0a4d5eb7d55b8acf9e56f0fb7e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 21 Dec 2022 10:33:52 -0800 Subject: [PATCH 330/356] define custom classes for event data --- ldclient/client.py | 10 +- ldclient/impl/evaluator.py | 14 +- ldclient/impl/event_factory.py | 113 ------ ldclient/{ => impl/events}/event_processor.py | 205 +++++----- .../{ => impl/events}/event_summarizer.py | 29 +- ldclient/impl/events/types.py | 173 +++++++++ ldclient/impl/util.py | 5 + ldclient/util.py | 5 +- testing/builders.py | 3 + testing/impl/evaluator_util.py | 6 +- testing/impl/events/__init__.py | 0 .../{ => impl/events}/test_event_factory.py | 20 +- .../{ => impl/events}/test_event_processor.py | 350 ++++++----------- testing/impl/events/test_event_summarizer.py | 47 +++ testing/impl/test_evaluator.py | 1 - testing/impl/test_evaluator_prerequisites.py | 18 +- testing/test_event_summarizer.py | 63 ---- testing/test_ldclient.py | 352 +----------------- testing/test_ldclient_evaluation.py | 26 +- testing/test_ldclient_events.py | 296 +++++++++++++++ 20 files changed, 813 insertions(+), 923 deletions(-) delete mode 100644 ldclient/impl/event_factory.py rename ldclient/{ => impl/events}/event_processor.py (78%) rename ldclient/{ => impl/events}/event_summarizer.py (52%) create mode 100644 ldclient/impl/events/types.py create mode 100644 ldclient/impl/util.py create mode 100644 testing/impl/events/__init__.py rename testing/{ => impl/events}/test_event_factory.py (85%) rename testing/{ => impl/events}/test_event_processor.py (58%) create mode 100644 testing/impl/events/test_event_summarizer.py delete mode 100644 testing/test_event_summarizer.py create mode 100644 testing/test_ldclient_events.py diff --git a/ldclient/client.py b/ldclient/client.py index 812f70f9..103021c5 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -14,13 +14,13 @@ from ldclient.config import Config from ldclient.context import Context 
from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator -from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_requester import FeatureRequesterImpl from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.evaluation import EvaluationDetail, FeatureFlagsState from ldclient.impl.big_segments import BigSegmentStoreManager from ldclient.impl.evaluator import Evaluator, error_reason, _context_to_user_dict -from ldclient.impl.event_factory import _EventFactory +from ldclient.impl.events.event_processor import DefaultEventProcessor +from ldclient.impl.events.types import EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureRequester, FeatureStore from ldclient.polling import PollingUpdateProcessor @@ -94,8 +94,8 @@ def __init__(self, config: Config, start_wait: float=5): self._event_processor = None self._lock = Lock() - self._event_factory_default = _EventFactory(False) - self._event_factory_with_reasons = _EventFactory(True) + self._event_factory_default = EventFactory(False) + self._event_factory_with_reasons = EventFactory(True) store = _FeatureStoreClientWrapper(self._config.feature_store) self._store = store # type: FeatureStore @@ -411,7 +411,7 @@ def all_flags_state(self, context: Union[Context, dict], **kwargs) -> FeatureFla reason = {'kind': 'ERROR', 'errorKind': 'EXCEPTION'} detail = EvaluationDetail(None, None, reason) - requires_experiment_data = _EventFactory.is_experiment(flag, detail.reason) + requires_experiment_data = EventFactory.is_experiment(flag, detail.reason) flag_state = { 'key': flag['key'], 'value': detail.value, diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 64833837..e21e9d2e 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -1,9 +1,8 @@ from ldclient import operators from ldclient.context import Context, _USER_STRING_ATTRS from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail -from ldclient.impl.event_factory import _EventFactory +from ldclient.impl.events.types import EventFactory, EventInputEvaluation from ldclient.impl.model import * -from ldclient.interfaces import BigSegmentStoreStatus import hashlib import logging @@ -53,7 +52,7 @@ class EvalResult: def __init__(self): self.detail = None - self.events = None # type: Optional[List[dict]] + self.events = None # type: Optional[List[EventInputEvaluation]] self.big_segments_status = None # type: Optional[str] self.big_segments_membership = None # type: Optional[Dict[str, Optional[dict]]] self.original_flag_key = None # type: Optional[str] @@ -65,6 +64,9 @@ def add_event(self, event: dict): self.events = [] self.events.append(event) + def __repr__(self) -> str: # used only in test debugging + return "EvalResult(detail=%s, events=%s)" % (self.detail, self.events) + class EvaluationException(Exception): def __init__(self, message: str, error_kind: str = 'MALFORMED_FLAG'): @@ -106,7 +108,7 @@ def __init__( self.__get_big_segments_membership = get_big_segments_membership self.__logger = logger - def evaluate(self, flag: FeatureFlag, context: Context, event_factory: _EventFactory) -> EvalResult: + def evaluate(self, flag: FeatureFlag, context: Context, event_factory: EventFactory) -> EvalResult: state = EvalResult() try: state.detail = self._evaluate(flag, context, state, event_factory) @@ -119,7 +121,7 @@ def evaluate(self, flag: FeatureFlag, context: Context, event_factory: _EventFac 
state.detail.reason['bigSegmentsStatus'] = state.big_segments_status return state - def _evaluate(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: _EventFactory) -> EvaluationDetail: + def _evaluate(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: EventFactory) -> EvaluationDetail: if not flag.on: return _get_off_value(flag, {'kind': 'OFF'}) @@ -141,7 +143,7 @@ def _evaluate(self, flag: FeatureFlag, context: Context, state: EvalResult, even # Walk through fallthrough and see if it matches return _get_value_for_variation_or_rollout(flag, flag.fallthrough, context, {'kind': 'FALLTHROUGH'}) - def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: _EventFactory) -> Optional[dict]: + def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalResult, event_factory: EventFactory) -> Optional[dict]: failed_prereq = None prereq_res = None if flag.prerequisites.count == 0: diff --git a/ldclient/impl/event_factory.py b/ldclient/impl/event_factory.py deleted file mode 100644 index d32291ea..00000000 --- a/ldclient/impl/event_factory.py +++ /dev/null @@ -1,113 +0,0 @@ -from ldclient.impl.model import * - -from typing import Optional - -# Event constructors are centralized here to avoid mistakes and repetitive logic. -# The LDClient owns two instances of _EventFactory: one that always embeds evaluation reasons -# in the events (for when variation_detail is called) and one that doesn't. -# -# Note that none of these methods fill in the "creationDate" property, because in the Python -# client, that is done by DefaultEventProcessor.send_event(). - -class _EventFactory: - def __init__(self, with_reasons): - self._with_reasons = with_reasons - - def new_eval_event(self, flag: FeatureFlag, user, detail, default_value, prereq_of_flag: Optional[FeatureFlag] = None) -> dict: - add_experiment_data = self.is_experiment(flag, detail.reason) - e = { - 'kind': 'feature', - 'key': flag.key, - 'user': user, - 'value': detail.value, - 'variation': detail.variation_index, - 'default': default_value, - 'version': flag.version - } - # the following properties are handled separately so we don't waste bandwidth on unused keys - if add_experiment_data or flag.track_events: - e['trackEvents'] = True - if flag.debug_events_until_date: - e['debugEventsUntilDate'] = flag.debug_events_until_date - if prereq_of_flag is not None: - e['prereqOf'] = prereq_of_flag.key - if add_experiment_data or self._with_reasons: - e['reason'] = detail.reason - if user is not None and user.get('anonymous'): - e['contextKind'] = self._user_to_context_kind(user) - return e - - def new_default_event(self, flag: FeatureFlag, user, default_value, reason) -> dict: - e = { - 'kind': 'feature', - 'key': flag.key, - 'user': user, - 'value': default_value, - 'default': default_value, - 'version': flag.version - } - # the following properties are handled separately so we don't waste bandwidth on unused keys - if flag.track_events: - e['trackEvents'] = True - if flag.debug_events_until_date: - e['debugEventsUntilDate'] = flag.debug_events_until_date - if self._with_reasons: - e['reason'] = reason - if user is not None and user.get('anonymous'): - e['contextKind'] = self._user_to_context_kind(user) - return e - - def new_unknown_flag_event(self, key, user, default_value, reason): - e = { - 'kind': 'feature', - 'key': key, - 'user': user, - 'value': default_value, - 'default': default_value - } - if self._with_reasons: - e['reason'] = reason 
- if user is not None and user.get('anonymous'): - e['contextKind'] = self._user_to_context_kind(user) - return e - - def new_identify_event(self, user): - return { - 'kind': 'identify', - 'key': str(user.get('key')), - 'user': user - } - - def new_custom_event(self, event_name, user, data, metric_value): - e = { - 'kind': 'custom', - 'key': event_name, - 'user': user - } - if data is not None: - e['data'] = data - if metric_value is not None: - e['metricValue'] = metric_value - if user.get('anonymous'): - e['contextKind'] = self._user_to_context_kind(user) - return e - - def _user_to_context_kind(self, user): - if user.get('anonymous'): - return "anonymousUser" - else: - return "user" - - @staticmethod - def is_experiment(flag: FeatureFlag, reason): - if reason is not None: - if reason.get('inExperiment'): - return True - kind = reason['kind'] - if kind == 'RULE_MATCH': - index = reason['ruleIndex'] - rules = flag.rules - return index >= 0 and index < len(rules) and rules[index].track_events - elif kind == 'FALLTHROUGH': - return flag.track_events_fallthrough - return False diff --git a/ldclient/event_processor.py b/ldclient/impl/events/event_processor.py similarity index 78% rename from ldclient/event_processor.py rename to ldclient/impl/events/event_processor.py index 1f90f805..01a121ce 100644 --- a/ldclient/event_processor.py +++ b/ldclient/impl/events/event_processor.py @@ -1,29 +1,29 @@ """ Implementation details of the analytics event delivery component. """ -# currently excluded from documentation - see docs/README.md from calendar import timegm from collections import namedtuple from email.utils import parsedate -import errno import json from threading import Event, Lock, Thread +from typing import Any, List import time import uuid import queue import urllib3 -from ldclient.event_summarizer import EventSummarizer +from ldclient.diagnostics import create_diagnostic_init +from ldclient.impl.events.event_summarizer import EventSummarizer, EventSummary from ldclient.fixed_thread_pool import FixedThreadPool +from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify from ldclient.impl.http import _http_factory from ldclient.impl.repeating_task import RepeatingTask +from ldclient.impl.util import current_time_millis from ldclient.lru_cache import SimpleLRUCache from ldclient.user_filter import UserFilter from ldclient.interfaces import EventProcessor -from ldclient.util import log -from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response, _headers -from ldclient.diagnostics import create_diagnostic_init +from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, log, _headers __MAX_FLUSH_THREADS__ = 5 __CURRENT_EVENT_SCHEMA__ = 3 @@ -33,75 +33,71 @@ EventProcessorMessage = namedtuple('EventProcessorMessage', ['type', 'param']) +class DebugEvent: + __slots__ = ['original_input'] + + def __init__(self, original_input: EventInputEvaluation): + self.original_input = original_input + +class IndexEvent: + __slots__ = ['timestamp', 'user'] + + def __init__(self, timestamp: int, user: dict): + self.timestamp = timestamp + self.user = user + + class EventOutputFormatter: def __init__(self, config): self._user_filter = UserFilter(config) - def make_output_events(self, events, summary): + def make_output_events(self, events: List[Any], summary: EventSummary): events_out = [ self.make_output_event(e) for e in events ] if 
len(summary.counters) > 0: events_out.append(self.make_summary_event(summary)) return events_out - def make_output_event(self, e): - kind = e['kind'] - if kind == 'feature': - is_debug = e.get('debug') - out = { - 'kind': 'debug' if is_debug else 'feature', - 'creationDate': e['creationDate'], - 'key': e['key'], - 'version': e.get('version'), - 'variation': e.get('variation'), - 'value': e.get('value'), - 'default': e.get('default') - } - if 'prereqOf' in e: - out['prereqOf'] = e.get('prereqOf') - if is_debug: - out['user'] = self._process_user(e) - else: - out['userKey'] = self._get_userkey(e) - if e.get('reason'): - out['reason'] = e.get('reason') - if e.get('contextKind'): - out['contextKind'] = e.get('contextKind') + def make_output_event(self, e: Any): + if isinstance(e, EventInputEvaluation): + out = self._base_eval_props(e, 'feature') + out['userKey'] = e.user['key'] + return out + elif isinstance(e, DebugEvent): + out = self._base_eval_props(e.original_input, 'debug') + out['user'] = self._process_user(e.original_input.user) return out - elif kind == 'identify': + elif isinstance(e, EventInputIdentify): return { 'kind': 'identify', - 'creationDate': e['creationDate'], - 'key': self._get_userkey(e), - 'user': self._process_user(e) + 'creationDate': e.timestamp, + 'key': e.user['key'], + 'user': self._process_user(e.user) } - elif kind == 'custom': + elif isinstance(e, IndexEvent): + return { + 'kind': 'index', + 'creationDate': e.timestamp, + 'user': self._process_user(e.user) + } + elif isinstance(e, EventInputCustom): out = { 'kind': 'custom', - 'creationDate': e['creationDate'], - 'key': e['key'] + 'creationDate': e.timestamp, + 'key': e.key, + 'userKey': e.user['key'] } - out['userKey'] = self._get_userkey(e) - if e.get('data') is not None: - out['data'] = e['data'] - if e.get('metricValue') is not None: - out['metricValue'] = e['metricValue'] - if e.get('contextKind'): - out['contextKind'] = e.get('contextKind') + if e.data is not None: + out['data'] = e.data + if e.metric_value is not None: + out['metricValue'] = e.metric_value return out - elif kind == 'index': - return { - 'kind': 'index', - 'creationDate': e['creationDate'], - 'user': self._process_user(e) - } - else: - return e + return None """ Transform summarizer data into the format used for the event payload. 
""" - def make_summary_event(self, summary): - flags_out = dict() + def make_summary_event(self, summary: EventSummary): + flags_out = dict() # type: dict[str, Any] for ckey, cval in summary.counters.items(): flag_key, variation, version = ckey flag_data = flags_out.get(flag_key) @@ -126,12 +122,26 @@ def make_summary_event(self, summary): 'features': flags_out } - def _process_user(self, event): - filtered = self._user_filter.filter_user_props(event['user']) - return stringify_attrs(filtered, __USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__) + def _process_user(self, user: dict): + return self._user_filter.filter_user_props(user) - def _get_userkey(self, event): - return str(event['user'].get('key')) + def _base_eval_props(self, e: EventInputEvaluation, kind: str) -> dict: + out = { + 'kind': kind, + 'creationDate': e.timestamp, + 'key': e.key, + 'value': e.value, + 'default': e.default_value + } + if e.flag is not None: + out['version'] = e.flag.version + if e.variation is not None: + out['variation'] = e.variation + if e.reason is not None: + out['reason'] = e.reason + if e.prereq_of is not None: + out['prereqOf'] = e.prereq_of + return out class EventPayloadSendTask: @@ -208,7 +218,7 @@ def __init__(self, capacity): self._exceeded_capacity = False self._dropped_events = 0 - def add_event(self, event): + def add_event(self, event: Any): if len(self._events) >= self._capacity: self._dropped_events += 1 if not self._exceeded_capacity: @@ -218,7 +228,7 @@ def add_event(self, event): self._events.append(event) self._exceeded_capacity = False - def add_to_summary(self, event): + def add_to_summary(self, event: EventInputEvaluation): self._summarizer.summarize_event(event) def get_and_clear_dropped_count(self): @@ -283,60 +293,54 @@ def _run_main_loop(self): self._do_shutdown() message.param.set() return - except Exception: + except Exception as e: log.error('Unhandled exception in event processor', exc_info=True) - def _process_event(self, event): + def _process_event(self, event: EventInput): if self._disabled: return - # Always record the event in the summarizer. - self._outbox.add_to_summary(event) - # Decide whether to add the event to the payload. Feature events may be added twice, once for # the event (if tracked) and once for debugging. - add_full_event = False - add_debug_event = False - add_index_event = False - if event['kind'] == "feature": - add_full_event = event.get('trackEvents') - add_debug_event = self._should_debug_event(event) - else: - add_full_event = True + user = event.user # type: dict + can_add_index = True + full_event = None + debug_event = None + + if isinstance(event, EventInputEvaluation): + self._outbox.add_to_summary(event) + if event.track_events: + full_event = event + if self._should_debug_event(event): + debug_event = DebugEvent(event) + elif isinstance(event, EventInputIdentify): + full_event = event + can_add_index = False # an index event would be redundant if there's an identify event + elif isinstance(event, EventInputCustom): + full_event = event # For each user we haven't seen before, we add an index event - unless this is already - # an identify event for that user. - user = event.get('user') - if user and 'key' in user: - is_identify_event = event['kind'] == 'identify' - already_seen = self.notice_user(user) - add_index_event = not is_identify_event and not already_seen - if not is_identify_event and already_seen: + # an identify event. 
+ already_seen = self._user_keys.put(user['key'], True) + if can_add_index: + if already_seen: self._deduplicated_users += 1 + else: + self._outbox.add_event(IndexEvent(event.timestamp, user)) - if add_index_event: - ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user } - self._outbox.add_event(ie) - if add_full_event: - self._outbox.add_event(event) - if add_debug_event: - debug_event = event.copy() - debug_event['debug'] = True + if full_event: + self._outbox.add_event(full_event) + + if debug_event: self._outbox.add_event(debug_event) - # Add to the set of users we've noticed, and return true if the user was already known to us. - def notice_user(self, user): - if user is None or 'key' not in user: + def _should_debug_event(self, event: EventInputEvaluation): + if event.flag is None: return False - key = user['key'] - return self._user_keys.put(key, True) - - def _should_debug_event(self, event): - debug_until = event.get('debugEventsUntilDate') + debug_until = event.flag.debug_events_until_date if debug_until is not None: last_past = self._last_known_past_time - now = int(time.time() * 1000) - if debug_until > last_past and debug_until > now: + if debug_until > last_past and debug_until > current_time_millis(): return True return False @@ -402,8 +406,7 @@ def __init__(self, config, http=None, dispatcher_class=None, diagnostic_accumula (dispatcher_class or EventDispatcher)(self._inbox, config, http, diagnostic_accumulator) - def send_event(self, event): - event['creationDate'] = int(time.time() * 1000) + def send_event(self, event: EventInput): self._post_to_inbox(EventProcessorMessage('event', event)) def flush(self): diff --git a/ldclient/event_summarizer.py b/ldclient/impl/events/event_summarizer.py similarity index 52% rename from ldclient/event_summarizer.py rename to ldclient/impl/events/event_summarizer.py index c0b10eef..c144a4d3 100644 --- a/ldclient/event_summarizer.py +++ b/ldclient/impl/events/event_summarizer.py @@ -5,6 +5,8 @@ from collections import namedtuple +from ldclient.impl.events.types import EventInputEvaluation + EventSummary = namedtuple('EventSummary', ['start_date', 'end_date', 'counters']) @@ -18,20 +20,19 @@ def __init__(self): """ Add this event to our counters, if it is a type of event we need to count. """ - def summarize_event(self, event): - if event['kind'] == 'feature': - counter_key = (event['key'], event.get('variation'), event.get('version')) - counter_val = self.counters.get(counter_key) - if counter_val is None: - counter_val = { 'count': 1, 'value': event['value'], 'default': event.get('default') } - self.counters[counter_key] = counter_val - else: - counter_val['count'] = counter_val['count'] + 1 - date = event['creationDate'] - if self.start_date == 0 or date < self.start_date: - self.start_date = date - if date > self.end_date: - self.end_date = date + def summarize_event(self, event: EventInputEvaluation): + counter_key = (event.key, event.variation, None if event.flag is None else event.flag.version) + counter_val = self.counters.get(counter_key) + if counter_val is None: + counter_val = { 'count': 1, 'value': event.value, 'default': event.default_value } + self.counters[counter_key] = counter_val + else: + counter_val['count'] = counter_val['count'] + 1 + date = event.timestamp + if self.start_date == 0 or date < self.start_date: + self.start_date = date + if date > self.end_date: + self.end_date = date """ Return the current summarized event data. 
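[Editor's aside, illustrating the diff above — not part of the commit. The rewritten summarize_event keys each counter on the tuple (flag key, variation, flag version), falling back to (key, None, None) for unknown flags; this is how many evaluations collapse into a single summary event. Below is a minimal, self-contained sketch of that scheme using plain tuples in place of the SDK's EventInputEvaluation objects; the standalone summarize() helper is hypothetical, not an SDK function:

    def summarize(events):
        # events: iterable of (timestamp, flag_key, variation, version, value, default)
        counters = {}
        start_date, end_date = 0, 0
        for ts, key, variation, version, value, default in events:
            counter_key = (key, variation, version)  # one counter per flag/variation/version
            counter = counters.get(counter_key)
            if counter is None:
                counters[counter_key] = {'count': 1, 'value': value, 'default': default}
            else:
                counter['count'] += 1
            if start_date == 0 or ts < start_date:
                start_date = ts
            if ts > end_date:
                end_date = ts
        return start_date, end_date, counters

    # Repeated evaluations of the same flag/variation/version share one counter:
    start, end, counters = summarize([
        (1000, 'flag1', 1, 11, 'value1', 'default1'),
        (2000, 'flag1', 1, 11, 'value1', 'default1'),
        (1500, 'badkey', None, None, 'default3', 'default3'),  # unknown flag: no version
    ])
    assert (start, end) == (1000, 2000)
    assert counters[('flag1', 1, 11)]['count'] == 2
    assert counters[('badkey', None, None)]['count'] == 1

End of aside.]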
diff --git a/ldclient/impl/events/types.py b/ldclient/impl/events/types.py
new file mode 100644
index 00000000..5aee18d6
--- /dev/null
+++ b/ldclient/impl/events/types.py
@@ -0,0 +1,173 @@
+import json
+
+from ldclient.evaluation import EvaluationDetail
+from ldclient.impl.model import *
+from ldclient.impl.util import current_time_millis
+
+from typing import Any, Callable, Optional
+
+# These event types are not the event data that is sent to LaunchDarkly; they're the input
+# parameters that are passed to EventProcessor, which translates them into event data (for
+# instance, many evaluations may produce just one summary event). Since the SDK generates
+# these at high volume, we want them to be efficient, so we use attributes and slots rather
+# than dictionaries.
+
+class EventInput:
+    def __repr__(self) -> str: # used only in test debugging
+        return "%s(%s)" % (self.__class__.__name__, json.dumps(self.to_debugging_dict()))
+
+    def __eq__(self, other) -> bool: # used only in tests
+        return isinstance(other, EventInput) and self.to_debugging_dict() == other.to_debugging_dict()
+
+    def to_debugging_dict(self) -> dict:
+        pass
+
+class EventInputEvaluation(EventInput):
+    __slots__ = ['timestamp', 'user', 'key', 'flag', 'variation', 'value', 'reason', 'default_value',
+        'prereq_of', 'track_events']
+
+    def __init__(self, timestamp: int, user: dict, key: str, flag: Optional[FeatureFlag],
+            variation: Optional[int], value: Any, reason: Optional[dict],
+            default_value: Any, prereq_of: Optional[FeatureFlag] = None, track_events: bool = False):
+        self.timestamp = timestamp
+        self.user = user
+        self.key = key
+        self.flag = flag
+        self.variation = variation
+        self.value = value
+        self.reason = reason
+        self.default_value = default_value
+        self.prereq_of = prereq_of
+        self.track_events = track_events
+
+    def to_debugging_dict(self) -> dict:
+        return {
+            "timestamp": self.timestamp,
+            "user": self.user,
+            "key": self.key,
+            "flag": {"key": self.flag.key} if self.flag else None,
+            "variation": self.variation,
+            "value": self.value,
+            "reason": self.reason,
+            "default_value": self.default_value,
+            "prereq_of": {"key": self.prereq_of.key} if self.prereq_of else None,
+            "track_events": self.track_events
+        }
+
+class EventInputIdentify(EventInput):
+    __slots__ = ['timestamp', 'user']
+
+    def __init__(self, timestamp: int, user: dict):
+        self.timestamp = timestamp
+        self.user = user
+
+    def to_debugging_dict(self) -> dict:
+        return {
+            "timestamp": self.timestamp,
+            "user": self.user
+        }
+
+class EventInputCustom(EventInput):
+    __slots__ = ['timestamp', 'user', 'key', 'data', 'metric_value']
+
+    def __init__(self, timestamp: int, user: dict, key: str, data: Any = None, metric_value: Optional[float] = None):
+        self.timestamp = timestamp
+        self.user = user
+        self.key = key
+        self.data = data
+        self.metric_value = metric_value
+
+    def to_debugging_dict(self) -> dict:
+        return {
+            "timestamp": self.timestamp,
+            "user": self.user,
+            "key": self.key,
+            "data": self.data,
+            "metric_value": self.metric_value
+        }
+
+# Event constructors are centralized here to avoid mistakes and repetitive logic.
+# The LDClient owns two instances of EventFactory: one that always embeds evaluation reasons
+# in the events (for when variation_detail is called) and one that doesn't.
+#
+# Unlike the old _EventFactory, these methods do fill in the event's timestamp: each
+# constructor gets it from the factory's timestamp_fn (current_time_millis by default),
+# so DefaultEventProcessor.send_event() no longer stamps a "creationDate" on the event.
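[Editor's aside — not part of the commit: the EventInput subclasses above behave as small value objects. Both __eq__ and __repr__ delegate to to_debugging_dict(), which is what lets the rewritten tests later in this patch compare an expected event directly against the one the SDK produced. A minimal usage sketch, using only the classes defined in this file:

    user = {'key': 'u'}
    a = EventInputIdentify(1000, user)
    b = EventInputIdentify(1000, user)
    assert a == b                           # equal because their debugging dicts are equal
    assert 'EventInputIdentify' in repr(a)  # repr is the JSON of to_debugging_dict()

The slots-based layout echoes the note at the top of the file: one of these objects is created for every evaluation, so they are kept deliberately lightweight. End of aside.]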
+ +class EventFactory: + def __init__(self, with_reasons: bool, timestamp_fn: Callable[[], int] = current_time_millis): + self._with_reasons = with_reasons + self._timestamp_fn = timestamp_fn + + def new_eval_event(self, flag: FeatureFlag, user: dict, detail: EvaluationDetail, + default_value: Any, prereq_of_flag: Optional[FeatureFlag] = None) -> EventInputEvaluation: + add_experiment_data = self.is_experiment(flag, detail.reason) + return EventInputEvaluation( + self._timestamp_fn(), + user, + flag.key, + flag, + detail.variation_index, + detail.value, + detail.reason if self._with_reasons or add_experiment_data else None, + default_value, + prereq_of_flag, + flag.track_events or add_experiment_data + ) + + def new_default_event(self, flag: FeatureFlag, user: dict, default_value: Any, + reason: Optional[dict]) -> EventInputEvaluation: + return EventInputEvaluation( + self._timestamp_fn(), + user, + flag.key, + flag, + None, + default_value, + reason if self._with_reasons else None, + default_value, + None, + flag.track_events + ) + + def new_unknown_flag_event(self, key: str, user: dict, default_value: Any, + reason: Optional[dict]) -> EventInputEvaluation: + return EventInputEvaluation( + self._timestamp_fn(), + user, + key, + None, + None, + default_value, + reason if self._with_reasons else None, + default_value, + None, + False + ) + + def new_identify_event(self, user: dict) -> EventInputIdentify: + return EventInputIdentify( + self._timestamp_fn(), + user + ) + + def new_custom_event(self, event_name: str, user: dict, data: Any, metric_value: Optional[float]) \ + -> EventInputCustom: + return EventInputCustom( + self._timestamp_fn(), + user, + event_name, + data, + metric_value + ) + + @staticmethod + def is_experiment(flag: FeatureFlag, reason: Optional[dict]) -> bool: + if reason is not None: + if reason.get('inExperiment'): + return True + kind = reason['kind'] + if kind == 'RULE_MATCH': + index = reason['ruleIndex'] + rules = flag.rules + return index >= 0 and index < len(rules) and rules[index].track_events + elif kind == 'FALLTHROUGH': + return flag.track_events_fallthrough + return False diff --git a/ldclient/impl/util.py b/ldclient/impl/util.py new file mode 100644 index 00000000..f6c89db6 --- /dev/null +++ b/ldclient/impl/util.py @@ -0,0 +1,5 @@ +import time + + +def current_time_millis() -> int: + return int(time.time() * 1000) diff --git a/ldclient/util.py b/ldclient/util.py index 66c0c70b..00da8838 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -4,11 +4,10 @@ # currently excluded from documentation - see docs/README.md import logging -from os import environ import sys -import urllib3 +import time -from ldclient.impl.http import HTTPFactory, _base_headers +from ldclient.impl.http import _base_headers log = logging.getLogger(sys.modules[__name__].__name__) diff --git a/testing/builders.py b/testing/builders.py index 5caf4c6f..202627bb 100644 --- a/testing/builders.py +++ b/testing/builders.py @@ -179,6 +179,9 @@ def weight(self, value: Optional[int]) -> SegmentRuleBuilder: return self._set('weight', value) +def build_off_flag_with_value(key: str, value: Any) -> FlagBuilder: + return FlagBuilder(key).version(100).on(False).variations(value).off_variation(0) + def make_boolean_flag_matching_segment(segment: Segment) -> FeatureFlag: return make_boolean_flag_with_clauses(make_clause_matching_segment_key(segment.key)) diff --git a/testing/impl/evaluator_util.py b/testing/impl/evaluator_util.py index a6bbad3d..deb7fe25 100644 --- a/testing/impl/evaluator_util.py +++ 
b/testing/impl/evaluator_util.py @@ -1,14 +1,16 @@ from ldclient import Context from ldclient.evaluation import BigSegmentsStatus from ldclient.impl.evaluator import Evaluator, _make_big_segment_ref -from ldclient.impl.event_factory import _EventFactory +from ldclient.impl.events.types import EventFactory from ldclient.impl.model import * from testing.builders import * from typing import Any, Optional, Tuple, Union basic_user = Context.create('user-key') -event_factory = _EventFactory(False) +fake_timestamp = 0 +event_factory = EventFactory(False, lambda: fake_timestamp) + class EvaluatorBuilder: def __init__(self): diff --git a/testing/impl/events/__init__.py b/testing/impl/events/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testing/test_event_factory.py b/testing/impl/events/test_event_factory.py similarity index 85% rename from testing/test_event_factory.py rename to testing/impl/events/test_event_factory.py index f00bc7ad..f1e40b2d 100644 --- a/testing/test_event_factory.py +++ b/testing/impl/events/test_event_factory.py @@ -1,11 +1,11 @@ -import pytest +from ldclient.context import Context from ldclient.evaluation import EvaluationDetail -from ldclient.impl.event_factory import _EventFactory +from ldclient.impl.events.types import EventFactory from testing.builders import * -_event_factory_default = _EventFactory(False) -_user = { 'key': 'x' } +_event_factory_default = EventFactory(False) +_user = Context.create('x') def make_basic_flag_with_rules(kind, should_track_events): rule_builder = FlagRuleBuilder().rollout({ @@ -28,39 +28,39 @@ def test_fallthrough_track_event_false(): detail = EvaluationDetail('b', 1, {'kind': 'FALLTHROUGH'}) eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) - assert eval.get('trackEvents') is None + assert eval.track_events is False def test_fallthrough_track_event_true(): flag = make_basic_flag_with_rules('fallthrough', True) detail = EvaluationDetail('b', 1, {'kind': 'FALLTHROUGH'}) eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) - assert eval['trackEvents'] == True + assert eval.track_events is True def test_fallthrough_track_event_false_with_experiment(): flag = make_basic_flag_with_rules('fallthrough', False) detail = EvaluationDetail('b', 1, {'kind': 'FALLTHROUGH', 'inExperiment': True}) eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) - assert eval['trackEvents'] == True + assert eval.track_events is True def test_rulematch_track_event_false(): flag = make_basic_flag_with_rules('rulematch', False) detail = EvaluationDetail('b', 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0}) eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) - assert eval.get('trackEvents') is None + assert eval.track_events is False def test_rulematch_track_event_true(): flag = make_basic_flag_with_rules('rulematch', True) detail = EvaluationDetail('b', 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0}) eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) - assert eval['trackEvents'] == True + assert eval.track_events is True def test_rulematch_track_event_false_with_experiment(): flag = make_basic_flag_with_rules('rulematch', False) detail = EvaluationDetail('b', 1, {'kind': 'RULE_MATCH', 'ruleIndex': 0, 'inExperiment': True}) eval = _event_factory_default.new_eval_event(flag, _user, detail, 'b', None) - assert eval['trackEvents'] == True + assert eval.track_events is True diff --git a/testing/test_event_processor.py 
b/testing/impl/events/test_event_processor.py similarity index 58% rename from testing/test_event_processor.py rename to testing/impl/events/test_event_processor.py index 758f694f..0c2180cf 100644 --- a/testing/test_event_processor.py +++ b/testing/impl/events/test_event_processor.py @@ -1,57 +1,27 @@ import json -import pytest from threading import Thread import time import uuid -from ldclient.config import Config, HTTPConfig +from ldclient.config import Config from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator -from ldclient.event_processor import DefaultEventProcessor -from ldclient.util import log -from testing.http_util import start_server, BasicResponse +from ldclient.impl.events.event_processor import DefaultEventProcessor +from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify + +from testing.builders import * from testing.proxy_test_util import do_proxy_tests -from testing.stub_util import MockResponse, MockHttp +from testing.stub_util import MockHttp default_config = Config("fake_sdk_key") -user = { - 'key': 'userkey', - 'name': 'Red' -} +user = {'key': 'userkey', 'name': 'Red'} +user_key = user['key'] filtered_user = { 'key': 'userkey', 'privateAttrs': [ 'name' ] } -numeric_user = { - 'key': 1, - 'secondary': 2, - 'ip': 3, - 'country': 4, - 'email': 5, - 'firstName': 6, - 'lastName': 7, - 'avatar': 8, - 'name': 9, - 'anonymous': False, - 'custom': { - 'age': 99 - } -} -stringified_numeric_user = { - 'key': '1', - 'secondary': '2', - 'ip': '3', - 'country': '4', - 'email': '5', - 'firstName': '6', - 'lastName': '7', - 'avatar': '8', - 'name': '9', - 'anonymous': False, - 'custom': { - 'age': 99 - } -} +flag = FlagBuilder('flagkey').version(2).build() +timestamp = 10000 ep = None mock_http = None @@ -77,156 +47,107 @@ def __init__(self, **kwargs): def test_identify_event_is_queued(): with DefaultTestProcessor() as ep: - e = { 'kind': 'identify', 'user': user } - ep.send_event(e) + ep.send_event(EventInputIdentify(timestamp, user)) output = flush_and_get_events(ep) assert len(output) == 1 assert output == [{ 'kind': 'identify', - 'creationDate': e['creationDate'], - 'key': user['key'], + 'creationDate': timestamp, + 'key': user_key, 'user': user }] -def test_user_is_filtered_in_identify_event(): +def test_context_is_filtered_in_identify_event(): with DefaultTestProcessor(all_attributes_private = True) as ep: - e = { 'kind': 'identify', 'user': user } - ep.send_event(e) + ep.send_event(EventInputIdentify(timestamp, user)) output = flush_and_get_events(ep) assert len(output) == 1 assert output == [{ 'kind': 'identify', - 'creationDate': e['creationDate'], - 'key': user['key'], + 'creationDate': timestamp, + 'key': user_key, 'user': filtered_user }] -def test_user_attrs_are_stringified_in_identify_event(): - with DefaultTestProcessor() as ep: - e = { 'kind': 'identify', 'user': numeric_user } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 1 - assert output == [{ - 'kind': 'identify', - 'creationDate': e['creationDate'], - 'key': stringified_numeric_user['key'], - 'user': stringified_numeric_user - }] - def test_individual_feature_event_is_queued_with_index_event(): with DefaultTestProcessor() as ep: - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } + e = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 'value', None, 'default', None, True) ep.send_event(e) 
output = flush_and_get_events(ep) assert len(output) == 3 - check_index_event(output[0], e, user) - check_feature_event(output[1], e, False, None, None) + check_index_event(output[0], e) + check_feature_event(output[1], e) check_summary_event(output[2]) -def test_user_is_filtered_in_index_event(): +def test_context_is_filtered_in_index_event(): with DefaultTestProcessor(all_attributes_private = True) as ep: - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True, - 'prereqOf': 'prereqFlagKey' - } + e = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 'value', None, 'default', None, True) ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 3 check_index_event(output[0], e, filtered_user) - check_feature_event(output[1], e, False, None, 'prereqFlagKey') - check_summary_event(output[2]) - -def test_user_attrs_are_stringified_in_index_event(): - with DefaultTestProcessor() as ep: - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': numeric_user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - ep.send_event(e) - - output = flush_and_get_events(ep) - assert len(output) == 3 - check_index_event(output[0], e, stringified_numeric_user) - check_feature_event(output[1], e, False, None, None) + check_feature_event(output[1], e) check_summary_event(output[2]) -def test_two_events_for_same_user_only_produce_one_index_event(): +def test_two_events_for_same_context_only_produce_one_index_event(): with DefaultTestProcessor(user_keys_flush_interval = 300) as ep: - e0 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - e1 = e0.copy() + e0 = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 'value1', None, 'default', None, True) + e1 = EventInputEvaluation(timestamp, user, flag.key, flag, 2, 'value2', None, 'default', None, True) ep.send_event(e0) ep.send_event(e1) output = flush_and_get_events(ep) assert len(output) == 4 - check_index_event(output[0], e0, user) - check_feature_event(output[1], e0, False, None, None) - check_feature_event(output[2], e1, False, None, None) + check_index_event(output[0], e0) + check_feature_event(output[1], e0) + check_feature_event(output[2], e1) check_summary_event(output[3]) -def test_new_index_event_is_added_if_user_cache_has_been_cleared(): +def test_new_index_event_is_added_if_context_cache_has_been_cleared(): with DefaultTestProcessor(user_keys_flush_interval = 0.1) as ep: - e0 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - e1 = e0.copy() + e0 = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 'value1', None, 'default', None, True) + e1 = EventInputEvaluation(timestamp, user, flag.key, flag, 2, 'value2', None, 'default', None, True) ep.send_event(e0) time.sleep(0.2) ep.send_event(e1) output = flush_and_get_events(ep) assert len(output) == 5 - check_index_event(output[0], e0, user) - check_feature_event(output[1], e0, False, None, None) - check_index_event(output[2], e1, user) - check_feature_event(output[3], e1, False, None, None) + check_index_event(output[0], e0) + check_feature_event(output[1], e0) + check_index_event(output[2], e1) + check_feature_event(output[3], e1) check_summary_event(output[4]) def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode(): 
with DefaultTestProcessor() as ep: future_time = now() + 100000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': False, 'debugEventsUntilDate': future_time - } + debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(future_time).build() + e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 3 - check_index_event(output[0], e, user) - check_feature_event(output[1], e, True, user, None) + check_index_event(output[0], e) + check_debug_event(output[1], e) check_summary_event(output[2]) def test_event_can_be_both_tracked_and_debugged(): with DefaultTestProcessor() as ep: future_time = now() + 100000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': True, 'debugEventsUntilDate': future_time - } + debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(future_time).build() + e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, True) ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 4 - check_index_event(output[0], e, user) - check_feature_event(output[1], e, False, None, None) - check_feature_event(output[2], e, True, user, None) + check_index_event(output[0], e) + check_feature_event(output[1], e) + check_debug_event(output[2], e) check_summary_event(output[3]) def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_before_expiration_time(): @@ -236,24 +157,21 @@ def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_befo # Send and flush an event we don't care about, just to set the last server time mock_http.set_server_time(server_time) - ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }}) + ep.send_event(EventInputIdentify(timestamp, {'key': 'otherUser'})) flush_and_get_events(ep) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than both the client time and the server time debug_until = server_time + 10000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': False, 'debugEventsUntilDate': debug_until - } + debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(debug_until).build() + e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) # Should get a summary event only, not a full feature event output = flush_and_get_events(ep) assert len(output) == 3 - check_index_event(output[0], e, user) - check_feature_event(output[1], e, True, user, None) # debug event + check_index_event(output[0], e) + check_debug_event(output[1], e) check_summary_event(output[2]) def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_server_time(): @@ -263,23 +181,20 @@ def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_se # Send and flush an event we don't care about, just to set the last server time mock_http.set_server_time(server_time) - ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }}) + ep.send_event(EventInputIdentify(timestamp, {'key': 'otherUser'})) 
flush_and_get_events(ep) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than the server time, but in the past compared to the client. debug_until = server_time + 1000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': False, 'debugEventsUntilDate': debug_until - } + debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(debug_until).build() + e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) # Should get a summary event only, not a full feature event output = flush_and_get_events(ep) assert len(output) == 2 - check_index_event(output[0], e, user) + check_index_event(output[0], e) check_summary_event(output[1]) def test_debug_mode_expires_based_on_server_time_if_server_time_is_later_than_client_time(): @@ -289,63 +204,39 @@ def test_debug_mode_expires_based_on_server_time_if_server_time_is_later_than_cl # Send and flush an event we don't care about, just to set the last server time mock_http.set_server_time(server_time) - ep.send_event({ 'kind': 'identify', 'user': { 'key': 'otherUser' }}) + ep.send_event(EventInputIdentify(timestamp, {'key': 'otherUser'})) flush_and_get_events(ep) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than the client time, but in the past compared to the server. debug_until = server_time - 1000 - e = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', - 'trackEvents': False, 'debugEventsUntilDate': debug_until - } + debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(debug_until).build() + e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) # Should get a summary event only, not a full feature event output = flush_and_get_events(ep) assert len(output) == 2 - check_index_event(output[0], e, user) - check_summary_event(output[1]) - -def test_two_feature_events_for_same_user_generate_only_one_index_event(): - with DefaultTestProcessor() as ep: - e1 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value1', 'default': 'default', 'trackEvents': False - } - e2 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 2, 'value': 'value2', 'default': 'default', 'trackEvents': False - } - ep.send_event(e1) - ep.send_event(e2) - - output = flush_and_get_events(ep) - assert len(output) == 2 - check_index_event(output[0], e1, user) + check_index_event(output[0], e) check_summary_event(output[1]) def test_nontracked_events_are_summarized(): with DefaultTestProcessor() as ep: - e1 = { - 'kind': 'feature', 'key': 'flagkey1', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value1', 'default': 'default1', 'trackEvents': False - } - e2 = { - 'kind': 'feature', 'key': 'flagkey2', 'version': 22, 'user': user, - 'variation': 2, 'value': 'value2', 'default': 'default2', 'trackEvents': False - } + flag1 = FlagBuilder('flagkey1').version(11).build() + flag2 = FlagBuilder('flagkey2').version(22).build() + earlier_time, later_time = 1111111, 2222222 + e1 = EventInputEvaluation(later_time, user, flag1.key, flag1, 1, 'value1', None, 'default1', None, False) + e2 = EventInputEvaluation(earlier_time, user, 
flag2.key, flag2, 2, 'value2', None, 'default2', None, False) ep.send_event(e1) ep.send_event(e2) output = flush_and_get_events(ep) assert len(output) == 2 - check_index_event(output[0], e1, user) + check_index_event(output[0], e1) se = output[1] assert se['kind'] == 'summary' - assert se['startDate'] == e1['creationDate'] - assert se['endDate'] == e2['creationDate'] + assert se['startDate'] == earlier_time + assert se['endDate'] == later_time assert se['features'] == { 'flagkey1': { 'default': 'default1', @@ -359,13 +250,13 @@ def test_nontracked_events_are_summarized(): def test_custom_event_is_queued_with_user(): with DefaultTestProcessor() as ep: - e = { 'kind': 'custom', 'key': 'eventkey', 'user': user, 'data': { 'thing': 'stuff '}, 'metricValue': 1.5 } + e = EventInputCustom(timestamp, user, 'eventkey', { 'thing': 'stuff '}, 1.5) ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 2 - check_index_event(output[0], e, user) - check_custom_event(output[1], e, None) + check_index_event(output[0], e) + check_custom_event(output[1], e) def test_nothing_is_sent_if_there_are_no_events(): with DefaultTestProcessor() as ep: @@ -375,7 +266,7 @@ def test_nothing_is_sent_if_there_are_no_events(): def test_sdk_key_is_sent(): with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() @@ -383,7 +274,7 @@ def test_sdk_key_is_sent(): def test_wrapper_header_not_sent_when_not_set(): with DefaultTestProcessor() as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() @@ -391,7 +282,7 @@ def test_wrapper_header_not_sent_when_not_set(): def test_wrapper_header_sent_when_set(): with DefaultTestProcessor(wrapper_name = "Flask", wrapper_version = "0.0.1") as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() @@ -399,7 +290,7 @@ def test_wrapper_header_sent_when_set(): def test_wrapper_header_sent_without_version(): with DefaultTestProcessor(wrapper_name = "Flask") as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() @@ -407,7 +298,7 @@ def test_wrapper_header_sent_without_version(): def test_event_schema_set_on_event_send(): with DefaultTestProcessor() as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() @@ -435,7 +326,7 @@ def test_periodic_diagnostic_includes_events_in_batch(): # Ignore init event flush_and_get_events(ep) # Send a payload with a single event - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) flush_and_get_events(ep) ep._send_diagnostic() @@ -449,12 +340,9 @@ def test_periodic_diagnostic_includes_deduplicated_users(): with DefaultTestProcessor(diagnostic_opt_out=False) as ep: # Ignore init event flush_and_get_events(ep) - # Send two eval events with the same user to cause a user deduplication - e0 = { - 'kind': 'feature', 'key': 'flagkey', 'version': 11, 'user': user, - 'variation': 1, 'value': 'value', 'default': 'default', 'trackEvents': True - } - e1 = e0.copy(); + # Send two custom events with the same user to cause a user deduplication + e0 = EventInputCustom(timestamp, user, 'event1', None, 
None) + e1 = EventInputCustom(timestamp, user, 'event2', None, None) ep.send_event(e0) ep.send_event(e1) flush_and_get_events(ep) @@ -500,8 +388,8 @@ def start_consuming_events(): with DefaultEventProcessor(config, mock_http, dispatcher_factory) as ep: ep_inbox = ep_inbox_holder[0] - event1 = { 'kind': 'custom', 'key': 'event1', 'user': user } - event2 = { 'kind': 'custom', 'key': 'event2', 'user': user } + event1 = EventInputCustom(timestamp, user, 'event1') + event2 = EventInputCustom(timestamp, user, 'event2') ep.send_event(event1) ep.send_event(event2) # this event should be dropped - inbox is full message1 = ep_inbox.get(block=False) @@ -513,7 +401,7 @@ def start_consuming_events(): def test_http_proxy(monkeypatch): def _event_processor_proxy_test(server, config, secure): with DefaultEventProcessor(config) as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() do_proxy_tests(_event_processor_proxy_test, 'POST', monkeypatch) @@ -521,12 +409,12 @@ def _event_processor_proxy_test(server, config, secure): def verify_unrecoverable_http_error(status): with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: mock_http.set_response_status(status) - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() mock_http.reset() - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() assert mock_http.request_data is None @@ -534,19 +422,19 @@ def verify_unrecoverable_http_error(status): def verify_recoverable_http_error(status): with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: mock_http.set_response_status(status) - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() mock_http.reset() - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() assert mock_http.request_data is not None def test_event_payload_id_is_sent(): with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() @@ -557,11 +445,11 @@ def test_event_payload_id_is_sent(): def test_event_payload_id_changes_between_requests(): with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() - ep.send_event({ 'kind': 'identify', 'user': user }) + ep.send_event(EventInputIdentify(timestamp, user)) ep.flush() ep._wait_until_inactive() @@ -577,38 +465,40 @@ def flush_and_get_events(ep): else: return json.loads(mock_http.request_data) -def check_index_event(data, source, user): +def check_index_event(data, source: EventInput, user: Optional[dict] = None): assert data['kind'] == 'index' - assert data['creationDate'] == source['creationDate'] - assert data['user'] == user - -def check_feature_event(data, source, debug, inline_user, prereq_of): - assert data['kind'] == ('debug' if debug else 'feature') - assert data['creationDate'] == source['creationDate'] - assert data['key'] == source['key'] - assert data.get('version') == source.get('version') - assert data.get('variation') == 
source.get('variation')
-    assert data.get('value') == source.get('value')
-    assert data.get('default') == source.get('default')
-    if inline_user is None:
-        assert data['userKey'] == str(source['user']['key'])
-    else:
-        assert data['user'] == inline_user
-    if prereq_of is None:
-        assert "prereqOf" not in data
-    else:
-        assert data['prereqOf'] == prereq_of
-
-def check_custom_event(data, source, inline_user):
+    assert data['creationDate'] == source.timestamp
+    assert data['user'] == (source.user if user is None else user)
+
+def check_feature_event(data, source: EventInputEvaluation):
+    assert data['kind'] == 'feature'
+    assert data['creationDate'] == source.timestamp
+    assert data['key'] == source.key
+    assert data.get('version') == (None if source.flag is None else source.flag.version)
+    assert data.get('variation') == source.variation
+    assert data.get('value') == source.value
+    assert data.get('default') == source.default_value
+    assert data['userKey'] == source.user['key']
+    assert data.get('prereqOf') == (None if source.prereq_of is None else source.prereq_of.key)
+
+def check_debug_event(data, source: EventInputEvaluation, user: Optional[dict] = None):
+    assert data['kind'] == 'debug'
+    assert data['creationDate'] == source.timestamp
+    assert data['key'] == source.key
+    assert data.get('version') == (None if source.flag is None else source.flag.version)
+    assert data.get('variation') == source.variation
+    assert data.get('value') == source.value
+    assert data.get('default') == source.default_value
+    assert data['user'] == (source.user if user is None else user)
+    assert data.get('prereqOf') == (None if source.prereq_of is None else source.prereq_of.key)
+
+def check_custom_event(data, source: EventInputCustom):
     assert data['kind'] == 'custom'
-    assert data['creationDate'] == source['creationDate']
-    assert data['key'] == source['key']
-    assert data['data'] == source['data']
-    if inline_user is None:
-        assert data['userKey'] == source['user']['key']
-    else:
-        assert data['user'] == inline_user
-    assert data.get('metricValue') == source.get('metricValue')
+    assert data['creationDate'] == source.timestamp
+    assert data['key'] == source.key
+    assert data['data'] == source.data
+    assert data['userKey'] == source.user['key']
+    assert data.get('metricValue') == source.metric_value
 
 def check_summary_event(data):
     assert data['kind'] == 'summary'
diff --git a/testing/impl/events/test_event_summarizer.py b/testing/impl/events/test_event_summarizer.py
new file mode 100644
index 00000000..f8e55f1a
--- /dev/null
+++ b/testing/impl/events/test_event_summarizer.py
@@ -0,0 +1,47 @@
+import pytest
+
+from ldclient.impl.events.event_summarizer import EventSummarizer
+from ldclient.impl.events.types import *
+
+from testing.builders import *
+
+
+user = { 'key': 'user1' }
+flag1 = FlagBuilder('flag1').version(11).build()
+flag2 = FlagBuilder('flag2').version(22).build()
+
+
+def test_summarize_event_sets_start_and_end_dates():
+    es = EventSummarizer()
+    event1 = EventInputEvaluation(2000, user, flag1.key, flag1, 0, '', None, None)
+    event2 = EventInputEvaluation(1000, user, flag1.key, flag1, 0, '', None, None)
+    event3 = EventInputEvaluation(1500, user, flag1.key, flag1, 0, '', None, None)
+    es.summarize_event(event1)
+    es.summarize_event(event2)
+    es.summarize_event(event3)
+    data = es.snapshot()
+
+    assert data.start_date == 1000
+    assert data.end_date == 2000
+
+def test_summarize_event_increments_counters():
+    es = EventSummarizer()
+    event1 = EventInputEvaluation(1000, user, flag1.key, flag1, 1, 'value1', None, 'default1')
+    event2 = 
EventInputEvaluation(1000, user, flag1.key, flag1, 2, 'value2', None, 'default1') + event3 = EventInputEvaluation(1000, user, flag2.key, flag2, 1, 'value99', None, 'default2') + event4 = EventInputEvaluation(1000, user, flag1.key, flag1, 1, 'value1', None, 'default1') + event5 = EventInputEvaluation(1000, user, 'badkey', None, None, 'default3', None, 'default3') + es.summarize_event(event1) + es.summarize_event(event2) + es.summarize_event(event3) + es.summarize_event(event4) + es.summarize_event(event5) + data = es.snapshot() + + expected = { + ('flag1', 1, 11): { 'count': 2, 'value': 'value1', 'default': 'default1' }, + ('flag1', 2, 11): { 'count': 1, 'value': 'value2', 'default': 'default1' }, + ('flag2', 1, 22): { 'count': 1, 'value': 'value99', 'default': 'default2' }, + ('badkey', None, None): { 'count': 1, 'value': 'default3', 'default': 'default3' } + } + assert data.counters == expected diff --git a/testing/impl/test_evaluator.py b/testing/impl/test_evaluator.py index bfd39b6c..5e0e8044 100644 --- a/testing/impl/test_evaluator.py +++ b/testing/impl/test_evaluator.py @@ -1,6 +1,5 @@ from ldclient.client import Context from ldclient.evaluation import EvaluationDetail -from ldclient.impl.evaluator import _context_to_user_dict from testing.builders import * from testing.impl.evaluator_util import * diff --git a/testing/impl/test_evaluator_prerequisites.py b/testing/impl/test_evaluator_prerequisites.py index 1cb99f70..e6080a68 100644 --- a/testing/impl/test_evaluator_prerequisites.py +++ b/testing/impl/test_evaluator_prerequisites.py @@ -1,10 +1,11 @@ import pytest +from ldclient.impl.evaluator import _context_to_user_dict +from ldclient.impl.events.types import EventInputEvaluation from testing.builders import * from ldclient.client import Context from ldclient.evaluation import EvaluationDetail -from ldclient.impl.evaluator import _context_to_user_dict from testing.builders import * from testing.impl.evaluator_util import * @@ -26,8 +27,9 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): evaluator = EvaluatorBuilder().with_flag(flag1).build() user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + events_should_be = [ + EventInputEvaluation(0, _context_to_user_dict(user), flag1.key, flag1, 1, 'e', None, None, flag, False) + ] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): @@ -38,8 +40,9 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): evaluator = EvaluatorBuilder().with_flag(flag1).build() user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 0, 'value': 'd', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + events_should_be = [ + EventInputEvaluation(0, _context_to_user_dict(user), flag1.key, flag1, 0, 'd', None, None, flag, False) + ] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_rules(): @@ -50,8 +53,9 @@ def 
test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_ru evaluator = EvaluatorBuilder().with_flag(flag1).build() user = Context.create('x') detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) - events_should_be = [{'kind': 'feature', 'key': 'feature1', 'variation': 1, 'value': 'e', 'default': None, - 'version': 2, 'user': _context_to_user_dict(user), 'prereqOf': 'feature0'}] + events_should_be = [ + EventInputEvaluation(0, _context_to_user_dict(user), flag1.key, flag1, 1, 'e', None, None, flag, False) + ] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) @pytest.mark.parametrize("depth", [1, 2, 3, 4]) diff --git a/testing/test_event_summarizer.py b/testing/test_event_summarizer.py deleted file mode 100644 index ae411aaf..00000000 --- a/testing/test_event_summarizer.py +++ /dev/null @@ -1,63 +0,0 @@ -import pytest - -from ldclient.event_summarizer import EventSummarizer - - -user = { 'key': 'user1' } - -def test_summarize_event_does_nothing_for_identify_event(): - es = EventSummarizer() - snapshot = es.snapshot() - es.summarize_event({ 'kind': 'identify', 'creationDate': 1000, 'user': user }) - - assert es.snapshot() == snapshot - -def test_summarize_event_does_nothing_for_custom_event(): - es = EventSummarizer() - snapshot = es.snapshot() - es.summarize_event({ 'kind': 'custom', 'creationDate': 1000, 'key': 'eventkey', 'user': user }) - - assert es.snapshot() == snapshot - -def test_summarize_event_sets_start_and_end_dates(): - es = EventSummarizer() - event1 = { 'kind': 'feature', 'creationDate': 2000, 'key': 'flag', 'user': user, - 'version': 1, 'variation': 0, 'value': '', 'default': None } - event2 = { 'kind': 'feature', 'creationDate': 1000, 'key': 'flag', 'user': user, - 'version': 1, 'variation': 0, 'value': '', 'default': None } - event3 = { 'kind': 'feature', 'creationDate': 1500, 'key': 'flag', 'user': user, - 'version': 1, 'variation': 0, 'value': '', 'default': None } - es.summarize_event(event1) - es.summarize_event(event2) - es.summarize_event(event3) - data = es.snapshot() - - assert data.start_date == 1000 - assert data.end_date == 2000 - -def test_summarize_event_increments_counters(): - es = EventSummarizer() - event1 = { 'kind': 'feature', 'creationDate': 1000, 'key': 'flag1', 'user': user, - 'version': 11, 'variation': 1, 'value': 'value1', 'default': 'default1' } - event2 = { 'kind': 'feature', 'creationDate': 1000, 'key': 'flag1', 'user': user, - 'version': 11, 'variation': 2, 'value': 'value2', 'default': 'default1' } - event3 = { 'kind': 'feature', 'creationDate': 1000, 'key': 'flag2', 'user': user, - 'version': 22, 'variation': 1, 'value': 'value99', 'default': 'default2' } - event4 = { 'kind': 'feature', 'creationDate': 1000, 'key': 'flag1', 'user': user, - 'version': 11, 'variation': 1, 'value': 'value1', 'default': 'default1' } - event5 = { 'kind': 'feature', 'creationDate': 1000, 'key': 'badkey', 'user': user, - 'version': None, 'variation': None, 'value': 'default3', 'default': 'default3' } - es.summarize_event(event1) - es.summarize_event(event2) - es.summarize_event(event3) - es.summarize_event(event4) - es.summarize_event(event5) - data = es.snapshot() - - expected = { - ('flag1', 1, 11): { 'count': 2, 'value': 'value1', 'default': 'default1' }, - ('flag1', 2, 11): { 'count': 1, 'value': 'value2', 'default': 'default1' }, - ('flag2', 1, 22): { 'count': 1, 'value': 'value99', 'default': 'default2' }, - ('badkey', None, None): { 'count': 1, 'value': 'default3', 'default': 'default3' } - } - 
assert data.counters == expected diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index a77203f7..6fd9bbc3 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -1,17 +1,14 @@ from ldclient.client import LDClient, Config, Context -from ldclient.event_processor import DefaultEventProcessor from ldclient.feature_store import InMemoryFeatureStore -from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor +from ldclient.impl.stubs import NullUpdateProcessor from ldclient.interfaces import UpdateProcessor from ldclient.polling import PollingUpdateProcessor from ldclient.streaming import StreamingUpdateProcessor from ldclient.versioned_data_kind import FEATURES, SEGMENTS -import logging import pytest +from testing.builders import * from testing.stub_util import CapturingFeatureStore, MockEventProcessor, MockUpdateProcessor -from testing.sync_util import wait_until -import queue unreachable_uri="http://fake" @@ -56,17 +53,6 @@ def make_ldd_client(): stream_uri=unreachable_uri)) -def make_off_flag_with_value(key, value): - return { - u'key': key, - u'version': 100, - u'salt': u'', - u'on': False, - u'variations': [value], - u'offVariation': 0 - } - - def get_first_event(c): e = c._event_processor._events.pop(0) c._event_processor._events = [] @@ -79,23 +65,6 @@ def count_events(c): return n -def test_client_has_null_event_processor_if_offline(): - with make_offline_client() as client: - assert isinstance(client._event_processor, NullEventProcessor) - - -def test_client_has_null_event_processor_if_send_events_off(): - config = Config(sdk_key="secret", base_uri=unreachable_uri, - update_processor_class = MockUpdateProcessor, send_events=False) - with LDClient(config=config) as client: - assert isinstance(client._event_processor, NullEventProcessor) - - -def test_client_has_normal_event_processor_in_ldd_mode(): - with make_ldd_client() as client: - assert isinstance(client._event_processor, DefaultEventProcessor) - - def test_client_has_null_update_processor_in_offline_mode(): with make_offline_client() as client: assert isinstance(client._update_processor, NullUpdateProcessor) @@ -125,91 +94,6 @@ def test_toggle_offline(): assert client.variation('feature.key', user, default=None) is None -def test_identify(): - with make_client() as client: - client.identify(context) - e = get_first_event(client) - assert e['kind'] == 'identify' and e['key'] == u'xyz' and e['user'] == user - - -def test_identify_with_user_dict(): - with make_client() as client: - client.identify(user) - e = get_first_event(client) - assert e['kind'] == 'identify' and e['key'] == u'xyz' and e['user'] == user - - -def test_identify_no_user(): - with make_client() as client: - client.identify(None) - assert count_events(client) == 0 - - -def test_identify_no_user_key(): - with make_client() as client: - client.identify({ 'name': 'nokey' }) - assert count_events(client) == 0 - - -def test_identify_invalid_context(): - with make_client() as client: - client.identify(Context.create('')) - assert count_events(client) == 0 - - -def test_track(): - with make_client() as client: - client.track('my_event', context) - e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e.get('data') is None and e.get('metricValue') is None - - -def test_track_with_user_dict(): - with make_client() as client: - client.track('my_event', user) - e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and 
e.get('data') is None and e.get('metricValue') is None - - -def test_track_with_data(): - with make_client() as client: - client.track('my_event', context, 42) - e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 and e.get('metricValue') is None - - -def test_track_with_metric_value(): - with make_client() as client: - client.track('my_event', context, 42, 1.5) - e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == user and e['data'] == 42 and e.get('metricValue') == 1.5 - - -def test_track_no_user(): - with make_client() as client: - client.track('my_event', None) - assert count_events(client) == 0 - - -def test_track_no_user_key(): - with make_client() as client: - client.track('my_event', { 'name': 'nokey' }) - assert count_events(client) == 0 - - -def test_track_invalid_context(): - with make_client() as client: - client.track('my_event', Context.create('')) - assert count_events(client) == 0 - - -def test_track_anonymous_user(): - with make_client() as client: - client.track('my_event', anonymous_user) - e = get_first_event(client) - assert e['kind'] == 'custom' and e['key'] == 'my_event' and e['user'] == anonymous_user and e.get('data') is None and e.get('metricValue') is None and e.get('contextKind') == 'anonymousUser' - - def test_defaults(): config=Config("SDK_KEY", base_uri="http://localhost:3000", defaults={"foo": "bar"}, offline=True) with LDClient(config=config) as client: @@ -226,8 +110,6 @@ def test_defaults_and_online(): feature_store=InMemoryFeatureStore())) actual = my_client.variation('foo', user, default="originalDefault") assert actual == expected - e = get_first_event(my_client) - assert e['kind'] == 'feature' and e['key'] == u'foo' and e['user'] == user def test_defaults_and_online_no_default(): @@ -237,8 +119,6 @@ def test_defaults_and_online_no_default(): event_processor_class=MockEventProcessor, update_processor_class=MockUpdateProcessor)) assert "jim" == my_client.variation('baz', user, default="jim") - e = get_first_event(my_client) - assert e['kind'] == 'feature' and e['key'] == u'baz' and e['user'] == user def test_no_defaults(): @@ -246,234 +126,6 @@ def test_no_defaults(): assert "bar" == client.variation('foo', user, default="bar") -def test_event_for_existing_feature(): - feature = make_off_flag_with_value('feature.key', 'value') - feature['trackEvents'] = True - feature['debugEventsUntilDate'] = 1000 - store = InMemoryFeatureStore() - store.init({FEATURES: {'feature.key': feature}}) - with make_client(store) as client: - assert 'value' == client.variation('feature.key', user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e.get('reason') is None and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000 and - e.get('contextKind') is None) - - -def test_event_for_existing_feature_anonymous_user(): - feature = make_off_flag_with_value('feature.key', 'value') - feature['trackEvents'] = True - feature['debugEventsUntilDate'] = 1000 - store = InMemoryFeatureStore() - store.init({FEATURES: {'feature.key': feature}}) - with make_client(store) as client: - assert 'value' == client.variation('feature.key', anonymous_user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - 
e['key'] == 'feature.key' and - e['user'] == anonymous_user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e.get('reason') is None and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000 and - e['contextKind'] == 'anonymousUser') - - -def test_event_for_existing_feature_with_reason(): - feature = make_off_flag_with_value('feature.key', 'value') - feature['trackEvents'] = True - feature['debugEventsUntilDate'] = 1000 - store = InMemoryFeatureStore() - store.init({FEATURES: {'feature.key': feature}}) - with make_client(store) as client: - assert 'value' == client.variation_detail('feature.key', user, default='default').value - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e['reason'] == {'kind': 'OFF'} and - e['default'] == 'default' and - e['trackEvents'] == True and - e['debugEventsUntilDate'] == 1000) - - -def test_event_for_existing_feature_with_tracked_rule(): - feature = { - 'key': 'feature.key', - 'version': 100, - 'salt': u'', - 'on': True, - 'rules': [ - { - 'clauses': [ - { 'attribute': 'key', 'op': 'in', 'values': [ user['key'] ] } - ], - 'variation': 0, - 'trackEvents': True, - 'id': 'rule_id' - } - ], - 'variations': [ 'value' ] - } - store = InMemoryFeatureStore() - store.init({FEATURES: {feature['key']: feature}}) - client = make_client(store) - assert 'value' == client.variation(feature['key'], user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == feature['key'] and - e['user'] == user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e['reason'] == { 'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'rule_id' } and - e['default'] == 'default' and - e['trackEvents'] == True and - e.get('debugEventsUntilDate') is None) - - -def test_event_for_existing_feature_with_untracked_rule(): - feature = { - 'key': 'feature.key', - 'version': 100, - 'salt': u'', - 'on': True, - 'rules': [ - { - 'clauses': [ - { 'attribute': 'key', 'op': 'in', 'values': [ user['key'] ] } - ], - 'variation': 0, - 'trackEvents': False, - 'id': 'rule_id' - } - ], - 'variations': [ 'value' ] - } - store = InMemoryFeatureStore() - store.init({FEATURES: {feature['key']: feature}}) - client = make_client(store) - assert 'value' == client.variation(feature['key'], user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == feature['key'] and - e['user'] == user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e.get('reason') is None and - e['default'] == 'default' and - e.get('trackEvents', False) == False and - e.get('debugEventsUntilDate') is None) - - -def test_event_for_existing_feature_with_tracked_fallthrough(): - feature = { - 'key': 'feature.key', - 'version': 100, - 'salt': u'', - 'on': True, - 'rules': [], - 'fallthrough': { 'variation': 0 }, - 'variations': [ 'value' ], - 'trackEventsFallthrough': True - } - store = InMemoryFeatureStore() - store.init({FEATURES: {feature['key']: feature}}) - client = make_client(store) - assert 'value' == client.variation(feature['key'], user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == feature['key'] and - e['user'] == user and - e['version'] == 
feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e['reason'] == { 'kind': 'FALLTHROUGH' } and - e['default'] == 'default' and - e['trackEvents'] == True and - e.get('debugEventsUntilDate') is None) - - -def test_event_for_existing_feature_with_untracked_fallthrough(): - feature = { - 'key': 'feature.key', - 'version': 100, - 'salt': u'', - 'on': True, - 'rules': [], - 'fallthrough': { 'variation': 0 }, - 'variations': [ 'value' ], - 'trackEventsFallthrough': False - } - store = InMemoryFeatureStore() - store.init({FEATURES: {feature['key']: feature}}) - client = make_client(store) - assert 'value' == client.variation(feature['key'], user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == feature['key'] and - e['user'] == user and - e['version'] == feature['version'] and - e['value'] == 'value' and - e['variation'] == 0 and - e.get('reason') is None and - e['default'] == 'default' and - e.get('trackEvents', False) == False and - e.get('debugEventsUntilDate') is None) - - -def test_event_for_unknown_feature(): - store = InMemoryFeatureStore() - store.init({FEATURES: {}}) - with make_client(store) as client: - assert 'default' == client.variation('feature.key', user, default='default') - e = get_first_event(client) - assert (e['kind'] == 'feature' and - e['key'] == 'feature.key' and - e['user'] == user and - e['value'] == 'default' and - e.get('variation') is None and - e['default'] == 'default') - - -def test_no_event_for_existing_feature_with_no_user(): - feature = make_off_flag_with_value('feature.key', 'value') - feature['trackEvents'] = True - feature['debugEventsUntilDate'] = 1000 - store = InMemoryFeatureStore() - store.init({FEATURES: {'feature.key': feature}}) - with make_client(store) as client: - assert 'default' == client.variation('feature.key', None, default='default') - assert count_events(client) == 0 - - -def test_no_event_for_existing_feature_with_invalid_context(): - feature = make_off_flag_with_value('feature.key', 'value') - feature['trackEvents'] = True - feature['debugEventsUntilDate'] = 1000 - store = InMemoryFeatureStore() - store.init({FEATURES: {'feature.key': feature}}) - with make_client(store) as client: - bad_context = Context.create('') - assert 'default' == client.variation('feature.key', bad_context, default='default') - assert count_events(client) == 0 - - def test_secure_mode_hash(): context_to_hash = Context.create('Message') equivalent_user_to_hash = {'key': 'Message'} diff --git a/testing/test_ldclient_evaluation.py b/testing/test_ldclient_evaluation.py index 84986f7c..ff3753fb 100644 --- a/testing/test_ldclient_evaluation.py +++ b/testing/test_ldclient_evaluation.py @@ -1,5 +1,3 @@ -import pytest -import json import time from ldclient.client import LDClient, Config, Context from ldclient.config import BigSegmentsConfig @@ -13,10 +11,9 @@ from testing.builders import * from testing.mock_components import MockBigSegmentStore from testing.stub_util import MockEventProcessor, MockUpdateProcessor -from testing.test_ldclient import make_off_flag_with_value +from testing.test_ldclient import make_client, user -user = { 'key': 'userkey' } flag1 = { 'key': 'key1', 'version': 100, @@ -55,13 +52,6 @@ def init(self, data): def initialized(self): return True -def make_client(store): - return LDClient(config=Config(sdk_key='SDK_KEY', - base_uri='http://test', - event_processor_class=MockEventProcessor, - update_processor_class=MockUpdateProcessor, - feature_store=store)) - def 
get_log_lines(caplog, level): loglines = caplog.records if callable(loglines): @@ -71,7 +61,7 @@ def get_log_lines(caplog, level): def test_variation_for_existing_feature(): - feature = make_off_flag_with_value('feature.key', 'value') + feature = build_off_flag_with_value('feature.key', 'value').build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) @@ -99,14 +89,14 @@ def test_variation_for_unknown_feature(): assert 'default' == client.variation('feature.key', user, default='default') def test_variation_when_user_is_none(): - feature = make_off_flag_with_value('feature.key', 'value') + feature = build_off_flag_with_value('feature.key', 'value').build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) assert 'default' == client.variation('feature.key', None, default='default') def test_variation_when_user_has_no_key(): - feature = make_off_flag_with_value('feature.key', 'value') + feature = build_off_flag_with_value('feature.key', 'value').build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) @@ -114,7 +104,7 @@ def test_variation_when_user_has_no_key(): def test_variation_for_invalid_context(): c = Context.create('') - feature = make_off_flag_with_value('feature.key', 'value') + feature = build_off_flag_with_value('feature.key', 'value').build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) @@ -128,7 +118,7 @@ def test_variation_for_flag_that_evaluates_to_none(): assert 'default' == client.variation('feature.key', user, default='default') def test_variation_detail_for_existing_feature(): - feature = make_off_flag_with_value('feature.key', 'value') + feature = build_off_flag_with_value('feature.key', 'value').build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) @@ -142,7 +132,7 @@ def test_variation_detail_for_unknown_feature(): assert expected == client.variation_detail('feature.key', user, default='default') def test_variation_detail_when_user_is_none(): - feature = make_off_flag_with_value('feature.key', 'value') + feature = build_off_flag_with_value('feature.key', 'value').build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) @@ -150,7 +140,7 @@ def test_variation_detail_when_user_is_none(): assert expected == client.variation_detail('feature.key', None, default='default') def test_variation_detail_when_user_has_no_key(): - feature = make_off_flag_with_value('feature.key', 'value') + feature = build_off_flag_with_value('feature.key', 'value').build() store = InMemoryFeatureStore() store.init({FEATURES: {'feature.key': feature}}) client = make_client(store) diff --git a/testing/test_ldclient_events.py b/testing/test_ldclient_events.py new file mode 100644 index 00000000..e1c6a2af --- /dev/null +++ b/testing/test_ldclient_events.py @@ -0,0 +1,296 @@ +from ldclient.client import LDClient, Config, Context +from ldclient.impl.events.event_processor import DefaultEventProcessor +from ldclient.feature_store import InMemoryFeatureStore +from ldclient.impl.events.types import EventInputCustom, EventInputEvaluation, EventInputIdentify +from ldclient.impl.stubs import NullEventProcessor +from ldclient.versioned_data_kind import FEATURES + +from testing.builders import * +from testing.stub_util import MockUpdateProcessor +from 
testing.test_ldclient import context, make_client, make_ldd_client, make_offline_client, unreachable_uri, user + + +def get_first_event(c): + e = c._event_processor._events.pop(0) + c._event_processor._events = [] + return e + + +def count_events(c): + n = len(c._event_processor._events) + c._event_processor._events = [] + return n + + +def test_client_has_null_event_processor_if_offline(): + with make_offline_client() as client: + assert isinstance(client._event_processor, NullEventProcessor) + + +def test_client_has_null_event_processor_if_send_events_off(): + config = Config(sdk_key="secret", base_uri=unreachable_uri, + update_processor_class = MockUpdateProcessor, send_events=False) + with LDClient(config=config) as client: + assert isinstance(client._event_processor, NullEventProcessor) + + +def test_client_has_normal_event_processor_in_ldd_mode(): + with make_ldd_client() as client: + assert isinstance(client._event_processor, DefaultEventProcessor) + + +def test_identify(): + with make_client() as client: + client.identify(context) + e = get_first_event(client) + assert isinstance(e, EventInputIdentify) + assert e.user == user + + +def test_identify_with_user_dict(): + with make_client() as client: + client.identify(user) + e = get_first_event(client) + assert isinstance(e, EventInputIdentify) + assert e.user == user + + +def test_identify_no_user(): + with make_client() as client: + client.identify(None) + assert count_events(client) == 0 + + +def test_identify_no_user_key(): + with make_client() as client: + client.identify({ 'name': 'nokey' }) + assert count_events(client) == 0 + + +def test_identify_invalid_context(): + with make_client() as client: + client.identify(Context.create('')) + assert count_events(client) == 0 + + +def test_track(): + with make_client() as client: + client.track('my_event', context) + e = get_first_event(client) + assert isinstance(e, EventInputCustom) + assert e.key == 'my_event' + assert e.user == user + assert e.data is None + assert e.metric_value is None + + +def test_track_with_user_dict(): + with make_client() as client: + client.track('my_event', user) + e = get_first_event(client) + assert isinstance(e, EventInputCustom) + assert e.key == 'my_event' + assert e.user == user + assert e.data is None + assert e.metric_value is None + + +def test_track_with_data(): + with make_client() as client: + client.track('my_event', context, 42) + e = get_first_event(client) + assert isinstance(e, EventInputCustom) + assert e.key == 'my_event' + assert e.user == user + assert e.data == 42 + assert e.metric_value is None + + +def test_track_with_metric_value(): + with make_client() as client: + client.track('my_event', context, 42, 1.5) + e = get_first_event(client) + assert isinstance(e, EventInputCustom) + assert e.key == 'my_event' + assert e.user == user + assert e.data == 42 + assert e.metric_value == 1.5 + + +def test_track_no_user(): + with make_client() as client: + client.track('my_event', None) + assert count_events(client) == 0 + + +def test_track_no_user_key(): + with make_client() as client: + client.track('my_event', { 'name': 'nokey' }) + assert count_events(client) == 0 + + +def test_track_invalid_context(): + with make_client() as client: + client.track('my_event', Context.create('')) + assert count_events(client) == 0 + + +def test_event_for_existing_feature(): + feature = build_off_flag_with_value('feature.key', 'value').track_events(True).build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: 
feature.to_json_dict()}}) + with make_client(store) as client: + assert 'value' == client.variation(feature.key, user, default='default') + e = get_first_event(client) + assert isinstance(e, EventInputEvaluation) + assert (e.key == feature.key and + e.flag == feature and + e.user == user and + e.value == 'value' and + e.variation == 0 and + e.reason is None and + e.default_value == 'default' and + e.track_events is True) + + +def test_event_for_existing_feature_with_reason(): + feature = build_off_flag_with_value('feature.key', 'value').track_events(True).build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: feature.to_json_dict()}}) + with make_client(store) as client: + assert 'value' == client.variation_detail(feature.key, user, default='default').value + e = get_first_event(client) + assert isinstance(e, EventInputEvaluation) + assert (e.key == feature.key and + e.flag == feature and + e.user == user and + e.value == 'value' and + e.variation == 0 and + e.reason == {'kind': 'OFF'} and + e.default_value == 'default' and + e.track_events is True) + + +def test_event_for_existing_feature_with_tracked_rule(): + feature = FlagBuilder('feature.key').version(100).on(True).variations('value') \ + .rules( + FlagRuleBuilder().variation(0).id('rule_id').track_events(True) \ + .clauses(make_clause(None, 'key', 'in', user['key'])) \ + .build() + ) \ + .build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: feature.to_json_dict()}}) + client = make_client(store) + assert 'value' == client.variation(feature.key, user, default='default') + e = get_first_event(client) + assert isinstance(e, EventInputEvaluation) + assert (e.key == feature.key and + e.flag == feature and + e.user == user and + e.value == 'value' and + e.variation == 0 and + e.reason == { 'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'rule_id' } and + e.default_value == 'default' and + e.track_events is True) + + +def test_event_for_existing_feature_with_untracked_rule(): + feature = FlagBuilder('feature.key').version(100).on(True).variations('value') \ + .rules( + FlagRuleBuilder().variation(0).id('rule_id') \ + .clauses(make_clause(None, 'key', 'in', user['key'])) \ + .build() + ) \ + .build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: feature.to_json_dict()}}) + client = make_client(store) + assert 'value' == client.variation(feature.key, user, default='default') + e = get_first_event(client) + assert isinstance(e, EventInputEvaluation) + assert (e.key == feature.key and + e.flag == feature and + e.user == user and + e.value == 'value' and + e.variation == 0 and + e.reason is None and + e.default_value == 'default' and + e.track_events is False) + + +def test_event_for_existing_feature_with_tracked_fallthrough(): + feature = FlagBuilder('feature.key').version(100).on(True).variations('value') \ + .fallthrough_variation(0).track_events_fallthrough(True) \ + .build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: feature.to_json_dict()}}) + client = make_client(store) + assert 'value' == client.variation(feature.key, user, default='default') + e = get_first_event(client) + assert isinstance(e, EventInputEvaluation) + assert (e.key == feature.key and + e.flag == feature and + e.user == user and + e.value == 'value' and + e.variation == 0 and + e.reason == { 'kind': 'FALLTHROUGH' } and + e.default_value == 'default' and + e.track_events is True) + + +def test_event_for_existing_feature_with_untracked_fallthrough(): + feature = 
FlagBuilder('feature.key').version(100).on(True).variations('value') \ + .fallthrough_variation(0) \ + .build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: feature.to_json_dict()}}) + client = make_client(store) + detail = client.variation_detail(feature.key, user, default='default') + assert 'value' == detail.value + e = get_first_event(client) + assert isinstance(e, EventInputEvaluation) + assert (e.key == feature.key and + e.flag == feature and + e.user == user and + e.value == 'value' and + e.variation == 0 and + e.reason == { 'kind': 'FALLTHROUGH' } and + e.default_value == 'default' and + e.track_events is False) + + +def test_event_for_unknown_feature(): + store = InMemoryFeatureStore() + store.init({FEATURES: {}}) + with make_client(store) as client: + assert 'default' == client.variation('feature.key', user, default='default') + e = get_first_event(client) + assert isinstance(e, EventInputEvaluation) + assert (e.key == 'feature.key' and + e.flag is None and + e.user == user and + e.value == 'default' and + e.variation is None and + e.reason is None and + e.default_value == 'default' and + e.track_events is False) + + +def test_no_event_for_existing_feature_with_no_user(): + feature = build_off_flag_with_value('feature.key', 'value').track_events(True).build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: feature.to_json_dict()}}) + with make_client(store) as client: + assert 'default' == client.variation(feature.key, None, default='default') + assert count_events(client) == 0 + + +def test_no_event_for_existing_feature_with_invalid_context(): + feature = build_off_flag_with_value('feature.key', 'value').track_events(True).build() + store = InMemoryFeatureStore() + store.init({FEATURES: {feature.key: feature.to_json_dict()}}) + with make_client(store) as client: + bad_context = Context.create('') + assert 'default' == client.variation('feature.key', bad_context, default='default') + assert count_events(client) == 0 From b5ba0cb401287d715a38e4198acf9506a2c297ff Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 21 Dec 2022 10:36:53 -0800 Subject: [PATCH 331/356] add module init file --- ldclient/impl/events/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 ldclient/impl/events/__init__.py diff --git a/ldclient/impl/events/__init__.py b/ldclient/impl/events/__init__.py new file mode 100644 index 00000000..e69de29b From 39e3ad1c6d9f550b0fcc9866abe27af6427165d5 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 21 Dec 2022 10:50:51 -0800 Subject: [PATCH 332/356] linting --- ldclient/client.py | 3 ++- ldclient/impl/evaluator.py | 2 +- ldclient/impl/events/event_processor.py | 24 ++++++++++------- ldclient/impl/events/types.py | 35 ++++++++++++------------- 4 files changed, 34 insertions(+), 30 deletions(-) diff --git a/ldclient/client.py b/ldclient/client.py index 103021c5..9ed4de48 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -190,7 +190,8 @@ def __exit__(self, type, value, traceback): def _send_event(self, event): self._event_processor.send_event(event) - def track(self, event_name: str, context: Union[dict, Context], data: Optional[Any]=None, metric_value: Optional[AnyNum]=None): + def track(self, event_name: str, context: Union[dict, Context], data: Optional[Any]=None, + metric_value: Optional[AnyNum]=None): """Tracks that an application-defined event occurred. 
This method creates a "custom" analytics event containing the specified event name (key) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index e21e9d2e..e2c80359 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -59,7 +59,7 @@ def __init__(self): self.prereq_stack = None # type: Optional[List[str]] self.segment_stack = None # type: Optional[List[str]] - def add_event(self, event: dict): + def add_event(self, event: EventInputEvaluation): if self.events is None: self.events = [] self.events.append(event) diff --git a/ldclient/impl/events/event_processor.py b/ldclient/impl/events/event_processor.py index 01a121ce..650ab462 100644 --- a/ldclient/impl/events/event_processor.py +++ b/ldclient/impl/events/event_processor.py @@ -7,7 +7,7 @@ from email.utils import parsedate import json from threading import Event, Lock, Thread -from typing import Any, List +from typing import Any, List, Optional import time import uuid import queue @@ -302,31 +302,35 @@ def _process_event(self, event: EventInput): # Decide whether to add the event to the payload. Feature events may be added twice, once for # the event (if tracked) and once for debugging. - user = event.user # type: dict + user = None # type: Optional[dict] can_add_index = True - full_event = None - debug_event = None + full_event = None # type: Any + debug_event = None # type: Optional[DebugEvent] if isinstance(event, EventInputEvaluation): + user = event.user self._outbox.add_to_summary(event) if event.track_events: full_event = event if self._should_debug_event(event): debug_event = DebugEvent(event) elif isinstance(event, EventInputIdentify): + user = event.user full_event = event can_add_index = False # an index event would be redundant if there's an identify event elif isinstance(event, EventInputCustom): + user = event.user full_event = event # For each user we haven't seen before, we add an index event - unless this is already # an identify event. - already_seen = self._user_keys.put(user['key'], True) - if can_add_index: - if already_seen: - self._deduplicated_users += 1 - else: - self._outbox.add_event(IndexEvent(event.timestamp, user)) + if user is not None: + already_seen = self._user_keys.put(user['key'], True) + if can_add_index: + if already_seen: + self._deduplicated_users += 1 + else: + self._outbox.add_event(IndexEvent(event.timestamp, user)) if full_event: self._outbox.add_event(full_event) diff --git a/ldclient/impl/events/types.py b/ldclient/impl/events/types.py index 5aee18d6..1778261c 100644 --- a/ldclient/impl/events/types.py +++ b/ldclient/impl/events/types.py @@ -1,8 +1,10 @@ from ldclient.evaluation import EvaluationDetail -from ldclient.impl.model import * +from ldclient.impl import AnyNum +from ldclient.impl.model import FeatureFlag from ldclient.impl.util import current_time_millis -from typing import Callable, Optional +import json +from typing import Any, Callable, Optional, Union # These event types are not the event data that is sent to LaunchDarkly; they're the input # parameters that are passed to EventProcessor, which translates them into event data (for @@ -11,6 +13,12 @@ # than dictionaries. 
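(A minimal illustrative sketch, not part of the patch: how these input classes are used at this point in the series, while event inputs still carry user dicts. It relies only on the signatures visible in this diff — EventFactory, new_custom_event, and to_debugging_dict.)

    from ldclient.impl.events.types import EventFactory

    factory = EventFactory(False)  # with_reasons=False
    # Builds an EventInputCustom; the client hands this object to
    # EventProcessor.send_event(), which translates it into the event data
    # that is actually delivered to LaunchDarkly.
    event = factory.new_custom_event('my_event', {'key': 'user1'}, {'clicks': 3}, 1.5)
    # to_debugging_dict() (used only in test debugging, per the comment above)
    # shows what the input carries:
    # {'timestamp': ..., 'user': {'key': 'user1'}, 'key': 'my_event',
    #  'data': {'clicks': 3}, 'metric_value': 1.5}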
class EventInput: + __slots__ = ['timestamp', 'user'] + + def __init__(self, timestamp: int, user: dict): + self.timestamp = timestamp + self.user = user + def __repr__(self) -> str: # used only in test debugging return "%s(%s)" % (self.__class__.__name__, json.dumps(self.to_debugging_dict())) @@ -21,14 +29,12 @@ def to_debugging_dict(self) -> dict: pass class EventInputEvaluation(EventInput): - __slots__ = ['timestamp', 'context', 'key', 'variation', 'value', 'reason', 'default_value', - 'prereq_of', 'track_events'] + __slots__ = ['key', 'flag', 'variation', 'value', 'reason', 'default_value', 'prereq_of', 'track_events'] def __init__(self, timestamp: int, user: dict, key: str, flag: Optional[FeatureFlag], variation: Optional[int], value: Any, reason: Optional[dict], default_value: Any, prereq_of: Optional[FeatureFlag] = None, track_events: bool = False): - self.timestamp = timestamp - self.user = user + super().__init__(timestamp, user) self.key = key self.flag = flag self.variation = variation @@ -53,12 +59,6 @@ def to_debugging_dict(self) -> dict: } class EventInputIdentify(EventInput): - __slots__ = ['timestamp', 'context'] - - def __init__(self, timestamp: int, user: dict): - self.timestamp = timestamp - self.user = user - def to_debugging_dict(self) -> dict: return { "timestamp": self.timestamp, @@ -66,14 +66,13 @@ def to_debugging_dict(self) -> dict: } class EventInputCustom(EventInput): - __slots__ = ['timestamp', 'user', 'key', 'data', 'metric_value'] + __slots__ = ['key', 'data', 'metric_value'] - def __init__(self, timestamp: int, user: dict, key: str, data: Any = None, metric_value: Optional[float] = None): - self.timestamp = timestamp - self.user = user + def __init__(self, timestamp: int, user: dict, key: str, data: Any = None, metric_value: Optional[AnyNum] = None): + super().__init__(timestamp, user) self.key = key self.data = data - self.metric_value = metric_value + self.metric_value = metric_value # type: Optional[int|float|complex] def to_debugging_dict(self) -> dict: return { @@ -148,7 +147,7 @@ def new_identify_event(self, user: dict) -> EventInputIdentify: user ) - def new_custom_event(self, event_name: str, user: dict, data: Any, metric_value: Optional[float]) \ + def new_custom_event(self, event_name: str, user: dict, data: Any, metric_value: Optional[AnyNum]) \ -> EventInputCustom: return EventInputCustom( self._timestamp_fn(), From a6b9f05090e0c88c3c3c3249859b2a824068a55e Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 21 Dec 2022 12:31:43 -0800 Subject: [PATCH 333/356] fix prereq stack logic --- ldclient/impl/evaluator.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index fc1a6d1f..d08f286e 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -107,6 +107,7 @@ def __init__( def evaluate(self, flag: FeatureFlag, context: Context, event_factory: _EventFactory) -> EvalResult: state = EvalResult() + state.original_flag_key = flag.key try: state.detail = self._evaluate(flag, context, state, event_factory) except EvaluationException as e: @@ -160,7 +161,6 @@ def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalR for prereq in flag.prerequisites: prereq_key = prereq.key if (prereq_key == state.original_flag_key or - (flag_key != state.original_flag_key and prereq_key == flag_key) or (state.prereq_stack is not None and prereq.key in state.prereq_stack)): raise EvaluationException(('prerequisite relationship to "%s" caused a circular 
reference;' + ' this is probably a temporary condition due to an incomplete update') % prereq_key) @@ -181,7 +181,7 @@ def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalR return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key} return None finally: - if state.prereq_stack is not None and state.prereq_stack.count != 0: + if state.prereq_stack is not None and len(state.prereq_stack) != 0: state.prereq_stack.pop() def _check_targets(self, flag: FeatureFlag, context: Context) -> Optional[EvaluationDetail]: From 42af25ade7ad2188011176897bd15fd7f0c8155f Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 21 Dec 2022 13:18:06 -0800 Subject: [PATCH 334/356] (U2C 17) U2C changes for events, not including private attributes (#205) --- Makefile | 9 +- contract-tests/service.py | 8 +- ldclient/client.py | 19 ++- ldclient/impl/evaluator.py | 28 +--- ldclient/impl/events/event_processor.py | 119 ++++++++-------- ldclient/impl/events/event_summarizer.py | 74 ++++++++-- ldclient/impl/events/types.py | 43 +++--- testing/impl/events/test_event_processor.py | 135 ++++++++++--------- testing/impl/events/test_event_summarizer.py | 23 ++-- testing/impl/test_evaluator_prerequisites.py | 12 +- testing/test_ldclient_events.py | 50 +++---- 11 files changed, 283 insertions(+), 237 deletions(-) diff --git a/Makefile b/Makefile index ce9186b3..a34ca195 100644 --- a/Makefile +++ b/Makefile @@ -20,9 +20,14 @@ TEMP_TEST_OUTPUT=/tmp/contract-test-service.log # TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass # Explanation of current skips: -# - "events": These test suites will be unavailable until more of the U2C implementation is done. +# - "events/context properties/allAttributesPrivate": private attribute redaction is not yet implemented +# - "events/context properties/specific private attributes": private attribute redaction is not yet implemented +# - "events/context properties/private attribute nested": private attribute redaction is not yet implemented TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \ - -skip 'events' + -skip 'events/context properties/allAttributesPrivate' \ + -skip 'events/context properties/specific private attributes' \ + -skip 'events/context properties/private attribute nested' + # port 8000 and 9000 is already used in the CI environment because we're # running a DynamoDB container and an SSE contract test diff --git a/contract-tests/service.py b/contract-tests/service.py index c83a4b8e..e455d8ad 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -12,6 +12,7 @@ default_port = 8000 + # logging configuration dictConfig({ 'version': 1, @@ -30,11 +31,10 @@ 'level': 'INFO', 'handlers': ['console'] }, - 'ldclient.util': { - 'level': 'INFO', - 'handlers': ['console'] - }, 'loggers': { + 'ldclient': { + 'level': 'INFO', # change to 'DEBUG' to enable SDK debug logging + }, 'werkzeug': { 'level': 'ERROR' } # disable irrelevant Flask app logging } }) diff --git a/ldclient/client.py b/ldclient/client.py index 9ed4de48..fb5a1688 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -18,7 +18,7 @@ from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.evaluation import EvaluationDetail, FeatureFlagsState from ldclient.impl.big_segments import BigSegmentStoreManager -from ldclient.impl.evaluator import Evaluator, error_reason, _context_to_user_dict +from ldclient.impl.evaluator import Evaluator, error_reason from ldclient.impl.events.event_processor import 
DefaultEventProcessor from ldclient.impl.events.types import EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor @@ -213,7 +213,7 @@ def track(self, event_name: str, context: Union[dict, Context], data: Optional[A log.warning("Invalid context for track (%s)" % context.error) else: self._send_event(self._event_factory_default.new_custom_event(event_name, - _context_to_user_dict(context), data, metric_value)) + context, data, metric_value)) def identify(self, context: Union[Context, dict]): """Reports details about an evaluation context. @@ -237,7 +237,7 @@ def identify(self, context: Union[Context, dict]): # but an identify event with an empty key is no good. log.warning("Empty user key for identify") else: - self._send_event(self._event_factory_default.new_identify_event(_context_to_user_dict(context))) + self._send_event(self._event_factory_default.new_identify_event(context)) def is_offline(self) -> bool: """Returns true if the client is in offline mode. @@ -299,9 +299,6 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A if self._config.offline: return EvaluationDetail(default, None, error_reason('CLIENT_NOT_READY')) - user = context if isinstance(context, dict) or context is None \ - else _context_to_user_dict(context) # temporary until the event processor is updated to use contexts - if not self.is_initialized(): if self._store.initialized: log.warning("Feature Flag evaluation attempted before client has initialized - using last known values from feature store for feature key: " + key) @@ -309,7 +306,7 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A log.warning("Feature Flag evaluation attempted before client has initialized! Feature store unavailable - returning default: " + str(default) + " for feature key: " + key) reason = error_reason('CLIENT_NOT_READY') - self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason)) + self._send_event(event_factory.new_unknown_flag_event(key, context, default, reason)) return EvaluationDetail(default, None, reason) if not isinstance(context, Context): @@ -324,11 +321,11 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A log.error("Unexpected error while retrieving feature flag \"%s\": %s" % (key, repr(e))) log.debug(traceback.format_exc()) reason = error_reason('EXCEPTION') - self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason)) + self._send_event(event_factory.new_unknown_flag_event(key, context, default, reason)) return EvaluationDetail(default, None, reason) if not flag: reason = error_reason('FLAG_NOT_FOUND') - self._send_event(event_factory.new_unknown_flag_event(key, user, default, reason)) + self._send_event(event_factory.new_unknown_flag_event(key, context, default, reason)) return EvaluationDetail(default, None, reason) else: try: @@ -338,13 +335,13 @@ def _evaluate_internal(self, key: str, context: Union[Context, dict], default: A detail = result.detail if detail.is_default_value(): detail = EvaluationDetail(default, None, detail.reason) - self._send_event(event_factory.new_eval_event(flag, user, detail, default)) + self._send_event(event_factory.new_eval_event(flag, context, detail, default)) return detail except Exception as e: log.error("Unexpected error while evaluating feature flag \"%s\": %s" % (key, repr(e))) log.debug(traceback.format_exc()) reason = error_reason('EXCEPTION') - self._send_event(event_factory.new_default_event(flag, user, default, 
reason)) + self._send_event(event_factory.new_default_event(flag, context, default, reason)) return EvaluationDetail(default, None, reason) def all_flags_state(self, context: Union[Context, dict], **kwargs) -> FeatureFlagsState: diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 4193b051..3ad8dae6 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -1,5 +1,5 @@ from ldclient import operators -from ldclient.context import Context, _USER_STRING_ATTRS +from ldclient.context import Context from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail from ldclient.impl.events.types import EventFactory, EventInputEvaluation from ldclient.impl.model import * @@ -18,30 +18,6 @@ "firstName", "lastName", "avatar", "name", "anonymous"] -def _context_to_user_dict(context: Context) -> dict: - # temporary helper to allow us to update some parts of the SDK to use Context while others are - # still using the user model - ret = {'key': context.key} # type: Dict[str, Any] - if context.name is not None: - ret['name'] = context.name - if context.anonymous: - ret['anonymous'] = True - custom = None - for attr in context.custom_attributes: - if attr in _USER_STRING_ATTRS: - ret[attr] = context.get(attr) - continue - if custom is None: - custom = {} - custom[attr] = context.get(attr) - if custom is not None: - ret['custom'] = custom - private = list(context.private_attributes) - if len(private) != 0: - ret['privateAttributeNames'] = private - return ret - - # EvalResult is used internally to hold the EvaluationDetail result of an evaluation along with # other side effects that are not exposed to the application, such as events generated by # prerequisite evaluations, and the cached state of any Big Segments query that we may have @@ -178,7 +154,7 @@ def _check_prerequisites(self, flag: FeatureFlag, context: Context, state: EvalR # off variation was. But we still need to evaluate it in order to generate an event. 
if (not prereq_flag.on) or prereq_res.variation_index != prereq.variation: failed_prereq = prereq - event = event_factory.new_eval_event(prereq_flag, _context_to_user_dict(context), prereq_res, None, flag) + event = event_factory.new_eval_event(prereq_flag, context, prereq_res, None, flag) state.add_event(event) if failed_prereq: return {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': failed_prereq.key} diff --git a/ldclient/impl/events/event_processor.py b/ldclient/impl/events/event_processor.py index 650ab462..57cf141f 100644 --- a/ldclient/impl/events/event_processor.py +++ b/ldclient/impl/events/event_processor.py @@ -13,6 +13,7 @@ import queue import urllib3 +from ldclient.context import Context from ldclient.diagnostics import create_diagnostic_init from ldclient.impl.events.event_summarizer import EventSummarizer, EventSummary from ldclient.fixed_thread_pool import FixedThreadPool @@ -26,8 +27,7 @@ from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, log, _headers __MAX_FLUSH_THREADS__ = 5 -__CURRENT_EVENT_SCHEMA__ = 3 -__USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__ = [ "key", "secondary", "ip", "country", "email", "firstName", "lastName", "avatar", "name" ] +__CURRENT_EVENT_SCHEMA__ = 4 EventProcessorMessage = namedtuple('EventProcessorMessage', ['type', 'param']) @@ -40,11 +40,11 @@ def __init__(self, original_input: EventInputEvaluation): self.original_input = original_input class IndexEvent: - __slots__ = ['timestamp', 'user'] + __slots__ = ['timestamp', 'context'] - def __init__(self, timestamp: int, user: dict): + def __init__(self, timestamp: int, context: Context): self.timestamp = timestamp - self.user = user + self.context = context class EventOutputFormatter: @@ -53,38 +53,37 @@ def __init__(self, config): def make_output_events(self, events: List[Any], summary: EventSummary): events_out = [ self.make_output_event(e) for e in events ] - if len(summary.counters) > 0: + if not summary.is_empty(): events_out.append(self.make_summary_event(summary)) return events_out def make_output_event(self, e: Any): if isinstance(e, EventInputEvaluation): out = self._base_eval_props(e, 'feature') - out['userKey'] = e.user['key'] + out['contextKeys'] = self._context_keys(e.context) return out elif isinstance(e, DebugEvent): out = self._base_eval_props(e.original_input, 'debug') - out['user'] = self._process_user(e.original_input.user) + out['context'] = self._process_context(e.original_input.context) return out elif isinstance(e, EventInputIdentify): return { 'kind': 'identify', 'creationDate': e.timestamp, - 'key': e.user['key'], - 'user': self._process_user(e.user) + 'context': self._process_context(e.context) } elif isinstance(e, IndexEvent): return { 'kind': 'index', 'creationDate': e.timestamp, - 'user': self._process_user(e.user) + 'context': self._process_context(e.context) } elif isinstance(e, EventInputCustom): out = { 'kind': 'custom', 'creationDate': e.timestamp, 'key': e.key, - 'userKey': e.user['key'] + 'contextKeys': self._context_keys(e.context) } if e.data is not None: out['data'] = e.data @@ -98,23 +97,24 @@ def make_output_event(self, e: Any): """ def make_summary_event(self, summary: EventSummary): flags_out = dict() # type: dict[str, Any] - for ckey, cval in summary.counters.items(): - flag_key, variation, version = ckey - flag_data = flags_out.get(flag_key) - if flag_data is None: - flag_data = { 'default': cval['default'], 'counters': [] } - flags_out[flag_key] = flag_data - counter = { - 'count': cval['count'], - 'value': 
cval['value'] - } - if variation is not None: - counter['variation'] = variation - if version is None: - counter['unknown'] = True - else: - counter['version'] = version - flag_data['counters'].append(counter) + for key, flag_data in summary.flags.items(): + flag_data_out = {'default': flag_data.default, 'contextKinds': list(flag_data.context_kinds)} + counters = [] # type: list[dict[str, Any]] + for ckey, cval in flag_data.counters.items(): + variation, version = ckey + counter = { + 'count': cval.count, + 'value': cval.value + } + if variation is not None: + counter['variation'] = variation + if version is None: + counter['unknown'] = True + else: + counter['version'] = version + counters.append(counter) + flag_data_out['counters'] = counters + flags_out[key] = flag_data_out return { 'kind': 'summary', 'startDate': summary.start_date, @@ -122,8 +122,17 @@ def make_summary_event(self, summary: EventSummary): 'features': flags_out } - def _process_user(self, user: dict): - return self._user_filter.filter_user_props(user) + def _process_context(self, context: Context): + # TODO: implement context redaction + return context.to_dict() + + def _context_keys(self, context: Context): + out = {} + for i in range(context.individual_context_count): + c = context.get_individual_context(i) + if c is not None: + out[c.kind] = c.key + return out def _base_eval_props(self, e: EventInputEvaluation, kind: str) -> dict: out = { @@ -140,7 +149,7 @@ def _base_eval_props(self, e: EventInputEvaluation, kind: str) -> dict: if e.reason is not None: out['reason'] = e.reason if e.prereq_of is not None: - out['prereqOf'] = e.prereq_of + out['prereqOf'] = e.prereq_of.key return out @@ -156,7 +165,7 @@ def run(self): try: output_events = self._formatter.make_output_events(self._payload.events, self._payload.summary) resp = self._do_send(output_events) - except Exception: + except Exception as e: log.warning( 'Unhandled exception in event processor. Analytics events were not processed.', exc_info=True) @@ -164,7 +173,7 @@ def run(self): def _do_send(self, output_events): # noinspection PyBroadException try: - json_body = json.dumps(output_events) + json_body = json.dumps(output_events, separators=(',',':')) log.debug('Sending events payload: ' + json_body) payload_id = str(uuid.uuid4()) r = _post_events_with_retry( @@ -252,10 +261,10 @@ def __init__(self, inbox, config, http_client, diagnostic_accumulator=None): self._close_http = (http_client is None) # so we know whether to close it later self._disabled = False self._outbox = EventBuffer(config.events_max_pending) - self._user_keys = SimpleLRUCache(config.user_keys_capacity) + self._context_keys = SimpleLRUCache(config.user_keys_capacity) self._formatter = EventOutputFormatter(config) self._last_known_past_time = 0 - self._deduplicated_users = 0 + self._deduplicated_contexts = 0 self._diagnostic_accumulator = None if config.diagnostic_opt_out else diagnostic_accumulator self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush") @@ -280,8 +289,8 @@ def _run_main_loop(self): self._process_event(message.param) elif message.type == 'flush': self._trigger_flush() - elif message.type == 'flush_users': - self._user_keys.clear() + elif message.type == 'flush_contexts': + self._context_keys.clear() elif message.type == 'diagnostic': self._send_and_reset_diagnostics() elif message.type == 'test_sync': @@ -302,35 +311,35 @@ def _process_event(self, event: EventInput): # Decide whether to add the event to the payload. 
Feature events may be added twice, once for # the event (if tracked) and once for debugging. - user = None # type: Optional[dict] + context = None # type: Optional[Context] can_add_index = True full_event = None # type: Any debug_event = None # type: Optional[DebugEvent] if isinstance(event, EventInputEvaluation): - user = event.user + context = event.context self._outbox.add_to_summary(event) if event.track_events: full_event = event if self._should_debug_event(event): debug_event = DebugEvent(event) elif isinstance(event, EventInputIdentify): - user = event.user + context = event.context full_event = event can_add_index = False # an index event would be redundant if there's an identify event elif isinstance(event, EventInputCustom): - user = event.user + context = event.context full_event = event - # For each user we haven't seen before, we add an index event - unless this is already + # For each context we haven't seen before, we add an index event - unless this is already # an identify event. - if user is not None: - already_seen = self._user_keys.put(user['key'], True) + if context is not None: + already_seen = self._context_keys.put(context.fully_qualified_key, True) if can_add_index: if already_seen: - self._deduplicated_users += 1 + self._deduplicated_contexts += 1 else: - self._outbox.add_event(IndexEvent(event.timestamp, user)) + self._outbox.add_event(IndexEvent(event.timestamp, context)) if full_event: self._outbox.add_event(full_event) @@ -354,7 +363,7 @@ def _trigger_flush(self): payload = self._outbox.get_payload() if self._diagnostic_accumulator: self._diagnostic_accumulator.record_events_in_batch(len(payload.events)) - if len(payload.events) > 0 or len(payload.summary.counters) > 0: + if len(payload.events) > 0 or not payload.summary.is_empty(): task = EventPayloadSendTask(self._http, self._config, self._formatter, payload, self._handle_response) if self._flush_workers.execute(task.run): @@ -378,8 +387,8 @@ def _handle_response(self, r): def _send_and_reset_diagnostics(self): if self._diagnostic_accumulator is not None: dropped_event_count = self._outbox.get_and_clear_dropped_count() - stats_event = self._diagnostic_accumulator.create_event_and_reset(dropped_event_count, self._deduplicated_users) - self._deduplicated_users = 0 + stats_event = self._diagnostic_accumulator.create_event_and_reset(dropped_event_count, self._deduplicated_contexts) + self._deduplicated_contexts = 0 task = DiagnosticEventSendTask(self._http, self._config, stats_event) self._diagnostic_flush_workers.execute(task.run) @@ -395,9 +404,9 @@ def __init__(self, config, http=None, dispatcher_class=None, diagnostic_accumula self._inbox = queue.Queue(config.events_max_pending) self._inbox_full = False self._flush_timer = RepeatingTask(config.flush_interval, config.flush_interval, self.flush) - self._users_flush_timer = RepeatingTask(config.user_keys_flush_interval, config.user_keys_flush_interval, self._flush_users) + self._contexts_flush_timer = RepeatingTask(config.user_keys_flush_interval, config.user_keys_flush_interval, self._flush_contexts) self._flush_timer.start() - self._users_flush_timer.start() + self._contexts_flush_timer.start() if diagnostic_accumulator is not None: self._diagnostic_event_timer = RepeatingTask(config.diagnostic_recording_interval, config.diagnostic_recording_interval, self._send_diagnostic) @@ -422,7 +431,7 @@ def stop(self): return self._closed = True self._flush_timer.stop() - self._users_flush_timer.stop() + self._contexts_flush_timer.stop() if 
self._diagnostic_event_timer: self._diagnostic_event_timer.stop() self.flush() @@ -439,8 +448,8 @@ def _post_to_inbox(self, message): self._inbox_full = True log.warning("Events are being produced faster than they can be processed; some events will be dropped") - def _flush_users(self): - self._inbox.put(EventProcessorMessage('flush_users', None)) + def _flush_contexts(self): + self._inbox.put(EventProcessorMessage('flush_contexts', None)) def _send_diagnostic(self): self._inbox.put(EventProcessorMessage('diagnostic', None)) diff --git a/ldclient/impl/events/event_summarizer.py b/ldclient/impl/events/event_summarizer.py index c144a4d3..1577d278 100644 --- a/ldclient/impl/events/event_summarizer.py +++ b/ldclient/impl/events/event_summarizer.py @@ -1,33 +1,85 @@ """ Implementation details of the analytics event delivery component. """ -# currently excluded from documentation - see docs/README.md from collections import namedtuple +from typing import Any, Dict, List, Optional, Set, Tuple from ldclient.impl.events.types import EventInputEvaluation -EventSummary = namedtuple('EventSummary', ['start_date', 'end_date', 'counters']) +class EventSummaryCounter: + __slots__ = ['count', 'value'] + + def __init__(self, count: int, value: Any): + self.count = count + self.value = value + + def __eq__(self, other: Any) -> bool: # used only in tests + return isinstance(other, EventSummaryCounter) and \ + other.count == self.count and other.value == self.value + + def __repr__(self) -> str: # used only in test debugging + return "EventSummaryCounter(%d, %s)" % (self.count, self.value) + + +class EventSummaryFlag: + __slots__ = ['context_kinds', 'default', 'counters'] + + def __init__(self, context_kinds: Set[str], default: Any, counters: Dict[Tuple[Optional[int], Optional[int]], EventSummaryCounter]): + self.context_kinds = context_kinds + self.counters = counters + self.default = default + + def __eq__(self, other: Any) -> bool: # used only in tests + return isinstance(other, EventSummaryFlag) and \ + other.context_kinds == self.context_kinds and other.counters == self.counters and other.default == self.default + + def __repr__(self) -> str: # used only in test debugging + return "EventSummaryFlag(%s, %s, %s)" % (self.context_kinds, self.counters, self.default) + + +class EventSummary: + __slots__ = ['start_date', 'end_date', 'flags'] + + def __init__(self, start_date: int, end_date: int, flags: Dict[str, EventSummaryFlag]): + self.start_date = start_date + self.end_date = end_date + self.flags = flags + + def is_empty(self) -> bool: + return len(self.flags) == 0 class EventSummarizer: def __init__(self): self.start_date = 0 self.end_date = 0 - self.counters = dict() + self.flags = dict() # type: Dict[str, EventSummaryFlag] """ Add this event to our counters, if it is a type of event we need to count. 
""" def summarize_event(self, event: EventInputEvaluation): - counter_key = (event.key, event.variation, None if event.flag is None else event.flag.version) - counter_val = self.counters.get(counter_key) - if counter_val is None: - counter_val = { 'count': 1, 'value': event.value, 'default': event.default_value } - self.counters[counter_key] = counter_val + flag_data = self.flags.get(event.key) + if flag_data is None: + flag_data = EventSummaryFlag(set(), event.default_value, dict()) + self.flags[event.key] = flag_data + + context = event.context + for i in range(context.individual_context_count): + c = context.get_individual_context(i) + if c is not None: + flag_data.context_kinds.add(c.kind) + + counter_key = (event.variation, None if event.flag is None else event.flag.version) + counter = flag_data.counters.get(counter_key) + if counter is None: + counter = EventSummaryCounter(1, event.value) + flag_data.counters[counter_key] = counter else: - counter_val['count'] = counter_val['count'] + 1 + counter.count += 1 + date = event.timestamp if self.start_date == 0 or date < self.start_date: self.start_date = date @@ -38,9 +90,9 @@ def summarize_event(self, event: EventInputEvaluation): Return the current summarized event data. """ def snapshot(self): - return EventSummary(start_date = self.start_date, end_date = self.end_date, counters = self.counters) + return EventSummary(start_date = self.start_date, end_date = self.end_date, flags = self.flags) def clear(self): self.start_date = 0 self.end_date = 0 - self.counters = dict() + self.flags = dict() diff --git a/ldclient/impl/events/types.py b/ldclient/impl/events/types.py index 1778261c..0526a7db 100644 --- a/ldclient/impl/events/types.py +++ b/ldclient/impl/events/types.py @@ -1,10 +1,11 @@ +from ldclient.context import Context from ldclient.evaluation import EvaluationDetail from ldclient.impl import AnyNum from ldclient.impl.model import FeatureFlag from ldclient.impl.util import current_time_millis import json -from typing import Any, Callable, Optional, Union +from typing import Any, Callable, Optional # These event types are not the event data that is sent to LaunchDarkly; they're the input # parameters that are passed to EventProcessor, which translates them into event data (for @@ -13,11 +14,11 @@ # than dictionaries. 
class EventInput: - __slots__ = ['timestamp', 'user'] + __slots__ = ['timestamp', 'context'] - def __init__(self, timestamp: int, user: dict): + def __init__(self, timestamp: int, context: Context): self.timestamp = timestamp - self.user = user + self.context = context def __repr__(self) -> str: # used only in test debugging return "%s(%s)" % (self.__class__.__name__, json.dumps(self.to_debugging_dict())) @@ -31,10 +32,10 @@ def to_debugging_dict(self) -> dict: class EventInputEvaluation(EventInput): __slots__ = ['key', 'flag', 'variation', 'value', 'reason', 'default_value', 'prereq_of', 'track_events'] - def __init__(self, timestamp: int, user: dict, key: str, flag: Optional[FeatureFlag], + def __init__(self, timestamp: int, context: Context, key: str, flag: Optional[FeatureFlag], variation: Optional[int], value: Any, reason: Optional[dict], default_value: Any, prereq_of: Optional[FeatureFlag] = None, track_events: bool = False): - super().__init__(timestamp, user) + super().__init__(timestamp, context) self.key = key self.flag = flag self.variation = variation @@ -47,7 +48,7 @@ def __init__(self, timestamp: int, user: dict, key: str, flag: Optional[FeatureF def to_debugging_dict(self) -> dict: return { "timestamp": self.timestamp, - "user": self.user, + "context": self.context.to_dict(), "key": self.key, "flag": {"key": self.flag.key} if self.flag else None, "variation": self.variation, @@ -62,14 +63,14 @@ class EventInputIdentify(EventInput): def to_debugging_dict(self) -> dict: return { "timestamp": self.timestamp, - "user": self.user + "context": self.context.to_dict() } class EventInputCustom(EventInput): __slots__ = ['key', 'data', 'metric_value'] - def __init__(self, timestamp: int, user: dict, key: str, data: Any = None, metric_value: Optional[AnyNum] = None): - super().__init__(timestamp, user) + def __init__(self, timestamp: int, context: Context, key: str, data: Any = None, metric_value: Optional[AnyNum] = None): + super().__init__(timestamp, context) self.key = key self.data = data self.metric_value = metric_value # type: Optional[int|float|complex] @@ -77,7 +78,7 @@ def __init__(self, timestamp: int, user: dict, key: str, data: Any = None, metri def to_debugging_dict(self) -> dict: return { "timestamp": self.timestamp, - "user": self.user, + "context": self.context.to_dict(), "key": self.key, "data": self.data, "metric_value": self.metric_value @@ -95,12 +96,12 @@ def __init__(self, with_reasons: bool, timestamp_fn: Callable[[], int] = current self._with_reasons = with_reasons self._timestamp_fn = timestamp_fn - def new_eval_event(self, flag: FeatureFlag, user: dict, detail: EvaluationDetail, + def new_eval_event(self, flag: FeatureFlag, context: Context, detail: EvaluationDetail, default_value: Any, prereq_of_flag: Optional[FeatureFlag] = None) -> EventInputEvaluation: add_experiment_data = self.is_experiment(flag, detail.reason) return EventInputEvaluation( self._timestamp_fn(), - user, + context, flag.key, flag, detail.variation_index, @@ -111,11 +112,11 @@ def new_eval_event(self, flag: FeatureFlag, user: dict, detail: EvaluationDetail flag.track_events or add_experiment_data ) - def new_default_event(self, flag: FeatureFlag, user: dict, default_value: Any, + def new_default_event(self, flag: FeatureFlag, context: Context, default_value: Any, reason: Optional[dict]) -> EventInputEvaluation: return EventInputEvaluation( self._timestamp_fn(), - user, + context, flag.key, flag, None, @@ -126,11 +127,11 @@ def new_default_event(self, flag: FeatureFlag, user: dict, 
default_value: Any, flag.track_events ) - def new_unknown_flag_event(self, key: str, user: dict, default_value: Any, + def new_unknown_flag_event(self, key: str, context: Context, default_value: Any, reason: Optional[dict]) -> EventInputEvaluation: return EventInputEvaluation( self._timestamp_fn(), - user, + context, key, None, None, @@ -141,17 +142,17 @@ def new_unknown_flag_event(self, key: str, user: dict, default_value: Any, False ) - def new_identify_event(self, user: dict) -> EventInputIdentify: + def new_identify_event(self, context: Context) -> EventInputIdentify: return EventInputIdentify( self._timestamp_fn(), - user + context ) - def new_custom_event(self, event_name: str, user: dict, data: Any, metric_value: Optional[AnyNum]) \ + def new_custom_event(self, event_name: str, context: Context, data: Any, metric_value: Optional[AnyNum]) \ -> EventInputCustom: return EventInputCustom( self._timestamp_fn(), - user, + context, event_name, data, metric_value diff --git a/testing/impl/events/test_event_processor.py b/testing/impl/events/test_event_processor.py index 0c2180cf..140db8bd 100644 --- a/testing/impl/events/test_event_processor.py +++ b/testing/impl/events/test_event_processor.py @@ -4,6 +4,7 @@ import uuid from ldclient.config import Config +from ldclient.context import Context from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.impl.events.event_processor import DefaultEventProcessor from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify @@ -14,12 +15,12 @@ default_config = Config("fake_sdk_key") -user = {'key': 'userkey', 'name': 'Red'} -user_key = user['key'] -filtered_user = { - 'key': 'userkey', - 'privateAttrs': [ 'name' ] -} +context = Context.builder('userkey').name('Red').build() +filtered_context = context.to_dict() # TODO: implement attribute redaction +# filtered_context = { +# 'key': 'userkey', +# 'privateAttrs': [ 'name' ] +# } flag = FlagBuilder('flagkey').version(2).build() timestamp = 10000 @@ -35,6 +36,15 @@ def teardown_function(): if ep is not None: ep.stop() +def make_context_keys(context: Context) -> dict: + ret = {} # type: dict[str, str] + for i in range(context.individual_context_count): + c = context.get_individual_context(i) + if c is not None: + ret[c.kind] = c.key + return ret + + class DefaultTestProcessor(DefaultEventProcessor): def __init__(self, **kwargs): if not 'diagnostic_opt_out' in kwargs: @@ -47,33 +57,25 @@ def __init__(self, **kwargs): def test_identify_event_is_queued(): with DefaultTestProcessor() as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + e = EventInputIdentify(timestamp, context) + ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 1 - assert output == [{ - 'kind': 'identify', - 'creationDate': timestamp, - 'key': user_key, - 'user': user - }] + check_identify_event(output[0], e) def test_context_is_filtered_in_identify_event(): with DefaultTestProcessor(all_attributes_private = True) as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + e = EventInputIdentify(timestamp, context) + ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 1 - assert output == [{ - 'kind': 'identify', - 'creationDate': timestamp, - 'key': user_key, - 'user': filtered_user - }] + check_identify_event(output[0], e, filtered_context) def test_individual_feature_event_is_queued_with_index_event(): with DefaultTestProcessor() as ep: - e = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 
'value', None, 'default', None, True) + e = EventInputEvaluation(timestamp, context, flag.key, flag, 1, 'value', None, 'default', None, True) ep.send_event(e) output = flush_and_get_events(ep) @@ -84,19 +86,19 @@ def test_individual_feature_event_is_queued_with_index_event(): def test_context_is_filtered_in_index_event(): with DefaultTestProcessor(all_attributes_private = True) as ep: - e = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 'value', None, 'default', None, True) + e = EventInputEvaluation(timestamp, context, flag.key, flag, 1, 'value', None, 'default', None, True) ep.send_event(e) output = flush_and_get_events(ep) assert len(output) == 3 - check_index_event(output[0], e, filtered_user) + check_index_event(output[0], e, filtered_context) check_feature_event(output[1], e) check_summary_event(output[2]) def test_two_events_for_same_context_only_produce_one_index_event(): with DefaultTestProcessor(user_keys_flush_interval = 300) as ep: - e0 = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 'value1', None, 'default', None, True) - e1 = EventInputEvaluation(timestamp, user, flag.key, flag, 2, 'value2', None, 'default', None, True) + e0 = EventInputEvaluation(timestamp, context, flag.key, flag, 1, 'value1', None, 'default', None, True) + e1 = EventInputEvaluation(timestamp, context, flag.key, flag, 2, 'value2', None, 'default', None, True) ep.send_event(e0) ep.send_event(e1) @@ -109,8 +111,8 @@ def test_two_events_for_same_context_only_produce_one_index_event(): def test_new_index_event_is_added_if_context_cache_has_been_cleared(): with DefaultTestProcessor(user_keys_flush_interval = 0.1) as ep: - e0 = EventInputEvaluation(timestamp, user, flag.key, flag, 1, 'value1', None, 'default', None, True) - e1 = EventInputEvaluation(timestamp, user, flag.key, flag, 2, 'value2', None, 'default', None, True) + e0 = EventInputEvaluation(timestamp, context, flag.key, flag, 1, 'value1', None, 'default', None, True) + e1 = EventInputEvaluation(timestamp, context, flag.key, flag, 2, 'value2', None, 'default', None, True) ep.send_event(e0) time.sleep(0.2) ep.send_event(e1) @@ -127,7 +129,7 @@ def test_event_kind_is_debug_if_flag_is_temporarily_in_debug_mode(): with DefaultTestProcessor() as ep: future_time = now() + 100000 debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(future_time).build() - e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) + e = EventInputEvaluation(timestamp, context, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) output = flush_and_get_events(ep) @@ -140,7 +142,7 @@ def test_event_can_be_both_tracked_and_debugged(): with DefaultTestProcessor() as ep: future_time = now() + 100000 debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(future_time).build() - e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, True) + e = EventInputEvaluation(timestamp, context, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, True) ep.send_event(e) output = flush_and_get_events(ep) @@ -157,14 +159,14 @@ def test_debug_mode_does_not_expire_if_both_client_time_and_server_time_are_befo # Send and flush an event we don't care about, just to set the last server time mock_http.set_server_time(server_time) - ep.send_event(EventInputIdentify(timestamp, {'key': 'otherUser'})) + ep.send_event(EventInputIdentify(timestamp, 
Context.create('otherUser'))) flush_and_get_events(ep) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than both the client time and the server time debug_until = server_time + 10000 debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(debug_until).build() - e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) + e = EventInputEvaluation(timestamp, context, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) # Should get a summary event only, not a full feature event @@ -181,14 +183,14 @@ def test_debug_mode_expires_based_on_client_time_if_client_time_is_later_than_se # Send and flush an event we don't care about, just to set the last server time mock_http.set_server_time(server_time) - ep.send_event(EventInputIdentify(timestamp, {'key': 'otherUser'})) + ep.send_event(EventInputIdentify(timestamp, Context.create('otherUser'))) flush_and_get_events(ep) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than the server time, but in the past compared to the client. debug_until = server_time + 1000 debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(debug_until).build() - e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) + e = EventInputEvaluation(timestamp, context, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) # Should get a summary event only, not a full feature event @@ -204,14 +206,14 @@ def test_debug_mode_expires_based_on_server_time_if_server_time_is_later_than_cl # Send and flush an event we don't care about, just to set the last server time mock_http.set_server_time(server_time) - ep.send_event(EventInputIdentify(timestamp, {'key': 'otherUser'})) + ep.send_event(EventInputIdentify(timestamp, Context.create('otherUser'))) flush_and_get_events(ep) # Now send an event with debug mode on, with a "debug until" time that is further in # the future than the client time, but in the past compared to the server. 
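    # (Editorial note, not part of the patch: per these three tests, a debug
    # event is emitted only while the flag's debugEventsUntilDate is still in
    # the future relative to BOTH the client clock and the last known server
    # time; the value below is already past from the server's perspective.)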
debug_until = server_time - 1000 debugged_flag = FlagBuilder(flag.key).version(flag.version).debug_events_until_date(debug_until).build() - e = EventInputEvaluation(timestamp, user, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) + e = EventInputEvaluation(timestamp, context, debugged_flag.key, debugged_flag, 1, 'value', None, 'default', None, False) ep.send_event(e) # Should get a summary event only, not a full feature event @@ -225,8 +227,8 @@ def test_nontracked_events_are_summarized(): flag1 = FlagBuilder('flagkey1').version(11).build() flag2 = FlagBuilder('flagkey2').version(22).build() earlier_time, later_time = 1111111, 2222222 - e1 = EventInputEvaluation(later_time, user, flag1.key, flag1, 1, 'value1', None, 'default1', None, False) - e2 = EventInputEvaluation(earlier_time, user, flag2.key, flag2, 2, 'value2', None, 'default2', None, False) + e1 = EventInputEvaluation(later_time, context, flag1.key, flag1, 1, 'value1', None, 'default1', None, False) + e2 = EventInputEvaluation(earlier_time, context, flag2.key, flag2, 2, 'value2', None, 'default2', None, False) ep.send_event(e1) ep.send_event(e2) @@ -239,10 +241,12 @@ def test_nontracked_events_are_summarized(): assert se['endDate'] == later_time assert se['features'] == { 'flagkey1': { + 'contextKinds': ['user'], 'default': 'default1', 'counters': [ { 'version': 11, 'variation': 1, 'value': 'value1', 'count': 1 } ] }, 'flagkey2': { + 'contextKinds': ['user'], 'default': 'default2', 'counters': [ { 'version': 22, 'variation': 2, 'value': 'value2', 'count': 1 } ] } @@ -250,7 +254,7 @@ def test_nontracked_events_are_summarized(): def test_custom_event_is_queued_with_user(): with DefaultTestProcessor() as ep: - e = EventInputCustom(timestamp, user, 'eventkey', { 'thing': 'stuff '}, 1.5) + e = EventInputCustom(timestamp, context, 'eventkey', { 'thing': 'stuff '}, 1.5) ep.send_event(e) output = flush_and_get_events(ep) @@ -266,7 +270,7 @@ def test_nothing_is_sent_if_there_are_no_events(): def test_sdk_key_is_sent(): with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() @@ -274,7 +278,7 @@ def test_sdk_key_is_sent(): def test_wrapper_header_not_sent_when_not_set(): with DefaultTestProcessor() as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() @@ -282,7 +286,7 @@ def test_wrapper_header_not_sent_when_not_set(): def test_wrapper_header_sent_when_set(): with DefaultTestProcessor(wrapper_name = "Flask", wrapper_version = "0.0.1") as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() @@ -290,7 +294,7 @@ def test_wrapper_header_sent_when_set(): def test_wrapper_header_sent_without_version(): with DefaultTestProcessor(wrapper_name = "Flask") as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() @@ -298,11 +302,11 @@ def test_wrapper_header_sent_without_version(): def test_event_schema_set_on_event_send(): with DefaultTestProcessor() as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() - assert mock_http.request_headers.get('X-LaunchDarkly-Event-Schema') == "3" + assert 
mock_http.request_headers.get('X-LaunchDarkly-Event-Schema') == "4" def test_sdk_key_is_sent_on_diagnostic_request(): with DefaultTestProcessor(sdk_key = 'SDK_KEY', diagnostic_opt_out=False) as ep: @@ -326,7 +330,7 @@ def test_periodic_diagnostic_includes_events_in_batch(): # Ignore init event flush_and_get_events(ep) # Send a payload with a single event - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) flush_and_get_events(ep) ep._send_diagnostic() @@ -341,8 +345,8 @@ def test_periodic_diagnostic_includes_deduplicated_users(): # Ignore init event flush_and_get_events(ep) # Send two custom events with the same user to cause a user deduplication - e0 = EventInputCustom(timestamp, user, 'event1', None, None) - e1 = EventInputCustom(timestamp, user, 'event2', None, None) + e0 = EventInputCustom(timestamp, context, 'event1', None, None) + e1 = EventInputCustom(timestamp, context, 'event2', None, None) ep.send_event(e0) ep.send_event(e1) flush_and_get_events(ep) @@ -388,8 +392,8 @@ def start_consuming_events(): with DefaultEventProcessor(config, mock_http, dispatcher_factory) as ep: ep_inbox = ep_inbox_holder[0] - event1 = EventInputCustom(timestamp, user, 'event1') - event2 = EventInputCustom(timestamp, user, 'event2') + event1 = EventInputCustom(timestamp, context, 'event1') + event2 = EventInputCustom(timestamp, context, 'event2') ep.send_event(event1) ep.send_event(event2) # this event should be dropped - inbox is full message1 = ep_inbox.get(block=False) @@ -401,7 +405,7 @@ def start_consuming_events(): def test_http_proxy(monkeypatch): def _event_processor_proxy_test(server, config, secure): with DefaultEventProcessor(config) as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() do_proxy_tests(_event_processor_proxy_test, 'POST', monkeypatch) @@ -409,12 +413,12 @@ def _event_processor_proxy_test(server, config, secure): def verify_unrecoverable_http_error(status): with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: mock_http.set_response_status(status) - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() mock_http.reset() - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() assert mock_http.request_data is None @@ -422,19 +426,19 @@ def verify_unrecoverable_http_error(status): def verify_recoverable_http_error(status): with DefaultTestProcessor(sdk_key = 'SDK_KEY') as ep: mock_http.set_response_status(status) - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() mock_http.reset() - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() assert mock_http.request_data is not None def test_event_payload_id_is_sent(): with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() @@ -445,11 +449,11 @@ def test_event_payload_id_is_sent(): def test_event_payload_id_changes_between_requests(): with DefaultEventProcessor(Config(sdk_key = 'SDK_KEY'), mock_http) as ep: - ep.send_event(EventInputIdentify(timestamp, user)) + 
ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() - ep.send_event(EventInputIdentify(timestamp, user)) + ep.send_event(EventInputIdentify(timestamp, context)) ep.flush() ep._wait_until_inactive() @@ -465,10 +469,15 @@ def flush_and_get_events(ep): else: return json.loads(mock_http.request_data) -def check_index_event(data, source: EventInput, user: Optional[dict] = None): +def check_identify_event(data, source: EventInput, context_json: Optional[dict] = None): + assert data['kind'] == 'identify' + assert data['creationDate'] == source.timestamp + assert data['context'] == (source.context.to_dict() if context_json is None else context_json) + +def check_index_event(data, source: EventInput, context_json: Optional[dict] = None): assert data['kind'] == 'index' assert data['creationDate'] == source.timestamp - assert data['user'] == source.user if user is None else user + assert data['context'] == (source.context.to_dict() if context_json is None else context_json) def check_feature_event(data, source: EventInputEvaluation): assert data['kind'] == 'feature' @@ -478,10 +487,10 @@ def check_feature_event(data, source: EventInputEvaluation): assert data.get('variation') == source.variation assert data.get('value') == source.value assert data.get('default') == source.default_value - assert data['userKey'] == source.user['key'] + assert data['contextKeys'] == make_context_keys(source.context) assert data.get('prereq_of') == None if source.prereq_of is None else source.prereq_of.key -def check_debug_event(data, source: EventInputEvaluation, user: Optional[dict] = None): +def check_debug_event(data, source: EventInputEvaluation, context_json: Optional[dict] = None): assert data['kind'] == 'debug' assert data['creationDate'] == source.timestamp assert data['key'] == source.key @@ -489,7 +498,7 @@ def check_debug_event(data, source: EventInputEvaluation, user: Optional[dict] = assert data.get('variation') == source.variation assert data.get('value') == source.value assert data.get('default') == source.default_value - assert data['user'] == source.user + assert data['context'] == (source.context.to_dict() if context_json is None else context_json) assert data.get('prereq_of') == None if source.prereq_of is None else source.prereq_of.key def check_custom_event(data, source: EventInputCustom): @@ -497,7 +506,7 @@ def check_custom_event(data, source: EventInputCustom): assert data['creationDate'] == source.timestamp assert data['key'] == source.key assert data['data'] == source.data - assert data['userKey'] == source.user['key'] + assert data['contextKeys'] == make_context_keys(source.context) assert data.get('metricValue') == source.metric_value def check_summary_event(data): diff --git a/testing/impl/events/test_event_summarizer.py b/testing/impl/events/test_event_summarizer.py index f8e55f1a..5f4a76ac 100644 --- a/testing/impl/events/test_event_summarizer.py +++ b/testing/impl/events/test_event_summarizer.py @@ -1,12 +1,11 @@ -import pytest - -from ldclient.impl.events.event_summarizer import EventSummarizer +from ldclient.context import Context +from ldclient.impl.events.event_summarizer import EventSummarizer, EventSummaryCounter, EventSummaryFlag from ldclient.impl.events.types import * from testing.builders import * -user = { 'key': 'user1' } +user = Context.create('user1') flag1 = FlagBuilder('flag1').version(11).build() flag2 = FlagBuilder('flag2').version(22).build() @@ -39,9 +38,15 @@ def test_summarize_event_increments_counters(): data = es.snapshot() 
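    # (Editorial note, not part of the patch: the snapshot is now keyed by flag
    # key. Each EventSummaryFlag records the context kinds seen during
    # evaluation, the flag's default value, and one EventSummaryCounter per
    # (variation, version) pair -- replacing the old flat mapping from
    # (key, variation, version) tuples to count/value dicts, as the expected
    # value below illustrates.)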
expected = { - ('flag1', 1, 11): { 'count': 2, 'value': 'value1', 'default': 'default1' }, - ('flag1', 2, 11): { 'count': 1, 'value': 'value2', 'default': 'default1' }, - ('flag2', 1, 22): { 'count': 1, 'value': 'value99', 'default': 'default2' }, - ('badkey', None, None): { 'count': 1, 'value': 'default3', 'default': 'default3' } + 'flag1': EventSummaryFlag({'user'}, 'default1', { + (1, flag1.version): EventSummaryCounter(2, 'value1'), + (2, flag1.version): EventSummaryCounter(1, 'value2') + }), + 'flag2': EventSummaryFlag({'user'}, 'default2', { + (1, flag2.version): EventSummaryCounter(1, 'value99') + }), + 'badkey': EventSummaryFlag({'user'}, 'default3', { + (None, None): EventSummaryCounter(1, 'default3') + }) } - assert data.counters == expected + assert data.flags == expected diff --git a/testing/impl/test_evaluator_prerequisites.py b/testing/impl/test_evaluator_prerequisites.py index e6080a68..9f29e538 100644 --- a/testing/impl/test_evaluator_prerequisites.py +++ b/testing/impl/test_evaluator_prerequisites.py @@ -1,11 +1,9 @@ import pytest -from ldclient.impl.evaluator import _context_to_user_dict -from ldclient.impl.events.types import EventInputEvaluation - -from testing.builders import * from ldclient.client import Context from ldclient.evaluation import EvaluationDetail +from ldclient.impl.events.types import EventInputEvaluation + from testing.builders import * from testing.impl.evaluator_util import * @@ -28,7 +26,7 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_off(): user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [ - EventInputEvaluation(0, _context_to_user_dict(user), flag1.key, flag1, 1, 'e', None, None, flag, False) + EventInputEvaluation(0, user, flag1.key, flag1, 1, 'e', None, None, flag, False) ] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) @@ -41,7 +39,7 @@ def test_flag_returns_off_variation_and_event_if_prerequisite_is_not_met(): user = Context.create('x') detail = EvaluationDetail('b', 1, {'kind': 'PREREQUISITE_FAILED', 'prerequisiteKey': 'feature1'}) events_should_be = [ - EventInputEvaluation(0, _context_to_user_dict(user), flag1.key, flag1, 0, 'd', None, None, flag, False) + EventInputEvaluation(0, user, flag1.key, flag1, 0, 'd', None, None, flag, False) ] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) @@ -54,7 +52,7 @@ def test_flag_returns_fallthrough_and_event_if_prereq_is_met_and_there_are_no_ru user = Context.create('x') detail = EvaluationDetail('a', 0, {'kind': 'FALLTHROUGH'}) events_should_be = [ - EventInputEvaluation(0, _context_to_user_dict(user), flag1.key, flag1, 1, 'e', None, None, flag, False) + EventInputEvaluation(0, user, flag1.key, flag1, 1, 'e', None, None, flag, False) ] assert_eval_result(evaluator.evaluate(flag, user, event_factory), detail, events_should_be) diff --git a/testing/test_ldclient_events.py b/testing/test_ldclient_events.py index e1c6a2af..aa5ff1b6 100644 --- a/testing/test_ldclient_events.py +++ b/testing/test_ldclient_events.py @@ -44,7 +44,7 @@ def test_identify(): client.identify(context) e = get_first_event(client) assert isinstance(e, EventInputIdentify) - assert e.user == user + assert e.context == context def test_identify_with_user_dict(): @@ -52,7 +52,7 @@ def test_identify_with_user_dict(): client.identify(user) e = get_first_event(client) assert isinstance(e, EventInputIdentify) - assert e.user == user + 
assert e.context == context def test_identify_no_user(): @@ -79,7 +79,7 @@ def test_track(): e = get_first_event(client) assert isinstance(e, EventInputCustom) assert e.key == 'my_event' - assert e.user == user + assert e.context == context assert e.data is None assert e.metric_value is None @@ -90,7 +90,7 @@ def test_track_with_user_dict(): e = get_first_event(client) assert isinstance(e, EventInputCustom) assert e.key == 'my_event' - assert e.user == user + assert e.context == context assert e.data is None assert e.metric_value is None @@ -101,7 +101,7 @@ def test_track_with_data(): e = get_first_event(client) assert isinstance(e, EventInputCustom) assert e.key == 'my_event' - assert e.user == user + assert e.context == context assert e.data == 42 assert e.metric_value is None @@ -112,23 +112,17 @@ def test_track_with_metric_value(): e = get_first_event(client) assert isinstance(e, EventInputCustom) assert e.key == 'my_event' - assert e.user == user + assert e.context == context assert e.data == 42 assert e.metric_value == 1.5 -def test_track_no_user(): +def test_track_no_context(): with make_client() as client: client.track('my_event', None) assert count_events(client) == 0 -def test_track_no_user_key(): - with make_client() as client: - client.track('my_event', { 'name': 'nokey' }) - assert count_events(client) == 0 - - def test_track_invalid_context(): with make_client() as client: client.track('my_event', Context.create('')) @@ -140,12 +134,12 @@ def test_event_for_existing_feature(): store = InMemoryFeatureStore() store.init({FEATURES: {feature.key: feature.to_json_dict()}}) with make_client(store) as client: - assert 'value' == client.variation(feature.key, user, default='default') + assert 'value' == client.variation(feature.key, context, default='default') e = get_first_event(client) assert isinstance(e, EventInputEvaluation) assert (e.key == feature.key and e.flag == feature and - e.user == user and + e.context == context and e.value == 'value' and e.variation == 0 and e.reason is None and @@ -158,12 +152,12 @@ def test_event_for_existing_feature_with_reason(): store = InMemoryFeatureStore() store.init({FEATURES: {feature.key: feature.to_json_dict()}}) with make_client(store) as client: - assert 'value' == client.variation_detail(feature.key, user, default='default').value + assert 'value' == client.variation_detail(feature.key, context, default='default').value e = get_first_event(client) assert isinstance(e, EventInputEvaluation) assert (e.key == feature.key and e.flag == feature and - e.user == user and + e.context == context and e.value == 'value' and e.variation == 0 and e.reason == {'kind': 'OFF'} and @@ -182,12 +176,12 @@ def test_event_for_existing_feature_with_tracked_rule(): store = InMemoryFeatureStore() store.init({FEATURES: {feature.key: feature.to_json_dict()}}) client = make_client(store) - assert 'value' == client.variation(feature.key, user, default='default') + assert 'value' == client.variation(feature.key, context, default='default') e = get_first_event(client) assert isinstance(e, EventInputEvaluation) assert (e.key == feature.key and e.flag == feature and - e.user == user and + e.context == context and e.value == 'value' and e.variation == 0 and e.reason == { 'kind': 'RULE_MATCH', 'ruleIndex': 0, 'ruleId': 'rule_id' } and @@ -206,12 +200,12 @@ def test_event_for_existing_feature_with_untracked_rule(): store = InMemoryFeatureStore() store.init({FEATURES: {feature.key: feature.to_json_dict()}}) client = make_client(store) - assert 'value' == 
client.variation(feature.key, user, default='default') + assert 'value' == client.variation(feature.key, context, default='default') e = get_first_event(client) assert isinstance(e, EventInputEvaluation) assert (e.key == feature.key and e.flag == feature and - e.user == user and + e.context == context and e.value == 'value' and e.variation == 0 and e.reason is None and @@ -226,12 +220,12 @@ def test_event_for_existing_feature_with_tracked_fallthrough(): store = InMemoryFeatureStore() store.init({FEATURES: {feature.key: feature.to_json_dict()}}) client = make_client(store) - assert 'value' == client.variation(feature.key, user, default='default') + assert 'value' == client.variation(feature.key, context, default='default') e = get_first_event(client) assert isinstance(e, EventInputEvaluation) assert (e.key == feature.key and e.flag == feature and - e.user == user and + e.context == context and e.value == 'value' and e.variation == 0 and e.reason == { 'kind': 'FALLTHROUGH' } and @@ -246,13 +240,13 @@ def test_event_for_existing_feature_with_untracked_fallthrough(): store = InMemoryFeatureStore() store.init({FEATURES: {feature.key: feature.to_json_dict()}}) client = make_client(store) - detail = client.variation_detail(feature.key, user, default='default') + detail = client.variation_detail(feature.key, context, default='default') assert 'value' == detail.value e = get_first_event(client) assert isinstance(e, EventInputEvaluation) assert (e.key == feature.key and e.flag == feature and - e.user == user and + e.context == context and e.value == 'value' and e.variation == 0 and e.reason == { 'kind': 'FALLTHROUGH' } and @@ -264,12 +258,12 @@ def test_event_for_unknown_feature(): store = InMemoryFeatureStore() store.init({FEATURES: {}}) with make_client(store) as client: - assert 'default' == client.variation('feature.key', user, default='default') + assert 'default' == client.variation('feature.key', context, default='default') e = get_first_event(client) assert isinstance(e, EventInputEvaluation) assert (e.key == 'feature.key' and e.flag is None and - e.user == user and + e.context == context and e.value == 'default' and e.variation is None and e.reason is None and @@ -277,7 +271,7 @@ def test_event_for_unknown_feature(): e.track_events is False) -def test_no_event_for_existing_feature_with_no_user(): +def test_no_event_for_existing_feature_with_no_context(): feature = build_off_flag_with_value('feature.key', 'value').track_events(True).build() store = InMemoryFeatureStore() store.init({FEATURES: {feature.key: feature.to_json_dict()}}) From 56f4492c5da767382b20bfe9453aa35099e3b2de Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 21 Dec 2022 14:04:08 -0800 Subject: [PATCH 335/356] private attribute redaction --- Makefile | 9 +- ldclient/config.py | 27 ++-- .../impl/events/event_context_formatter.py | 95 ++++++++++++++ ldclient/impl/events/event_processor.py | 12 +- ldclient/impl/model/attribute_ref.py | 4 + ldclient/user_filter.py | 41 ------ .../events/test_event_context_formatter.py | 82 ++++++++++++ testing/impl/events/test_event_processor.py | 9 +- testing/test_user_filter.py | 117 ------------------ 9 files changed, 211 insertions(+), 185 deletions(-) create mode 100644 ldclient/impl/events/event_context_formatter.py delete mode 100644 ldclient/user_filter.py create mode 100644 testing/impl/events/test_event_context_formatter.py delete mode 100644 testing/test_user_filter.py diff --git a/Makefile b/Makefile index a34ca195..e007f6d9 100644 --- a/Makefile +++ b/Makefile @@ -19,14 +19,7 @@ 
docs:

 TEMP_TEST_OUTPUT=/tmp/contract-test-service.log

 # TEST_HARNESS_PARAMS can be set to add -skip parameters for any contract tests that cannot yet pass
-# Explanation of current skips:
-# - "events/context properties/allAttributesPrivate": private attribute redaction is not yet implemented
-# - "events/context properties/specific private attributes": private attribute redaction is not yet implemented
-# - "events/context properties/private attribute nested": private attribute redaction is not yet implemented
-TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \
-  -skip 'events/context properties/allAttributesPrivate' \
-  -skip 'events/context properties/specific private attributes' \
-  -skip 'events/context properties/private attribute nested'
+# TEST_HARNESS_PARAMS := $(TEST_HARNESS_PARAMS) \

 # ports 8000 and 9000 are already used in the CI environment because we're
diff --git a/ldclient/config.py b/ldclient/config.py
index feb4006f..3e1e655b 100644
--- a/ldclient/config.py
+++ b/ldclient/config.py
@@ -4,7 +4,7 @@
 Note that the same class can also be imported from the ``ldclient.client`` submodule.
 """

-from typing import Optional, Callable, List, Any, Set
+from typing import Optional, Callable, List, Set

 from ldclient.feature_store import InMemoryFeatureStore
 from ldclient.util import log
@@ -161,7 +161,8 @@ def __init__(self,
              use_ldd: bool=False,
              feature_store: Optional[FeatureStore]=None,
              feature_requester_class=None,
-             event_processor_class: Callable[['Config'], EventProcessor]=None,
+             event_processor_class: Callable[['Config'], EventProcessor]=None,
+             private_attributes: Set[str]=set(),
              private_attribute_names: Set[str]=set(),
              all_attributes_private: bool=False,
              offline: bool=False,
@@ -206,10 +207,14 @@ def __init__(self,
         ignored if this option is set to true. By default, this is false. For more information,
         read the LaunchDarkly documentation:
         https://docs.launchdarkly.com/home/relay-proxy/using#using-daemon-mode
-    :param array private_attribute_names: Marks a set of attribute names private. Any users sent to
-        LaunchDarkly with this configuration active will have attributes with these names removed.
+    :param array private_attributes: Marks a set of attributes private. Any users sent to LaunchDarkly
+        with this configuration active will have these attributes removed. Each item can be either the
+        name of an attribute ("email"), or a slash-delimited path ("/address/street") to mark a
+        property within a JSON object value as private.
+    :param array private_attribute_names: Deprecated alias for `private_attributes` ("names" is no longer
+        strictly accurate because these could also be attribute reference paths).
     :param all_attributes_private: If true, all user attributes (other than the key) will be
-        private, not just the attributes specified in `private_attribute_names`.
+        private, not just the attributes specified in `private_attributes`.
     :param feature_store: A FeatureStore implementation
     :param user_keys_capacity: The number of user keys that the event processor can remember at any one
         time, so that duplicate user details will not be sent in analytics events.
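As a usage sketch of the option documented above (an editorial addition, not part of the diff; the SDK key and attribute names are placeholders):

from ldclient.config import Config

# Redact "email" everywhere, and redact the "street" property nested inside
# the "address" object value, for all contexts sent to LaunchDarkly in events.
config = Config(
    sdk_key='fake-sdk-key',
    private_attributes={'email', '/address/street'})

# Or redact every attribute other than the context's key and built-in metadata:
strict = Config(sdk_key='fake-sdk-key', all_attributes_private=True)
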
@@ -255,7 +260,7 @@ def __init__(self, if offline is True: send_events = False self.__send_events = events_enabled if send_events is None else send_events - self.__private_attribute_names = private_attribute_names + self.__private_attributes = private_attributes or private_attribute_names self.__all_attributes_private = all_attributes_private self.__offline = offline self.__user_keys_capacity = user_keys_capacity @@ -288,7 +293,7 @@ def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': feature_store=self.__feature_store, feature_requester_class=self.__feature_requester_class, event_processor_class=self.__event_processor_class, - private_attribute_names=self.__private_attribute_names, + private_attributes=self.__private_attributes, all_attributes_private=self.__all_attributes_private, offline=self.__offline, user_keys_capacity=self.__user_keys_capacity, @@ -385,8 +390,12 @@ def flush_interval(self) -> float: return self.__flush_interval @property - def private_attribute_names(self) -> list: - return list(self.__private_attribute_names) + def private_attributes(self) -> List[str]: + return list(self.__private_attributes) + + @property + def private_attribute_names(self) -> List[str]: + return self.private_attributes @property def all_attributes_private(self) -> bool: diff --git a/ldclient/impl/events/event_context_formatter.py b/ldclient/impl/events/event_context_formatter.py new file mode 100644 index 00000000..7af7b50b --- /dev/null +++ b/ldclient/impl/events/event_context_formatter.py @@ -0,0 +1,95 @@ +from typing import Any, List, Optional + +from ldclient.context import Context +from ldclient.impl.model import AttributeRef + + +class EventContextFormatter: + IGNORE_ATTRS = frozenset(['key', 'custom', 'anonymous']) + ALLOWED_TOP_LEVEL_ATTRS = frozenset(['key', 'secondary', 'ip', 'country', 'email', + 'firstName', 'lastName', 'avatar', 'name', 'anonymous', 'custom']) + + def __init__(self, all_attributes_private: bool, private_attributes: List[str]): + self._all_attributes_private = all_attributes_private + self._private_attributes = [] # type: List[AttributeRef] + for p in private_attributes: + ar = AttributeRef.from_path(p) + if ar.valid: + self._private_attributes.append(ar) + + def format_context(self, context: Context) -> dict: + if context.multiple: + out = {'kind': 'multi'} # type: dict[str, Any] + for i in range(context.individual_context_count): + c = context.get_individual_context(i) + if c is not None: + out[c.kind] = self._format_context_single(c, False) + return out + else: + return self._format_context_single(context, True) + + def _format_context_single(self, context: Context, include_kind: bool) -> dict: + out = {'key': context.key} # type: dict[str, Any] + if include_kind: + out['kind'] = context.kind + if context.anonymous: + out['anonymous'] = True + + redacted = [] # type: List[str] + all_private = self._private_attributes + for p in context.private_attributes: + if all_private is self._private_attributes: + all_private = all_private.copy() + ar = AttributeRef.from_path(p) + if ar.valid: + all_private.append(ar) + + if context.name is not None and not self._check_whole_attr_private('name', all_private, redacted): + out['name'] = context.name + + for attr in context.custom_attributes: + if not self._check_whole_attr_private(attr, all_private, redacted): + value = context.get(attr) + out[attr] = self._redact_json_value(None, attr, value, all_private, redacted) + + if len(redacted) != 0: + out['_meta'] = {'redactedAttributes': redacted} + + return out + + def 
_check_whole_attr_private(self, attr: str, all_private: List[AttributeRef], redacted: List[str]) -> bool: + if self._all_attributes_private: + redacted.append(attr) + return True + for p in all_private: + if p.depth == 1 and p[0] == attr: + redacted.append(attr) + return True + return False + + def _redact_json_value(self, parent_path: Optional[List[str]], name: str, value: Any, all_private: List[AttributeRef], + redacted: List[str]) -> Any: + if not isinstance(value, dict) or len(value) == 0: + return value + ret = {} + current_path = parent_path.copy() if parent_path else [] + current_path.append(name) + for k, v in value.items(): + was_redacted = False + for p in all_private: + if p.depth != len(current_path) + 1: + continue + if p[len(current_path)] != k: + continue + match = True + for i, component in enumerate(current_path): + if p[i] != component: + match = False + break + if match: + redacted.append(p.path) + was_redacted = True + break + if not was_redacted: + ret[k] = self._redact_json_value(current_path, k, v, all_private, redacted) + return ret diff --git a/ldclient/impl/events/event_processor.py b/ldclient/impl/events/event_processor.py index 57cf141f..46945b1b 100644 --- a/ldclient/impl/events/event_processor.py +++ b/ldclient/impl/events/event_processor.py @@ -12,17 +12,18 @@ import uuid import queue import urllib3 +from ldclient.config import Config from ldclient.context import Context from ldclient.diagnostics import create_diagnostic_init -from ldclient.impl.events.event_summarizer import EventSummarizer, EventSummary from ldclient.fixed_thread_pool import FixedThreadPool +from ldclient.impl.events.event_context_formatter import EventContextFormatter +from ldclient.impl.events.event_summarizer import EventSummarizer, EventSummary from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify from ldclient.impl.http import _http_factory from ldclient.impl.repeating_task import RepeatingTask from ldclient.impl.util import current_time_millis from ldclient.lru_cache import SimpleLRUCache -from ldclient.user_filter import UserFilter from ldclient.interfaces import EventProcessor from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, log, _headers @@ -48,8 +49,8 @@ def __init__(self, timestamp: int, context: Context): class EventOutputFormatter: - def __init__(self, config): - self._user_filter = UserFilter(config) + def __init__(self, config: Config): + self._context_formatter = EventContextFormatter(config.all_attributes_private, config.private_attributes) def make_output_events(self, events: List[Any], summary: EventSummary): events_out = [ self.make_output_event(e) for e in events ] @@ -123,8 +124,7 @@ def make_summary_event(self, summary: EventSummary): } def _process_context(self, context: Context): - # TODO: implement context redaction - return context.to_dict() + return self._context_formatter.format_context(context) def _context_keys(self, context: Context): out = {} diff --git a/ldclient/impl/model/attribute_ref.py b/ldclient/impl/model/attribute_ref.py index e1b76251..5349cfe7 100644 --- a/ldclient/impl/model/attribute_ref.py +++ b/ldclient/impl/model/attribute_ref.py @@ -42,6 +42,10 @@ def valid(self) -> bool: def error(self) -> Optional[str]: return self._error + @property + def path(self) -> str: + return self._raw + @property def depth(self) -> int: if self._error is not None: diff --git a/ldclient/user_filter.py b/ldclient/user_filter.py deleted file mode 100644 index 
acca254f..00000000 --- a/ldclient/user_filter.py +++ /dev/null @@ -1,41 +0,0 @@ -""" -Internal helper class for filtering out private attributes. -""" -# currently excluded from documentation - see docs/README.md - -class UserFilter: - IGNORE_ATTRS = frozenset(['key', 'custom', 'anonymous']) - ALLOWED_TOP_LEVEL_ATTRS = frozenset(['key', 'secondary', 'ip', 'country', 'email', - 'firstName', 'lastName', 'avatar', 'name', 'anonymous', 'custom']) - - def __init__(self, config): - self._private_attribute_names = config.private_attribute_names - self._all_attributes_private = config.all_attributes_private - - def _is_private_attr(self, name, user_private_attrs): - if name in UserFilter.IGNORE_ATTRS: - return False - elif self._all_attributes_private: - return True - else: - return (name in self._private_attribute_names) or (name in user_private_attrs) - - def filter_user_props(self, user_props): - all_private_attrs = set() - user_private_attrs = user_props.get('privateAttributeNames', []) - - def filter_private_attrs(attrs, allowed_attrs = frozenset()): - for key, value in attrs.items(): - if (not allowed_attrs) or (key in allowed_attrs): - if self._is_private_attr(key, user_private_attrs): - all_private_attrs.add(key) - else: - yield key, value - - ret = dict(filter_private_attrs(user_props, UserFilter.ALLOWED_TOP_LEVEL_ATTRS)) - if 'custom' in user_props: - ret['custom'] = dict(filter_private_attrs(user_props['custom'])) - - if all_private_attrs: - ret['privateAttrs'] = sorted(list(all_private_attrs)) # note, only sorting to make tests reliable - return ret diff --git a/testing/impl/events/test_event_context_formatter.py b/testing/impl/events/test_event_context_formatter.py new file mode 100644 index 00000000..06662ab3 --- /dev/null +++ b/testing/impl/events/test_event_context_formatter.py @@ -0,0 +1,82 @@ +from ldclient.context import Context +from ldclient.impl.events.event_context_formatter import EventContextFormatter + +def test_simple_context(): + f = EventContextFormatter(False, []) + c = Context.create('a') + assert f.format_context(c) == {'kind': 'user', 'key': 'a'} + +def test_context_with_more_attributes(): + f = EventContextFormatter(False, []) + c = Context.builder('a').name('b').anonymous(True).set('c', True).set('d', 2).build() + assert f.format_context(c) == { + 'kind': 'user', + 'key': 'a', + 'name': 'b', + 'anonymous': True, + 'c': True, + 'd': 2 + } + +def test_multi_context(): + f = EventContextFormatter(False, []) + c = Context.create_multi( + Context.create('a'), + Context.builder('b').kind('c').name('d').build() + ) + assert f.format_context(c) == { + 'kind': 'multi', + 'user': { + 'key': 'a' + }, + 'c': { + 'key': 'b', + 'name': 'd' + } + } + +def test_all_private(): + f = EventContextFormatter(True, []) + c = Context.builder('a').name('b').anonymous(True).set('c', True).set('d', 2).build() + assert f.format_context(c) == { + 'kind': 'user', + 'key': 'a', + 'anonymous': True, + '_meta': {'redactedAttributes': ['name', 'c', 'd']} + } + +def test_some_private_global(): + f = EventContextFormatter(False, ['name', 'd']) + c = Context.builder('a').name('b').anonymous(True).set('c', True).set('d', 2).build() + assert f.format_context(c) == { + 'kind': 'user', + 'key': 'a', + 'anonymous': True, + 'c': True, + '_meta': {'redactedAttributes': ['name', 'd']} + } + +def test_some_private_per_context(): + f = EventContextFormatter(False, ['name']) + c = Context.builder('a').name('b').anonymous(True).set('c', True).set('d', 2).private('d').build() + assert f.format_context(c) == { + 
'kind': 'user', + 'key': 'a', + 'anonymous': True, + 'c': True, + '_meta': {'redactedAttributes': ['name', 'd']} + } + +def test_private_property_in_object(): + f = EventContextFormatter(False, ['/b/prop1', '/c/prop2/sub1']) + c = Context.builder('a') \ + .set('b', {'prop1': True, 'prop2': 3}) \ + .set('c', {'prop1': {'sub1': True}, 'prop2': {'sub1': 4, 'sub2': 5}}) \ + .build() + assert f.format_context(c) == { + 'kind': 'user', + 'key': 'a', + 'b': {'prop2': 3}, + 'c': {'prop1': {'sub1': True}, 'prop2': {'sub2': 5}}, + '_meta': {'redactedAttributes': ['/b/prop1', '/c/prop2/sub1']} + } diff --git a/testing/impl/events/test_event_processor.py b/testing/impl/events/test_event_processor.py index 140db8bd..d72f2e01 100644 --- a/testing/impl/events/test_event_processor.py +++ b/testing/impl/events/test_event_processor.py @@ -17,10 +17,11 @@ default_config = Config("fake_sdk_key") context = Context.builder('userkey').name('Red').build() filtered_context = context.to_dict() # TODO: implement attribute redaction -# filtered_context = { -# 'key': 'userkey', -# 'privateAttrs': [ 'name' ] -# } +filtered_context = { + 'kind': 'user', + 'key': 'userkey', + '_meta': {'redactedAttributes': ['name']} +} flag = FlagBuilder('flagkey').version(2).build() timestamp = 10000 diff --git a/testing/test_user_filter.py b/testing/test_user_filter.py deleted file mode 100644 index aa53bbad..00000000 --- a/testing/test_user_filter.py +++ /dev/null @@ -1,117 +0,0 @@ -import json -from ldclient.client import Config -from ldclient.user_filter import UserFilter - - -base_config = Config("fake_sdk_key") -config_with_all_attrs_private = Config("fake_sdk_key", all_attributes_private = True) -config_with_some_attrs_private = Config("fake_sdk_key", private_attribute_names=set([u'firstName', u'bizzle'])) - -# users to serialize - -user = { - u'key': u'abc', - u'firstName': u'Sue', - u'custom': { - u'bizzle': u'def', - u'dizzle': u'ghi' - } -} - -user_specifying_own_private_attr = { - u'key': u'abc', - u'firstName': u'Sue', - u'custom': { - u'bizzle': u'def', - u'dizzle': u'ghi' - }, - u'privateAttributeNames': [ u'dizzle', u'unused' ] -} - -user_with_unknown_top_level_attrs = { - u'key': u'abc', - u'firstName': u'Sue', - u'species': u'human', - u'hatSize': 6, - u'custom': { - u'bizzle': u'def', - u'dizzle': u'ghi' - } -} - -anon_user = { - u'key': u'abc', - u'anonymous': True, - u'custom': { - u'bizzle': u'def', - u'dizzle': u'ghi' - } -} - -# expected results from serializing user - -user_with_all_attrs_hidden = { - u'key': u'abc', - u'custom': { }, - u'privateAttrs': [ u'bizzle', u'dizzle', u'firstName' ] -} - -user_with_some_attrs_hidden = { - u'key': u'abc', - u'custom': { - u'dizzle': u'ghi' - }, - u'privateAttrs': [ u'bizzle', u'firstName' ] -} - -user_with_own_specified_attr_hidden = { - u'key': u'abc', - u'firstName': u'Sue', - u'custom': { - u'bizzle': u'def' - }, - u'privateAttrs': [ u'dizzle' ] -} - -anon_user_with_all_attrs_hidden = { - u'key': u'abc', - u'anonymous': True, - u'custom': { }, - u'privateAttrs': [ u'bizzle', u'dizzle' ] -} - - -def test_all_user_attrs_serialized(): - uf = UserFilter(base_config) - j = uf.filter_user_props(user) - assert j == user - -def test_all_user_attrs_private(): - uf = UserFilter(config_with_all_attrs_private) - j = uf.filter_user_props(user) - assert j == user_with_all_attrs_hidden - -def test_some_user_attrs_private(): - uf = UserFilter(config_with_some_attrs_private) - j = uf.filter_user_props(user) - assert j == user_with_some_attrs_hidden - -def 
test_per_user_private_attr(): - uf = UserFilter(base_config) - j = uf.filter_user_props(user_specifying_own_private_attr) - assert j == user_with_own_specified_attr_hidden - -def test_per_user_private_attr_plus_global_private_attrs(): - uf = UserFilter(config_with_some_attrs_private) - j = uf.filter_user_props(user_specifying_own_private_attr) - assert j == user_with_all_attrs_hidden - -def test_unknown_top_level_attrs_stripped(): - uf = UserFilter(base_config) - j = uf.filter_user_props(user_with_unknown_top_level_attrs) - assert j == user - -def test_leave_anonymous_attr_as_is(): - uf = UserFilter(config_with_all_attrs_private) - j = uf.filter_user_props(anon_user) - assert j == anon_user_with_all_attrs_hidden From 49cd88a6be416d4134057739202615ae6e0f6bae Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 22 Dec 2022 10:31:02 -0800 Subject: [PATCH 336/356] move a lot of code out of top-level modules --- docs/index.rst | 2 + ldclient/__init__.py | 6 +- ldclient/client.py | 11 +- ldclient/config.py | 7 +- ldclient/feature_store.py | 4 +- ldclient/impl/big_segments.py | 2 +- ldclient/impl/datasource/__init__.py | 0 .../datasource}/feature_requester.py | 6 +- ldclient/{ => impl/datasource}/polling.py | 3 +- ldclient/{ => impl/datasource}/streaming.py | 2 +- ldclient/impl/evaluator.py | 2 +- ldclient/{ => impl/events}/diagnostics.py | 5 - ldclient/impl/events/event_processor.py | 9 +- ldclient/{ => impl}/fixed_thread_pool.py | 7 +- .../integrations/files/file_data_source.py | 2 +- .../test_data/test_data_source.py | 2 - ldclient/impl/listeners.py | 2 +- ldclient/{ => impl}/lru_cache.py | 5 - ldclient/{ => impl}/operators.py | 0 ldclient/impl/repeating_task.py | 2 +- ldclient/{ => impl}/rwlock.py | 5 - ldclient/impl/sse.py | 4 +- ldclient/impl/util.py | 107 +++++++++++++++++ ldclient/integrations/test_data.py | 2 +- ldclient/util.py | 112 ------------------ testing/impl/datasource/__init__.py | 0 .../datasource}/test_feature_requester.py | 5 +- .../datasource}/test_polling_processor.py | 6 +- .../{ => impl/datasource}/test_streaming.py | 4 +- testing/{ => impl/events}/test_diagnostics.py | 2 +- testing/impl/events/test_event_processor.py | 2 +- testing/{ => impl}/test_lru_cache.py | 4 +- testing/{ => impl}/test_operators.py | 2 +- testing/{ => impl}/test_retry_delay.py | 0 testing/test_ldclient.py | 4 +- 35 files changed, 150 insertions(+), 188 deletions(-) create mode 100644 ldclient/impl/datasource/__init__.py rename ldclient/{ => impl/datasource}/feature_requester.py (88%) rename ldclient/{ => impl/datasource}/polling.py (93%) rename ldclient/{ => impl/datasource}/streaming.py (98%) rename ldclient/{ => impl/events}/diagnostics.py (97%) rename ldclient/{ => impl}/fixed_thread_pool.py (92%) rename ldclient/{ => impl}/lru_cache.py (89%) rename ldclient/{ => impl}/operators.py (100%) rename ldclient/{ => impl}/rwlock.py (91%) delete mode 100644 ldclient/util.py create mode 100644 testing/impl/datasource/__init__.py rename testing/{ => impl/datasource}/test_feature_requester.py (97%) rename testing/{ => impl/datasource}/test_polling_processor.py (94%) rename testing/{ => impl/datasource}/test_streaming.py (98%) rename testing/{ => impl/events}/test_diagnostics.py (97%) rename testing/{ => impl}/test_lru_cache.py (94%) rename testing/{ => impl}/test_operators.py (99%) rename testing/{ => impl}/test_retry_delay.py (100%) diff --git a/docs/index.rst b/docs/index.rst index 8c601890..9c6e4ecb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,6 +12,8 @@ The latest version of the SDK can be 
found on `PyPI `_ and `SDK Reference Guide `_. +Any types, functions, or constants that are not specifically described in this API reference should be considered implementation details that are not supported for external use; LaunchDarkly reserves the right to change them at any time and application code should not rely on them. + .. toctree:: :maxdepth: 2 :caption: Contents: diff --git a/ldclient/__init__.py b/ldclient/__init__.py index dd60d11c..30615b0e 100644 --- a/ldclient/__init__.py +++ b/ldclient/__init__.py @@ -2,11 +2,11 @@ The ldclient module contains the most common top-level entry points for the SDK. """ -from ldclient.rwlock import ReadWriteLock +from ldclient.impl.rwlock import ReadWriteLock as _ReadWriteLock +from ldclient.impl.util import log from ldclient.version import VERSION from .client import * from .context import * -from .util import log __version__ = VERSION @@ -20,7 +20,7 @@ __client = None __config = None -__lock = ReadWriteLock() +__lock = _ReadWriteLock() def set_config(config: Config): diff --git a/ldclient/client.py b/ldclient/client.py index fb5a1688..5d1fc093 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -13,22 +13,21 @@ from ldclient.config import Config from ldclient.context import Context -from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator -from ldclient.feature_requester import FeatureRequesterImpl from ldclient.feature_store import _FeatureStoreDataSetSorter from ldclient.evaluation import EvaluationDetail, FeatureFlagsState from ldclient.impl.big_segments import BigSegmentStoreManager +from ldclient.impl.datasource.feature_requester import FeatureRequesterImpl +from ldclient.impl.datasource.polling import PollingUpdateProcessor +from ldclient.impl.datasource.streaming import StreamingUpdateProcessor from ldclient.impl.evaluator import Evaluator, error_reason +from ldclient.impl.events.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.impl.events.event_processor import DefaultEventProcessor from ldclient.impl.events.types import EventFactory from ldclient.impl.stubs import NullEventProcessor, NullUpdateProcessor +from ldclient.impl.util import check_uwsgi, log from ldclient.interfaces import BigSegmentStoreStatusProvider, FeatureRequester, FeatureStore -from ldclient.polling import PollingUpdateProcessor -from ldclient.streaming import StreamingUpdateProcessor -from ldclient.util import check_uwsgi, log from ldclient.versioned_data_kind import FEATURES, SEGMENTS, VersionedDataKind from ldclient.feature_store import FeatureStore -import queue from threading import Lock diff --git a/ldclient/config.py b/ldclient/config.py index 3e1e655b..c0721e3e 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -7,8 +7,8 @@ from typing import Optional, Callable, List, Set from ldclient.feature_store import InMemoryFeatureStore -from ldclient.util import log -from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor, FeatureRequester +from ldclient.impl.util import log +from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' STREAM_FLAGS_PATH = '/flags' @@ -440,3 +440,6 @@ def big_segments(self) -> BigSegmentsConfig: def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key == '': log.warning("Missing or blank sdk_key.") + + +__all__ = ['Config', 'BigSegmentsConfig', 'HTTPConfig'] diff --git a/ldclient/feature_store.py 
b/ldclient/feature_store.py index 062ed5fa..401010db 100644 --- a/ldclient/feature_store.py +++ b/ldclient/feature_store.py @@ -9,9 +9,9 @@ from typing import Callable, Any from collections import OrderedDict, defaultdict -from ldclient.util import log +from ldclient.impl.rwlock import ReadWriteLock +from ldclient.impl.util import log from ldclient.interfaces import DiagnosticDescription, FeatureStore -from ldclient.rwlock import ReadWriteLock from ldclient.versioned_data_kind import VersionedDataKind diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index 0ec1da43..149d5c2c 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -2,8 +2,8 @@ from ldclient.evaluation import BigSegmentsStatus from ldclient.impl.listeners import Listeners from ldclient.impl.repeating_task import RepeatingTask +from ldclient.impl.util import log from ldclient.interfaces import BigSegmentStoreStatus, BigSegmentStoreStatusProvider -from ldclient.util import log import base64 from expiringdict import ExpiringDict diff --git a/ldclient/impl/datasource/__init__.py b/ldclient/impl/datasource/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldclient/feature_requester.py b/ldclient/impl/datasource/feature_requester.py similarity index 88% rename from ldclient/feature_requester.py rename to ldclient/impl/datasource/feature_requester.py index b526a332..dbb4f16c 100644 --- a/ldclient/feature_requester.py +++ b/ldclient/impl/datasource/feature_requester.py @@ -1,18 +1,14 @@ """ Default implementation of feature flag polling requests. """ -# currently excluded from documentation - see docs/README.md from collections import namedtuple import json import urllib3 from ldclient.impl.http import _http_factory +from ldclient.impl.util import _headers, log, throw_if_unsuccessful_response from ldclient.interfaces import FeatureRequester -from ldclient.util import UnsuccessfulResponseException -from ldclient.util import _headers -from ldclient.util import log -from ldclient.util import throw_if_unsuccessful_response from ldclient.versioned_data_kind import FEATURES, SEGMENTS diff --git a/ldclient/polling.py b/ldclient/impl/datasource/polling.py similarity index 93% rename from ldclient/polling.py rename to ldclient/impl/datasource/polling.py index 5b2a3c44..95d4d3ff 100644 --- a/ldclient/polling.py +++ b/ldclient/impl/datasource/polling.py @@ -7,9 +7,8 @@ from ldclient.config import Config from ldclient.impl.repeating_task import RepeatingTask +from ldclient.impl.util import UnsuccessfulResponseException, http_error_message, is_http_error_recoverable, log from ldclient.interfaces import FeatureRequester, FeatureStore, UpdateProcessor -from ldclient.util import log -from ldclient.util import UnsuccessfulResponseException, http_error_message, is_http_error_recoverable class PollingUpdateProcessor(UpdateProcessor): diff --git a/ldclient/streaming.py b/ldclient/impl/datasource/streaming.py similarity index 98% rename from ldclient/streaming.py rename to ldclient/impl/datasource/streaming.py index 2255b419..3dbef2ff 100644 --- a/ldclient/streaming.py +++ b/ldclient/impl/datasource/streaming.py @@ -14,8 +14,8 @@ from ldclient.impl.http import HTTPFactory, _http_factory from ldclient.impl.retry_delay import RetryDelayStrategy, DefaultBackoffStrategy, DefaultJitterStrategy from ldclient.impl.sse import SSEClient +from ldclient.impl.util import log, UnsuccessfulResponseException, http_error_message, is_http_error_recoverable from ldclient.interfaces import 
UpdateProcessor -from ldclient.util import log, UnsuccessfulResponseException, http_error_message, is_http_error_recoverable from ldclient.versioned_data_kind import FEATURES, SEGMENTS # allows for up to 5 minutes to elapse without any data sent across the stream. The heartbeats sent as comments on the diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 3ad8dae6..8fcc961a 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -1,6 +1,6 @@ -from ldclient import operators from ldclient.context import Context from ldclient.evaluation import BigSegmentsStatus, EvaluationDetail +from ldclient.impl import operators from ldclient.impl.events.types import EventFactory, EventInputEvaluation from ldclient.impl.model import * diff --git a/ldclient/diagnostics.py b/ldclient/impl/events/diagnostics.py similarity index 97% rename from ldclient/diagnostics.py rename to ldclient/impl/events/diagnostics.py index d1c66ead..dace86e1 100644 --- a/ldclient/diagnostics.py +++ b/ldclient/impl/events/diagnostics.py @@ -1,8 +1,3 @@ -""" -Implementation details of the diagnostic event generation. -""" -# currently excluded from documentation - see docs/README.md - import threading import time import uuid diff --git a/ldclient/impl/events/event_processor.py b/ldclient/impl/events/event_processor.py index 46945b1b..8cf78bfc 100644 --- a/ldclient/impl/events/event_processor.py +++ b/ldclient/impl/events/event_processor.py @@ -15,17 +15,16 @@ from ldclient.config import Config from ldclient.context import Context -from ldclient.diagnostics import create_diagnostic_init -from ldclient.fixed_thread_pool import FixedThreadPool +from ldclient.impl.events.diagnostics import create_diagnostic_init from ldclient.impl.events.event_context_formatter import EventContextFormatter from ldclient.impl.events.event_summarizer import EventSummarizer, EventSummary from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify +from ldclient.impl.fixed_thread_pool import FixedThreadPool from ldclient.impl.http import _http_factory +from ldclient.impl.lru_cache import SimpleLRUCache from ldclient.impl.repeating_task import RepeatingTask -from ldclient.impl.util import current_time_millis -from ldclient.lru_cache import SimpleLRUCache +from ldclient.impl.util import check_if_error_is_recoverable_and_log, current_time_millis, is_http_error_recoverable, log, _headers from ldclient.interfaces import EventProcessor -from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, log, _headers __MAX_FLUSH_THREADS__ = 5 __CURRENT_EVENT_SCHEMA__ = 4 diff --git a/ldclient/fixed_thread_pool.py b/ldclient/impl/fixed_thread_pool.py similarity index 92% rename from ldclient/fixed_thread_pool.py rename to ldclient/impl/fixed_thread_pool.py index 3428daa8..c545ed7f 100644 --- a/ldclient/fixed_thread_pool.py +++ b/ldclient/impl/fixed_thread_pool.py @@ -1,12 +1,7 @@ -""" -Internal helper class for thread management. -""" -# currently excluded from documentation - see docs/README.md - from threading import Event, Lock, Thread import queue -from ldclient.util import log +from ldclient.impl.util import log """ A simple fixed-size thread pool that rejects jobs when its limit is reached. 
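Note: this hunk shows only the module docstring of the renamed helper, not the implementation. A minimal sketch of the pattern that docstring describes (a fixed pool of worker threads that rejects new jobs, rather than queueing them, once every worker is busy) could look like the following. The class and method names here are invented for illustration; they are not the SDK's internal FixedThreadPool API.

import queue
from threading import BoundedSemaphore, Thread

class RejectingThreadPool:
    """Illustration only: N worker threads, where execute() reports False
    instead of accepting more work when every worker is busy."""
    def __init__(self, size: int):
        self._idle = BoundedSemaphore(size)  # one permit per idle worker
        self._jobs = queue.Queue()
        for _ in range(size):
            Thread(target=self._worker, daemon=True).start()

    def execute(self, job) -> bool:
        if not self._idle.acquire(blocking=False):
            return False  # pool saturated: reject rather than block
        self._jobs.put(job)
        return True

    def _worker(self):
        while True:
            job = self._jobs.get()
            try:
                job()
            finally:
                self._idle.release()

For example, RejectingThreadPool(5).execute(job) returns False whenever all five workers are occupied, which is the rejection behavior the docstring above refers to.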
diff --git a/ldclient/impl/integrations/files/file_data_source.py b/ldclient/impl/integrations/files/file_data_source.py index 34e69f26..1b292fee 100644 --- a/ldclient/impl/integrations/files/file_data_source.py +++ b/ldclient/impl/integrations/files/file_data_source.py @@ -19,8 +19,8 @@ pass from ldclient.impl.repeating_task import RepeatingTask +from ldclient.impl.util import log from ldclient.interfaces import UpdateProcessor -from ldclient.util import log from ldclient.versioned_data_kind import FEATURES, SEGMENTS def _sanitize_json_item(item): diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index e6272925..0f0221bf 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -1,6 +1,4 @@ -import copy from ldclient.versioned_data_kind import FEATURES -from ldclient.rwlock import ReadWriteLock class _TestDataSource(): diff --git a/ldclient/impl/listeners.py b/ldclient/impl/listeners.py index 6a1e5c86..dda5bf52 100644 --- a/ldclient/impl/listeners.py +++ b/ldclient/impl/listeners.py @@ -1,4 +1,4 @@ -from ldclient.util import log +from ldclient.impl.util import log from threading import RLock from typing import Any, Callable diff --git a/ldclient/lru_cache.py b/ldclient/impl/lru_cache.py similarity index 89% rename from ldclient/lru_cache.py rename to ldclient/impl/lru_cache.py index d65c59c4..387d6b91 100644 --- a/ldclient/lru_cache.py +++ b/ldclient/impl/lru_cache.py @@ -1,8 +1,3 @@ -""" -Internal helper class for caching. -""" -# currently excluded from documentation - see docs/README.md - from collections import OrderedDict class SimpleLRUCache: diff --git a/ldclient/operators.py b/ldclient/impl/operators.py similarity index 100% rename from ldclient/operators.py rename to ldclient/impl/operators.py diff --git a/ldclient/impl/repeating_task.py b/ldclient/impl/repeating_task.py index 15794e3a..57d9a088 100644 --- a/ldclient/impl/repeating_task.py +++ b/ldclient/impl/repeating_task.py @@ -1,4 +1,4 @@ -from ldclient.util import log +from ldclient.impl.util import log from threading import Event, Thread import time diff --git a/ldclient/rwlock.py b/ldclient/impl/rwlock.py similarity index 91% rename from ldclient/rwlock.py rename to ldclient/impl/rwlock.py index 4365cd9a..8179d20a 100644 --- a/ldclient/rwlock.py +++ b/ldclient/impl/rwlock.py @@ -1,8 +1,3 @@ -""" -Internal helper class for locking. 
-""" -# currently excluded from documentation - see docs/README.md - import threading diff --git a/ldclient/impl/sse.py b/ldclient/impl/sse.py index 5a867096..1e37b659 100644 --- a/ldclient/impl/sse.py +++ b/ldclient/impl/sse.py @@ -1,8 +1,6 @@ -import urllib3 - from ldclient.config import HTTPConfig from ldclient.impl.http import HTTPFactory -from ldclient.util import throw_if_unsuccessful_response +from ldclient.impl.util import throw_if_unsuccessful_response class _BufferedLineReader: diff --git a/ldclient/impl/util.py b/ldclient/impl/util.py index f6c89db6..47820100 100644 --- a/ldclient/impl/util.py +++ b/ldclient/impl/util.py @@ -1,5 +1,112 @@ +import logging +import sys import time +from ldclient.impl.http import _base_headers + def current_time_millis() -> int: return int(time.time() * 1000) + + +log = logging.getLogger('ldclient.util') # historical logger name + +import queue + + +__LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF) + +__BUILTINS__ = ["key", "ip", "country", "email", + "firstName", "lastName", "avatar", "name", "anonymous"] + +__BASE_TYPES__ = (str, float, int, bool) + + +_retryable_statuses = [400, 408, 429] + +def _headers(config): + base_headers = _base_headers(config) + base_headers.update({'Content-Type': "application/json"}) + return base_headers + +def check_uwsgi(): + if 'uwsgi' in sys.modules: + # noinspection PyPackageRequirements,PyUnresolvedReferences + import uwsgi + if not hasattr(uwsgi, 'opt'): + # means that we are not running under uwsgi + return + + if uwsgi.opt.get('enable-threads'): + return + if uwsgi.opt.get('threads') is not None and int(uwsgi.opt.get('threads')) > 1: + return + log.error("The LaunchDarkly client requires the 'enable-threads' or 'threads' option be passed to uWSGI. " + 'To learn more, read https://docs.launchdarkly.com/sdk/server-side/python#configuring-uwsgi') + + +class Event: + def __init__(self, data='', event='message', event_id=None, retry=None): + self.data = data + self.event = event + self.id = event_id + self.retry = retry + + def __str__(self, *args, **kwargs): + return self.data + + +class UnsuccessfulResponseException(Exception): + def __init__(self, status): + super(UnsuccessfulResponseException, self).__init__("HTTP error %d" % status) + self._status = status + + @property + def status(self): + return self._status + + +def throw_if_unsuccessful_response(resp): + if resp.status >= 400: + raise UnsuccessfulResponseException(resp.status) + + +def is_http_error_recoverable(status): + if status >= 400 and status < 500: + return status in _retryable_statuses # all other 4xx besides these are unrecoverable + return True # all other errors are recoverable + + +def http_error_description(status): + return "HTTP error %d%s" % (status, " (invalid SDK key)" if (status == 401 or status == 403) else "") + + +def http_error_message(status, context, retryable_message = "will retry"): + return "Received %s for %s - %s" % ( + http_error_description(status), + context, + retryable_message if is_http_error_recoverable(status) else "giving up permanently" + ) + + +def check_if_error_is_recoverable_and_log(error_context, status_code, error_desc, recoverable_message): + if status_code and (error_desc is None): + error_desc = http_error_description(status_code) + if status_code and not is_http_error_recoverable(status_code): + log.error("Error %s (giving up permanently): %s" % (error_context, error_desc)) + return False + log.warning("Error %s (%s): %s" % (error_context, recoverable_message, error_desc)) + return True + + +def 
stringify_attrs(attrdict, attrs): + if attrdict is None: + return None + newdict = None + for attr in attrs: + val = attrdict.get(attr) + if val is not None and not isinstance(val, str): + if newdict is None: + newdict = attrdict.copy() + newdict[attr] = str(val) + return attrdict if newdict is None else newdict diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 0030cde6..bb22d00e 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -2,8 +2,8 @@ from typing import Any, Dict, List, Optional, Union from ldclient.versioned_data_kind import FEATURES -from ldclient.rwlock import ReadWriteLock from ldclient.impl.integrations.test_data.test_data_source import _TestDataSource +from ldclient.impl.rwlock import ReadWriteLock TRUE_VARIATION_INDEX = 0 FALSE_VARIATION_INDEX = 1 diff --git a/ldclient/util.py b/ldclient/util.py deleted file mode 100644 index 00da8838..00000000 --- a/ldclient/util.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -General internal helper functions. -""" -# currently excluded from documentation - see docs/README.md - -import logging -import sys -import time - -from ldclient.impl.http import _base_headers - -log = logging.getLogger(sys.modules[__name__].__name__) - -import queue - - -__LONG_SCALE__ = float(0xFFFFFFFFFFFFFFF) - -__BUILTINS__ = ["key", "ip", "country", "email", - "firstName", "lastName", "avatar", "name", "anonymous"] - -__BASE_TYPES__ = (str, float, int, bool) - - -_retryable_statuses = [400, 408, 429] - -def _headers(config): - base_headers = _base_headers(config) - base_headers.update({'Content-Type': "application/json"}) - return base_headers - -def check_uwsgi(): - if 'uwsgi' in sys.modules: - # noinspection PyPackageRequirements,PyUnresolvedReferences - import uwsgi - if not hasattr(uwsgi, 'opt'): - # means that we are not running under uwsgi - return - - if uwsgi.opt.get('enable-threads'): - return - if uwsgi.opt.get('threads') is not None and int(uwsgi.opt.get('threads')) > 1: - return - log.error("The LaunchDarkly client requires the 'enable-threads' or 'threads' option be passed to uWSGI. 
" - 'To learn more, read https://docs.launchdarkly.com/sdk/server-side/python#configuring-uwsgi') - - -class Event: - def __init__(self, data='', event='message', event_id=None, retry=None): - self.data = data - self.event = event - self.id = event_id - self.retry = retry - - def __str__(self, *args, **kwargs): - return self.data - - -class UnsuccessfulResponseException(Exception): - def __init__(self, status): - super(UnsuccessfulResponseException, self).__init__("HTTP error %d" % status) - self._status = status - - @property - def status(self): - return self._status - - -def throw_if_unsuccessful_response(resp): - if resp.status >= 400: - raise UnsuccessfulResponseException(resp.status) - - -def is_http_error_recoverable(status): - if status >= 400 and status < 500: - return status in _retryable_statuses # all other 4xx besides these are unrecoverable - return True # all other errors are recoverable - - -def http_error_description(status): - return "HTTP error %d%s" % (status, " (invalid SDK key)" if (status == 401 or status == 403) else "") - - -def http_error_message(status, context, retryable_message = "will retry"): - return "Received %s for %s - %s" % ( - http_error_description(status), - context, - retryable_message if is_http_error_recoverable(status) else "giving up permanently" - ) - - -def check_if_error_is_recoverable_and_log(error_context, status_code, error_desc, recoverable_message): - if status_code and (error_desc is None): - error_desc = http_error_description(status_code) - if status_code and not is_http_error_recoverable(status_code): - log.error("Error %s (giving up permanently): %s" % (error_context, error_desc)) - return False - log.warning("Error %s (%s): %s" % (error_context, recoverable_message, error_desc)) - return True - - -def stringify_attrs(attrdict, attrs): - if attrdict is None: - return None - newdict = None - for attr in attrs: - val = attrdict.get(attr) - if val is not None and not isinstance(val, str): - if newdict is None: - newdict = attrdict.copy() - newdict[attr] = str(val) - return attrdict if newdict is None else newdict diff --git a/testing/impl/datasource/__init__.py b/testing/impl/datasource/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/testing/test_feature_requester.py b/testing/impl/datasource/test_feature_requester.py similarity index 97% rename from testing/test_feature_requester.py rename to testing/impl/datasource/test_feature_requester.py index db18f555..b6eacc1e 100644 --- a/testing/test_feature_requester.py +++ b/testing/impl/datasource/test_feature_requester.py @@ -1,8 +1,5 @@ -import pytest - from ldclient.config import Config -from ldclient.feature_requester import FeatureRequesterImpl -from ldclient.util import UnsuccessfulResponseException +from ldclient.impl.datasource.feature_requester import FeatureRequesterImpl from ldclient.version import VERSION from ldclient.versioned_data_kind import FEATURES, SEGMENTS from testing.http_util import start_server, BasicResponse, JsonResponse diff --git a/testing/test_polling_processor.py b/testing/impl/datasource/test_polling_processor.py similarity index 94% rename from testing/test_polling_processor.py rename to testing/impl/datasource/test_polling_processor.py index c34f9721..068d1684 100644 --- a/testing/test_polling_processor.py +++ b/testing/impl/datasource/test_polling_processor.py @@ -1,13 +1,11 @@ -import pytest import threading import time import mock from ldclient.config import Config from ldclient.feature_store import InMemoryFeatureStore -from 
ldclient.interfaces import FeatureRequester -from ldclient.polling import PollingUpdateProcessor -from ldclient.util import UnsuccessfulResponseException +from ldclient.impl.datasource.polling import PollingUpdateProcessor +from ldclient.impl.util import UnsuccessfulResponseException from ldclient.versioned_data_kind import FEATURES, SEGMENTS from testing.builders import * diff --git a/testing/test_streaming.py b/testing/impl/datasource/test_streaming.py similarity index 98% rename from testing/test_streaming.py rename to testing/impl/datasource/test_streaming.py index adc1cf44..5bf3ba38 100644 --- a/testing/test_streaming.py +++ b/testing/impl/datasource/test_streaming.py @@ -4,9 +4,9 @@ import time from ldclient.config import Config -from ldclient.diagnostics import _DiagnosticAccumulator from ldclient.feature_store import InMemoryFeatureStore -from ldclient.streaming import StreamingUpdateProcessor +from ldclient.impl.datasource.streaming import StreamingUpdateProcessor +from ldclient.impl.events.diagnostics import _DiagnosticAccumulator from ldclient.version import VERSION from ldclient.versioned_data_kind import FEATURES, SEGMENTS diff --git a/testing/test_diagnostics.py b/testing/impl/events/test_diagnostics.py similarity index 97% rename from testing/test_diagnostics.py rename to testing/impl/events/test_diagnostics.py index 4423584e..079430f8 100644 --- a/testing/test_diagnostics.py +++ b/testing/impl/events/test_diagnostics.py @@ -2,9 +2,9 @@ import uuid from ldclient.config import Config, HTTPConfig -from ldclient.diagnostics import create_diagnostic_id, create_diagnostic_init, _DiagnosticAccumulator, _create_diagnostic_config_object from ldclient.feature_store import CacheConfig from ldclient.feature_store_helpers import CachingStoreWrapper +from ldclient.impl.events.diagnostics import create_diagnostic_id, create_diagnostic_init, _DiagnosticAccumulator, _create_diagnostic_config_object def test_create_diagnostic_id(): test_config = Config(sdk_key = "SDK_KEY", http=HTTPConfig()) diff --git a/testing/impl/events/test_event_processor.py b/testing/impl/events/test_event_processor.py index d72f2e01..5f9636c1 100644 --- a/testing/impl/events/test_event_processor.py +++ b/testing/impl/events/test_event_processor.py @@ -5,7 +5,7 @@ from ldclient.config import Config from ldclient.context import Context -from ldclient.diagnostics import create_diagnostic_id, _DiagnosticAccumulator +from ldclient.impl.events.diagnostics import create_diagnostic_id, _DiagnosticAccumulator from ldclient.impl.events.event_processor import DefaultEventProcessor from ldclient.impl.events.types import EventInput, EventInputCustom, EventInputEvaluation, EventInputIdentify diff --git a/testing/test_lru_cache.py b/testing/impl/test_lru_cache.py similarity index 94% rename from testing/test_lru_cache.py rename to testing/impl/test_lru_cache.py index a8de97fd..2faba036 100644 --- a/testing/test_lru_cache.py +++ b/testing/impl/test_lru_cache.py @@ -1,6 +1,4 @@ -import pytest - -from ldclient.lru_cache import SimpleLRUCache +from ldclient.impl.lru_cache import SimpleLRUCache def test_retains_values_up_to_capacity(): lru = SimpleLRUCache(3) diff --git a/testing/test_operators.py b/testing/impl/test_operators.py similarity index 99% rename from testing/test_operators.py rename to testing/impl/test_operators.py index bfd72162..7b9d7294 100644 --- a/testing/test_operators.py +++ b/testing/impl/test_operators.py @@ -1,6 +1,6 @@ import pytest -from ldclient import operators +from ldclient.impl import operators 
@pytest.mark.parametrize("op,value1,value2,expected", [ diff --git a/testing/test_retry_delay.py b/testing/impl/test_retry_delay.py similarity index 100% rename from testing/test_retry_delay.py rename to testing/impl/test_retry_delay.py diff --git a/testing/test_ldclient.py b/testing/test_ldclient.py index 6fd9bbc3..506773b1 100644 --- a/testing/test_ldclient.py +++ b/testing/test_ldclient.py @@ -1,9 +1,9 @@ from ldclient.client import LDClient, Config, Context from ldclient.feature_store import InMemoryFeatureStore +from ldclient.impl.datasource.polling import PollingUpdateProcessor +from ldclient.impl.datasource.streaming import StreamingUpdateProcessor from ldclient.impl.stubs import NullUpdateProcessor from ldclient.interfaces import UpdateProcessor -from ldclient.polling import PollingUpdateProcessor -from ldclient.streaming import StreamingUpdateProcessor from ldclient.versioned_data_kind import FEATURES, SEGMENTS import pytest From 66afd975bab1e639892e9445d475ae163044fb55 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 22 Dec 2022 16:51:19 -0800 Subject: [PATCH 337/356] TestData changes for contexts --- .../test_data/test_data_source.py | 6 +- ldclient/integrations/test_data.py | 221 ++++++-- testing/integrations/test_test_data_source.py | 495 ++++++++++-------- 3 files changed, 458 insertions(+), 264 deletions(-) diff --git a/ldclient/impl/integrations/test_data/test_data_source.py b/ldclient/impl/integrations/test_data/test_data_source.py index e6272925..36483a69 100644 --- a/ldclient/impl/integrations/test_data/test_data_source.py +++ b/ldclient/impl/integrations/test_data/test_data_source.py @@ -1,7 +1,9 @@ -import copy from ldclient.versioned_data_kind import FEATURES -from ldclient.rwlock import ReadWriteLock +# This is the internal component that's created when you initialize an SDK instance that is using +# TestData. The TestData object manages the setup of the fake data, and it broadcasts the data +# through _TestDataSource to inject it into the SDK. If there are multiple SDK instances connected +# to a TestData, each has its own _TestDataSource. 
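+#
+# A usage sketch (the flag key and config values are hypothetical, shown only
+# to make the relationship above concrete; test_can_handle_multiple_clients
+# later in this patch exercises the real wiring):
+#
+#     td = TestData.data_source()
+#     td.update(td.flag('flag-key').variation_for_all(True))
+#     client_a = LDClient(config=Config('sdk-key', update_processor_class=td, send_events=False))
+#     client_b = LDClient(config=Config('sdk-key', update_processor_class=td, send_events=False))
+#     # each client gets its own _TestDataSource, and any later td.update()
+#     # call is broadcast to both of them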
class _TestDataSource(): diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 0030cde6..07d1710c 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -1,6 +1,7 @@ import copy -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Set, Union +from ldclient.context import Context from ldclient.versioned_data_kind import FEATURES from ldclient.rwlock import ReadWriteLock from ldclient.impl.integrations.test_data.test_data_source import _TestDataSource @@ -153,7 +154,7 @@ def __init__(self, key: str): self._variations = [] # type: List[Any] self._off_variation = None # type: Optional[int] self._fallthrough_variation = None # type: Optional[int] - self._targets = {} # type: Dict[int, List[str]] + self._targets = {} # type: Dict[str, Dict[int, Set[str]]] self._rules = [] # type: List[FlagRuleBuilder] # Note that _copy is private by convention, because we don't want developers to @@ -170,7 +171,9 @@ def _copy(self) -> 'FlagBuilder': to._variations = copy.copy(self._variations) to._off_variation = self._off_variation to._fallthrough_variation = self._fallthrough_variation - to._targets = copy.copy(self._targets) + to._targets = dict() + for k, v in self._targets.items(): + to._targets[k] = copy.copy(v) to._rules = copy.copy(self._rules) return to @@ -276,9 +279,17 @@ def variations(self, *variations) -> 'FlagBuilder': return self def variation_for_all_users(self, variation: Union[bool, int]) -> 'FlagBuilder': - """Sets the flag to always return the specified variation for all users. + """Deprecated name for variation_for_all(). - The variation is specified, Targeting is switched on, and any existing targets or rules are removed. + .. deprecated:: 8.0.0 + Use :meth:`ldclient.integrations.test_data.FlagBuilder.variation_for_all()`. + """ + return self.variation_for_all(variation) + + def variation_for_all(self, variation: Union[bool, int]) -> 'FlagBuilder': + """Sets the flag to always return the specified variation for all contexts. + + The variation is specified, targeting is switched on, and any existing targets or rules are removed. The fallthrough variation is set to the specified value. The off variation is left unchanged. If the flag was previously configured with other variations and the variation specified is a boolean, @@ -294,6 +305,14 @@ def variation_for_all_users(self, variation: Union[bool, int]) -> 'FlagBuilder': return self.clear_rules().clear_targets().on(True).fallthrough_variation(variation) def value_for_all_users(self, value: Any) -> 'FlagBuilder': + """Deprecated name for value_for_all(). + + .. deprecated:: 8.0.0 + Use :meth:`ldclient.integrations.test_data.FlagBuilder.value_for_all()`. + """ + return self.value_for_all(value) + + def value_for_all(self, value: Any) -> 'FlagBuilder': """ Sets the flag to always return the specified variation value for all users. @@ -321,37 +340,50 @@ def variation_for_user(self, user_key: str, variation: Union[bool, int]) -> 'Fla ``0`` for the first, ``1`` for the second, etc. 
:return: the flag builder """ - if isinstance(variation, bool): - # `variation` is True/False value - return self.boolean_flag().variation_for_user(user_key, _variation_for_boolean(variation)) - else: - # `variation` specifies the index of the variation to set - targets = self._targets + return self.variation_for_key(Context.DEFAULT_KIND, user_key, variation) - for idx, var in enumerate(self._variations): - if (idx == variation): - # If there is no set at the current variation, set it to be empty - target_for_variation = [] # type: List[str] - if idx in targets: - target_for_variation = targets[idx] + def variation_for_key(self, context_kind: str, context_key: str, variation: Union[bool, int]) -> 'FlagBuilder': + """Sets the flag to return the specified variation for a specific context, identified + by context kind and key, when targeting is on. - # If user is not in the current variation set, add them - if user_key not in target_for_variation: - target_for_variation.append(user_key) + This has no effect when targeting is turned off for the flag. - self._targets[idx] = target_for_variation + If the flag was previously configured with other variations and the variation specified is a boolean, + this also changes it to a boolean flag. - else: - # Remove user from the other variation set if necessary - if idx in targets: - target_for_variation = targets[idx] - if user_key in target_for_variation: - user_key_idx = target_for_variation.index(user_key) - del target_for_variation[user_key_idx] + :param context_kind: the context kind + :param context_key: the context key + :param bool|int variation: ``True`` or ``False`` or the desired variation index to return: + ``0`` for the first, ``1`` for the second, etc. + :return: the flag builder + """ + if isinstance(variation, bool): + # `variation` is True/False value + return self.boolean_flag().variation_for_key(context_kind, context_key, _variation_for_boolean(variation)) + + # `variation` specifies the index of the variation to set + targets = self._targets.get(context_kind) + if targets is None: + targets = {} + self._targets[context_kind] = targets + + for idx, var in enumerate(self._variations): + if (idx == variation): + # If there is no set at the current variation, set it to be empty + target_for_variation = targets.get(idx) + if target_for_variation is None: + target_for_variation = set() + targets[idx] = target_for_variation + + # If key is not in the current variation set, add it + target_for_variation.add(context_key) - self._targets[idx] = target_for_variation + else: + # Remove key from the other variation set if necessary + if idx in targets: + targets[idx].discard(context_key) - return self + return self def _add_rule(self, flag_rule_builder: 'FlagRuleBuilder'): self._rules.append(flag_rule_builder) @@ -359,6 +391,9 @@ def _add_rule(self, flag_rule_builder: 'FlagRuleBuilder'): def if_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Starts defining a flag rule, using the "is one of" operator. + This is a shortcut for calling :meth:`ldclient.integrations.test_data.FlagBuilder.if_match_context()` + with "user" as the context kind. 
+
         **Example:** create a rule that returns ``True`` if the name is "Patsy" or "Edina"
         ::
 
             td.flag("flag") \\
                 .if_match('name', 'Patsy', 'Edina') \\
                 .then_return(True)
 
         :param attribute: the user attribute to match against
         :param values: values to compare to
         :return: the flag rule builder
         """
+        return self.if_match_context(Context.DEFAULT_KIND, attribute, *values)
+
+    def if_match_context(self, context_kind: str, attribute: str, *values) -> 'FlagRuleBuilder':
+        """Starts defining a flag rule, using the "is one of" operator. This matching expression only
+        applies to contexts of a specific kind.
+
+        **Example:** create a rule that returns ``True`` if the name attribute for the
+        "company" context is "Ella" or "Monsoon":
+        ::
+
+            td.flag("flag") \\
+                .if_match_context('company', 'name', 'Ella', 'Monsoon') \\
+                .then_return(True)
+
+        :param context_kind: the context kind
+        :param attribute: the context attribute to match against
+        :param values: values to compare to
+        :return: the flag rule builder
+        """
         flag_rule_builder = FlagRuleBuilder(self)
-        return flag_rule_builder.and_match(attribute, *values)
+        return flag_rule_builder.and_match_context(context_kind, attribute, *values)
 
     def if_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder':
         """Starts defining a flag rule, using the "is not one of" operator.
 
+        This is a shortcut for calling :meth:`ldclient.integrations.test_data.FlagBuilder.if_not_match_context()`
+        with "user" as the context kind.
+
         **Example:** create a rule that returns ``True`` if the name is neither "Saffron" nor "Bubble"
         ::
 
             td.flag("flag") \\
                 .if_not_match('name', 'Saffron', 'Bubble') \\
                 .then_return(True)
 
         :param attribute: the user attribute to match against
         :param values: values to compare to
         :return: the flag rule builder
         """
+        return self.if_not_match_context(Context.DEFAULT_KIND, attribute, *values)
+
+    def if_not_match_context(self, context_kind: str, attribute: str, *values) -> 'FlagRuleBuilder':
+        """Starts defining a flag rule, using the "is not one of" operator. This matching expression only
+        applies to contexts of a specific kind.
+
+        **Example:** create a rule that returns ``True`` if the name attribute for the
+        "company" context is neither "Pendant" nor "Sterling Cooper":
+        ::
+
+            td.flag("flag") \\
+                .if_not_match_context('company', 'name', 'Pendant', 'Sterling Cooper') \\
+                .then_return(True)
+
+        :param context_kind: the context kind
+        :param attribute: the context attribute to match against
+        :param values: values to compare to
+        :return: the flag rule builder
+        """
         flag_rule_builder = FlagRuleBuilder(self)
-        return flag_rule_builder.and_not_match(attribute, values)
+        return flag_rule_builder.and_not_match_context(context_kind, attribute, *values)
 
     def clear_rules(self) -> 'FlagBuilder':
         """Removes any existing rules from the flag. This undoes the effect of methods like
-        :meth:`ldclient.integrations.test_data.FlagBuilder.if_match()`
+        :meth:`ldclient.integrations.test_data.FlagBuilder.if_match()`.
 
         :return: the same flag builder
         """
@@ -403,7 +479,7 @@ def clear_rules(self) -> 'FlagBuilder':
 
     def clear_targets(self) -> 'FlagBuilder':
         """Removes any existing targets from the flag. This undoes the effect of methods like
-        :meth:`ldclient.integrations.test_data.FlagBuilder.variation_for_user()`
+        :meth:`ldclient.integrations.test_data.FlagBuilder.variation_for_user()`.
:return: the same flag builder """ @@ -422,7 +498,9 @@ def _build(self, version: int) -> dict: 'key': self._key, 'version': version, 'on': self._on, - 'variations': self._variations + 'variations': self._variations, + 'prerequisites': [], + 'salt': '' } base_flag_object['offVariation'] = self._off_variation @@ -431,12 +509,27 @@ def _build(self, version: int) -> dict: } targets = [] - for var_index, user_keys in self._targets.items(): - targets.append({ - 'variation': var_index, - 'values': user_keys - }) + context_targets = [] + for target_context_kind, target_variations in self._targets.items(): + for var_index, target_keys in target_variations.items(): + if target_context_kind == Context.DEFAULT_KIND: + targets.append({ + 'variation': var_index, + 'values': sorted(list(target_keys)) # sorting just for test determinacy + }) + context_targets.append({ + 'contextKind': target_context_kind, + 'variation': var_index, + 'values': [] + }) + else: + context_targets.append({ + 'contextKind': target_context_kind, + 'variation': var_index, + 'values': sorted(list(target_keys)) # sorting just for test determinacy + }) base_flag_object['targets'] = targets + base_flag_object['contextTargets'] = context_targets rules = [] for idx, rule in enumerate(self._rules): @@ -471,6 +564,9 @@ def __init__(self, flag_builder: FlagBuilder): def and_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Adds another clause, using the "is one of" operator. + This is a shortcut for calling :meth:`ldclient.integrations.test_data.FlagRuleBuilder.and_match_context()` + with "user" as the context kind. + **Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is "gb" :: @@ -483,7 +579,28 @@ def and_match(self, attribute: str, *values) -> 'FlagRuleBuilder': :param values: values to compare to :return: the flag rule builder """ + return self.and_match_context(Context.DEFAULT_KIND, attribute, *values) + + def and_match_context(self, context_kind: str, attribute: str, *values) -> 'FlagRuleBuilder': + """Adds another clause, using the "is one of" operator. This matching expression only + applies to contexts of a specific kind. + + **Example:** create a rule that returns ``True`` if the name attribute for the + "company" context is "Ella", and the country attribute for the "company" context is "gb": + :: + + td.flag('flag') \\ + .if_match_context('company', 'name', 'Ella') \\ + .and_match_context('company', 'country', 'gb') \\ + .then_return(True) + + :param context_kind: the context kind + :param attribute: the context attribute to match against + :param values: values to compare to + :return: the flag rule builder + """ self._clauses.append({ + 'contextKind': context_kind, 'attribute': attribute, 'op': 'in', 'values': list(values), @@ -494,6 +611,9 @@ def and_match(self, attribute: str, *values) -> 'FlagRuleBuilder': def and_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder': """Adds another clause, using the "is not one of" operator. + This is a shortcut for calling :meth:`ldclient.integrations.test_data.FlagRuleBuilder.and_not_match_context()` + with "user" as the context kind. 
+ **Example:** create a rule that returns ``True`` if the name is "Patsy" and the country is not "gb" :: @@ -506,7 +626,28 @@ def and_not_match(self, attribute: str, *values) -> 'FlagRuleBuilder': :param values: values to compare to :return: the flag rule builder """ + return self.and_not_match_context(Context.DEFAULT_KIND, attribute, *values) + + def and_not_match_context(self, context_kind: str, attribute: str, *values) -> 'FlagRuleBuilder': + """Adds another clause, using the "is not one of" operator. This matching expression only + applies to contexts of a specific kind. + + **Example:** create a rule that returns ``True`` if the name attribute for the + "company" context is "Ella", and the country attribute for the "company" context is not "gb": + :: + + td.flag('flag') \\ + .if_match_context('company', 'name', 'Ella') \\ + .and_not_match_context('company', 'country', 'gb') \\ + .then_return(True) + + :param context_kind: the context kind + :param attribute: the context attribute to match against + :param values: values to compare to + :return: the flag rule builder + """ self._clauses.append({ + 'contextKind': context_kind, 'attribute': attribute, 'op': 'in', 'values': list(values), diff --git a/testing/integrations/test_test_data_source.py b/testing/integrations/test_test_data_source.py index f4e06d8e..38708b8e 100644 --- a/testing/integrations/test_test_data_source.py +++ b/testing/integrations/test_test_data_source.py @@ -1,12 +1,12 @@ import pytest -import warnings +from typing import Callable from ldclient.client import LDClient from ldclient.config import Config from ldclient.feature_store import InMemoryFeatureStore -from ldclient.versioned_data_kind import FEATURES, SEGMENTS +from ldclient.versioned_data_kind import FEATURES -from ldclient.integrations.test_data import TestData +from ldclient.integrations.test_data import TestData, FlagBuilder ## Test Data + Data Source @@ -20,15 +20,264 @@ def test_makes_valid_datasource(): assert store.all(FEATURES, lambda x: x) == {} -def test_makes_valid_datasource_with_flag(): - td = TestData.data_source() - flag = td.flag(key='test-flag') - assert flag is not None +def verify_flag_builder(desc: str, expected_props: dict, builder_actions: Callable[[FlagBuilder], FlagBuilder]): + all_expected_props = { + 'key': 'test-flag', + 'version': 1, + 'on': True, + 'prerequisites': [], + 'targets': [], + 'contextTargets': [], + 'rules': [], + 'salt': '', + 'variations': [True, False], + 'offVariation': 1, + 'fallthrough': {'variation': 0} + } + all_expected_props.update(expected_props) - builtFlag = flag._build(0) - assert builtFlag['key'] is 'test-flag' - assert builtFlag['on'] is True - assert builtFlag['variations'] == [True, False] + td = TestData.data_source() + flag_builder = builder_actions(td.flag(key='test-flag')) + built_flag = flag_builder._build(1) + assert built_flag == all_expected_props, "did not get expected flag properties for '%s' test" % desc + + +@pytest.mark.parametrize('expected_props,builder_actions', [ + pytest.param( + {}, + lambda f: f, + id='defaults' + ), + pytest.param( + {}, + lambda f: f.boolean_flag(), + id='changing default flag to boolean flag has no effect' + ), + pytest.param( + {}, + lambda f: f.variations('a', 'b').boolean_flag(), + id='non-boolean flag can be changed to boolean flag', + ), + pytest.param( + {'on': False}, + lambda f: f.on(False), + id='flag can be turned off' + ), + pytest.param( + {}, + lambda f: f.on(False).on(True), + id='flag can be turned on', + ), + pytest.param( + {'fallthrough': 
{'variation': 1}}, + lambda f: f.variation_for_all(False), + id='set false variation for all' + ), + pytest.param( + {'fallthrough': {'variation': 0}}, + lambda f: f.variation_for_all(True), + id='set true variation for all' + ), + pytest.param( + {'variations': ['a', 'b', 'c'], 'fallthrough': {'variation': 2}}, + lambda f: f.variations('a', 'b', 'c').variation_for_all(2), + id='set variation index for all' + ), + pytest.param( + {'offVariation': 0}, + lambda f: f.off_variation(True), + id='set off variation boolean' + ), + pytest.param( + {'variations': ['a', 'b', 'c'], 'offVariation': 2}, + lambda f: f.variations('a', 'b', 'c').off_variation(2), + id='set off variation index' + ), + pytest.param( + { + 'targets': [ + {'variation': 0, 'values': ['key1', 'key2']}, + ], + 'contextTargets': [ + {'contextKind': 'user', 'variation': 0, 'values': []}, + {'contextKind': 'kind1', 'variation': 0, 'values': ['key3', 'key4']}, + {'contextKind': 'kind1', 'variation': 1, 'values': ['key5', 'key6']}, + ] + }, + lambda f: f.variation_for_key('user', 'key1', True) \ + .variation_for_key('user', 'key2', True) \ + .variation_for_key('kind1', 'key3', True) \ + .variation_for_key('kind1', 'key5', False) \ + .variation_for_key('kind1', 'key4', True) \ + .variation_for_key('kind1', 'key6', False), + id='set context targets as boolean' + ), + pytest.param( + { + 'variations': ['a', 'b'], + 'targets': [ + {'variation': 0, 'values': ['key1', 'key2']}, + ], + 'contextTargets': [ + {'contextKind': 'user', 'variation': 0, 'values': []}, + {'contextKind': 'kind1', 'variation': 0, 'values': ['key3', 'key4']}, + {'contextKind': 'kind1', 'variation': 1, 'values': ['key5', 'key6']}, + ] + }, + lambda f: f.variations('a', 'b') \ + .variation_for_key('user', 'key1', 0) \ + .variation_for_key('user', 'key2', 0) \ + .variation_for_key('kind1', 'key3', 0) \ + .variation_for_key('kind1', 'key5', 1) \ + .variation_for_key('kind1', 'key4', 0) \ + .variation_for_key('kind1', 'key6', 1), + id='set context targets as variation index' + ), + pytest.param( + { + 'contextTargets': [ + {'contextKind': 'kind1', 'variation': 0, 'values': ['key1', 'key2']}, + {'contextKind': 'kind1', 'variation': 1, 'values': ['key3']} + ] + }, + lambda f: f.variation_for_key('kind1', 'key1', 0) \ + .variation_for_key('kind1', 'key2', 1) \ + .variation_for_key('kind1', 'key3', 1) \ + .variation_for_key('kind1', 'key2', 0), + id='replace existing context target key' + ), + pytest.param( + { + 'variations': ['a', 'b'], + 'contextTargets': [ + {'contextKind': 'kind1', 'variation': 1, 'values': ['key1']}, + ] + }, + lambda f: f.variations('a', 'b') \ + .variation_for_key('kind1', 'key1', 1) \ + .variation_for_key('kind1', 'key2', 3), + id='ignore target for nonexistent variation' + ), + pytest.param( + { + 'targets': [ + {'variation': 0, 'values': ['key1']} + ], + 'contextTargets': [ + {'contextKind': 'user', 'variation': 0, 'values': []} + ] + }, + lambda f: f.variation_for_user('key1', True), + id='variation_for_user is shortcut for variation_for_key' + ), + pytest.param( + {}, + lambda f: f.variation_for_key('kind1', 'key1', 0) \ + .clear_targets(), + id='clear targets' + ), + pytest.param( + { + 'rules': [ + { + 'variation': 1, + 'id': 'rule0', + 'clauses': [ + {'contextKind': 'kind1', 'attribute': 'attr1', 'op': 'in', 'values': ['a', 'b'], 'negate': False} + ] + } + ] + }, + lambda f: f.if_match_context('kind1', 'attr1', 'a', 'b').then_return(1), + id='if_match_context' + ), + pytest.param( + { + 'rules': [ + { + 'variation': 1, + 'id': 'rule0', + 
'clauses': [ + {'contextKind': 'kind1', 'attribute': 'attr1', 'op': 'in', 'values': ['a', 'b'], 'negate': True} + ] + } + ] + }, + lambda f: f.if_not_match_context('kind1', 'attr1', 'a', 'b').then_return(1), + id='if_not_match_context' + ), + pytest.param( + { + 'rules': [ + { + 'variation': 1, + 'id': 'rule0', + 'clauses': [ + {'contextKind': 'user', 'attribute': 'attr1', 'op': 'in', 'values': ['a', 'b'], 'negate': False} + ] + } + ] + }, + lambda f: f.if_match('attr1', 'a', 'b').then_return(1), + id='if_match is shortcut for if_match_context' + ), + pytest.param( + { + 'rules': [ + { + 'variation': 1, + 'id': 'rule0', + 'clauses': [ + {'contextKind': 'user', 'attribute': 'attr1', 'op': 'in', 'values': ['a', 'b'], 'negate': True} + ] + } + ] + }, + lambda f: f.if_not_match('attr1', 'a', 'b').then_return(1), + id='if_not_match is shortcut for if_not_match_context' + ), + pytest.param( + { + 'rules': [ + { + 'variation': 1, + 'id': 'rule0', + 'clauses': [ + {'contextKind': 'kind1', 'attribute': 'attr1', 'op': 'in', 'values': ['a', 'b'], 'negate': False}, + {'contextKind': 'kind1', 'attribute': 'attr2', 'op': 'in', 'values': ['c', 'd'], 'negate': False} + ] + } + ] + }, + lambda f: f.if_match_context('kind1', 'attr1', 'a', 'b') \ + .and_match_context('kind1', 'attr2', 'c', 'd').then_return(1), + id='and_match_context' + ), + pytest.param( + { + 'rules': [ + { + 'variation': 1, + 'id': 'rule0', + 'clauses': [ + {'contextKind': 'kind1', 'attribute': 'attr1', 'op': 'in', 'values': ['a', 'b'], 'negate': False}, + {'contextKind': 'kind1', 'attribute': 'attr2', 'op': 'in', 'values': ['c', 'd'], 'negate': True} + ] + } + ] + }, + lambda f: f.if_match_context('kind1', 'attr1', 'a', 'b') \ + .and_not_match_context('kind1', 'attr2', 'c', 'd').then_return(1), + id='and_not_match_context' + ), + pytest.param( + {}, + lambda f: f.if_match_context('kind1', 'attr1', 'a').then_return(1).clear_rules(), + id='clear rules' + ) +]) +def test_flag_configs_parameterized(expected_props: dict, builder_actions: Callable[[FlagBuilder], FlagBuilder]): + verify_flag_builder('x', expected_props, builder_actions) def test_can_retrieve_flag_from_store(): @@ -71,7 +320,9 @@ def test_updates_after_client_close_have_no_affect(): def test_can_handle_multiple_clients(): td = TestData.data_source() - td.update(td.flag('flag')) + flag_builder = td.flag('flag') + built_flag = flag_builder._build(1) + td.update(flag_builder) store = InMemoryFeatureStore() store2 = InMemoryFeatureStore() @@ -82,223 +333,23 @@ def test_can_handle_multiple_clients(): config2 = Config('SDK_KEY', update_processor_class = td, send_events = False, offline = True, feature_store = store2) client2 = LDClient(config=config2) - assert store.get(FEATURES, 'flag') == FEATURES.decode({ - 'fallthrough': { - 'variation': 0, - }, - 'key': 'flag', - 'offVariation': 1, - 'on': True, - 'rules': [], - 'targets': [], - 'variations': [True, False], - 'version': 1 - }) - - assert store2.get(FEATURES, 'flag') == FEATURES.decode({ - 'fallthrough': { - 'variation': 0, - }, - 'key': 'flag', - 'offVariation': 1, - 'on': True, - 'rules': [], - 'targets': [], - 'variations': [True, False], - 'version': 1 - }) - - td.update(td.flag('flag').variation_for_all_users(False)) - - assert store.get(FEATURES, 'flag') == FEATURES.decode({ - 'fallthrough': { - 'variation': 1, - }, - 'key': 'flag', - 'offVariation': 1, - 'on': True, - 'rules': [], - 'targets': [], - 'variations': [True, False], - 'version': 2 - }) - - assert store2.get(FEATURES, 'flag') == FEATURES.decode({ - 'fallthrough': 
{ - 'variation': 1, - }, - 'key': 'flag', - 'offVariation': 1, - 'on': True, - 'rules': [], - 'targets': [], - 'variations': [True, False], - 'version': 2 - }) - - client.close() - client2.close() - - -## FlagBuilder - -def test_flagbuilder_defaults_to_boolean_flag(): - td = TestData.data_source() - flag = td.flag('empty-flag') - assert flag._build(0)['variations'] == [True, False] - assert flag._build(0)['fallthrough'] == {'variation': 0} - assert flag._build(0)['offVariation'] == 1 - -def test_flagbuilder_can_turn_flag_off(): - td = TestData.data_source() - flag = td.flag('test-flag') - flag.on(False) + assert store.get(FEATURES, 'flag') == FEATURES.decode(built_flag) - assert flag._build(0)['on'] is False + assert store2.get(FEATURES, 'flag') == FEATURES.decode(built_flag) -def test_flagbuilder_can_set_fallthrough_variation(): - td = TestData.data_source() - flag = td.flag('test-flag') - flag.fallthrough_variation(2) + flag_builder_v2 = td.flag('flag').variation_for_all_users(False) + td.update(flag_builder_v2) + built_flag_v2 = flag_builder_v2._build(2) - assert flag._build(0)['fallthrough'] == {'variation': 2} + assert store.get(FEATURES, 'flag') == FEATURES.decode(built_flag_v2) - flag.fallthrough_variation(True) + assert store2.get(FEATURES, 'flag') == FEATURES.decode(built_flag_v2) - assert flag._build(0)['fallthrough'] == {'variation': 0} - -def test_flagbuilder_can_set_off_variation(): - td = TestData.data_source() - flag = td.flag('test-flag') - flag.off_variation(2) - - assert flag._build(0)['offVariation'] == 2 - - flag.off_variation(True) - - assert flag._build(0)['offVariation'] == 0 - -def test_flagbuilder_can_make_boolean_flag(): - td = TestData.data_source() - flag = td.flag('boolean-flag').boolean_flag() - - builtFlag = flag._build(0) - assert builtFlag['fallthrough'] == {'variation': 0} - assert builtFlag['offVariation'] == 1 - -def test_flagbuilder_can_set_variation_when_targeting_is_off(): - td = TestData.data_source() - flag = td.flag('test-flag') \ - .on(False) - assert flag._build(0)['on'] == False - assert flag._build(0)['variations'] == [True,False] - flag.variations('dog', 'cat') - assert flag._build(0)['variations'] == ['dog','cat'] - -def test_flagbuilder_can_set_variation_for_all_users(): - td = TestData.data_source() - flag = td.flag('test-flag') - flag.variation_for_all_users(True) - assert flag._build(0)['fallthrough'] == {'variation': 0} - -def test_flagbuilder_clears_existing_rules_and_targets_when_setting_variation_for_all_users(): - td = TestData.data_source() - - flag = td.flag('test-flag').if_match('name', 'christian').then_return(False).variation_for_user('christian', False).variation_for_all_users(True)._build(0) - - assert flag['rules'] == [] - assert flag['targets'] == [] - -def test_flagbuilder_can_set_variations(): - td = TestData.data_source() - flag = td.flag('test-flag') - flag.variations(2,3,4,5) - assert flag._build(0)['variations'] == [2,3,4,5] - -def test_flagbuilder_can_make_an_immutable_copy(): - td = TestData.data_source() - flag = td.flag('test-flag') - flag.variations(1,2) - copy_of_flag = flag._copy() - flag.variations(3,4) - assert copy_of_flag._build(0)['variations'] == [1,2] - - copy_of_flag.variations(5,6) - assert flag._build(0)['variations'] == [3,4] - -def test_flagbuilder_can_set_boolean_variation_for_user(): - td = TestData.data_source() - flag = td.flag('user-variation-flag') - flag.variation_for_user('christian', False) - expected_targets = [ - { - 'variation': 1, - 'values': ['christian'] - } - ] - assert 
flag._build(0)['targets'] == expected_targets - -def test_flagbuilder_can_set_numerical_variation_for_user(): - td = TestData.data_source() - flag = td.flag('user-variation-flag') - flag.variations('a','b','c') - flag.variation_for_user('christian', 2) - expected_targets = [ - { - 'variation': 2, - 'values': ['christian'] - } - ] - assert flag._build(1)['targets'] == expected_targets - -def test_flagbuilder_can_set_value_for_all_users(): - td = TestData.data_source() - flag = td.flag('user-value-flag') - flag.variation_for_user('john', 1) - - built_flag = flag._build(0) - assert built_flag['targets'] == [{'values': ['john'], 'variation': 1}] - assert built_flag['variations'] == [True, False] - - flag.value_for_all_users('yes') - - built_flag2 = flag._build(0) - assert built_flag2['targets'] == [] - assert built_flag2['variations'] == ['yes'] - - -def test_flagbuilder_can_build(): - td = TestData.data_source() - flag = td.flag('some-flag') - flag.if_match('country', 'fr').then_return(True) - expected_result = { - 'fallthrough': { - 'variation': 0, - }, - 'key': 'some-flag', - 'offVariation': 1, - 'on': True, - 'targets': [], - 'variations': [True, False], - 'rules': [ - { - 'clauses': [ - {'attribute': 'country', - 'negate': False, - 'op': 'in', - 'values': ['fr'] - } - ], - 'id': 'rule0', - 'variation': 0 - } - ], - 'version': 1, - } + client.close() + client2.close() - assert flag._build(1) == expected_result -def test_flag_can_evaluate_rules(): +def test_flag_evaluation_with_client(): td = TestData.data_source() store = InMemoryFeatureStore() From afef6a49fd859864f7a6b5995e4041870384e71a Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 23 Dec 2022 13:06:11 -0800 Subject: [PATCH 338/356] general doc comment fixes for 8.0 --- docs/api-main.rst | 6 +- ldclient/client.py | 26 ++++- ldclient/config.py | 26 ++--- ldclient/context.py | 160 ++++++++++++++++++----------- ldclient/integrations/test_data.py | 4 +- ldclient/interfaces.py | 22 ++-- 6 files changed, 150 insertions(+), 94 deletions(-) diff --git a/docs/api-main.rst b/docs/api-main.rst index 15ddca1b..514dc698 100644 --- a/docs/api-main.rst +++ b/docs/api-main.rst @@ -5,22 +5,26 @@ ldclient module --------------- .. automodule:: ldclient - :members: get,set_config + :members: Context,ContextBuilder,ContextMultiBuilder,get,set_config + :special-members: __init__ ldclient.client module ---------------------- .. automodule:: ldclient.client :members: LDClient + :special-members: __init__ ldclient.config module ---------------------- .. automodule:: ldclient.config :members: + :special-members: __init__ ldclient.evaluation module -------------------------- .. automodule:: ldclient.evaluation :members: + :special-members: __init__ diff --git a/ldclient/client.py b/ldclient/client.py index 5d1fc093..a726efbf 100644 --- a/ldclient/client.py +++ b/ldclient/client.py @@ -195,13 +195,17 @@ def track(self, event_name: str, context: Union[dict, Context], data: Optional[A This method creates a "custom" analytics event containing the specified event name (key) and context properties. You may attach arbitrary data or a metric value to the event with the - optional `data` and `metric_value` parameters. + optional ``data`` and ``metric_value`` parameters. Note that event delivery is asynchronous, so the event may not actually be sent until later; see :func:`flush()`. + If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, + the SDK will convert the user to a Context. 
There is some overhead to this conversion, + so it is more efficient to pass a Context. + :param event_name: the name of the event - :param context: the evaluation context associated with the event + :param context: the evaluation context or user associated with the event :param data: optional additional data associated with the event :param metric_value: a numeric value used by the LaunchDarkly experimentation feature in numeric custom metrics; can be omitted if this event is used by only non-numeric metrics @@ -225,6 +229,10 @@ def identify(self, context: Union[Context, dict]): need to use :func:`identify()` if you want to identify the context without evaluating a flag. + If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, + the SDK will convert the user to a Context. There is some overhead to this conversion, + so it is more efficient to pass a Context. + :param context: the context to register """ if not isinstance(context, Context): @@ -268,11 +276,15 @@ def flush(self): def variation(self, key: str, context: Union[Context, dict], default: Any) -> Any: """Calculates the value of a feature flag for a given context. + If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, + the SDK will convert the user to a Context. There is some overhead to this conversion, + so it is more efficient to pass a Context. + :param key: the unique key for the feature flag :param context: the evaluation context or user :param default: the default value of the flag, to be used if the value is not available from LaunchDarkly - :return: the variation for the given context, or the `default` value if the flag cannot be evaluated + :return: the variation for the given context, or the ``default`` value if the flag cannot be evaluated """ return self._evaluate_internal(key, context, default, self._event_factory_default).value @@ -280,9 +292,13 @@ def variation_detail(self, key: str, context: Union[Context, dict], default: Any """Calculates the value of a feature flag for a given context, and returns an object that describes the way the value was determined. - The `reason` property in the result will also be included in analytics events, if you are + The ``reason`` property in the result will also be included in analytics events, if you are capturing detailed event data for this flag. - + + If you pass a dictionary of user attributes instead of a :class:`ldclient.Context`, + the SDK will convert the user to a Context. There is some overhead to this conversion, + so it is more efficient to pass a Context. + :param key: the unique key for the feature flag :param context: the evaluation context or user :param default: the default value of the flag, to be used if the value is not diff --git a/ldclient/config.py b/ldclient/config.py index c0721e3e..2288a7fd 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -20,9 +20,9 @@ class BigSegmentsConfig: Big Segments are a specific type of user segments. For more information, read the LaunchDarkly documentation: https://docs.launchdarkly.com/home/users/big-segments - If your application uses Big Segments, you will need to create a `BigSegmentsConfig` that at a - minimum specifies what database integration to use, and then pass the `BigSegmentsConfig` - object as the `big_segments` parameter when creating a :class:`Config`. 
+ If your application uses Big Segments, you will need to create a ``BigSegmentsConfig`` that at a + minimum specifies what database integration to use, and then pass the ``BigSegmentsConfig`` + object as the ``big_segments`` parameter when creating a :class:`Config`. This example shows Big Segments being configured to use Redis: :: @@ -81,7 +81,7 @@ class HTTPConfig: """Advanced HTTP configuration options for the SDK client. This class groups together HTTP/HTTPS-related configuration properties that rarely need to be changed. - If you need to set these, construct an `HTTPConfig` instance and pass it as the `http` parameter when + If you need to set these, construct an ``HTTPConfig`` instance and pass it as the ``http`` parameter when you construct the main :class:`Config` for the SDK client. """ def __init__(self, @@ -95,7 +95,7 @@ def __init__(self, :param connect_timeout: The connect timeout for network connections in seconds. :param read_timeout: The read timeout for network connections in seconds. :param http_proxy: Use a proxy when connecting to LaunchDarkly. This is the full URI of the - proxy; for example: http://my-proxy.com:1234. Note that unlike the standard `http_proxy` environment + proxy; for example: http://my-proxy.com:1234. Note that unlike the standard ``http_proxy`` environment variable, this is used regardless of whether the target URI is HTTP or HTTPS (the actual LaunchDarkly service uses HTTPS, but a Relay Proxy instance could use HTTP). Setting this Config parameter will override any proxy specified by an environment variable, but only for LaunchDarkly SDK connections. @@ -105,7 +105,7 @@ def __init__(self, certificate. :param disable_ssl_verification: If true, completely disables SSL verification and certificate verification for secure requests. This is unsafe and should not be used in a production environment; - instead, use a self-signed certificate and set `ca_certs`. + instead, use a self-signed certificate and set ``ca_certs``. """ self.__connect_timeout = connect_timeout self.__read_timeout = read_timeout @@ -194,16 +194,16 @@ def __init__(self, to be reestablished. The delay for the first reconnection will start near this value, and then increase exponentially for any subsequent connection failures. :param send_events: Whether or not to send events back to LaunchDarkly. This differs from - `offline` in that it affects only the sending of client-side events, not streaming or polling for + ``offline`` in that it affects only the sending of client-side events, not streaming or polling for events from the server. By default, events will be sent. - :param events_enabled: Obsolete name for `send_events`. + :param events_enabled: Obsolete name for ``send_events``. :param offline: Whether the client should be initialized in offline mode. In offline mode, default values are returned for all flags and no remote network requests are made. By default, this is false. :param poll_interval: The number of seconds between polls for flag updates if streaming is off. :param use_ldd: Whether you are using the LaunchDarkly Relay Proxy in daemon mode. In this configuration, the client will not use a streaming connection to listen for updates, but instead - will get feature state from a Redis instance. The `stream` and `poll_interval` options will be + will get feature state from a Redis instance. The ``stream`` and ``poll_interval`` options will be ignored if this option is set to true. By default, this is false. 
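For instance, a minimal daemon-mode configuration might look like the following sketch (``my_redis_feature_store`` stands in for a feature store created with one of the SDK's database integrations): ::

        from ldclient.config import Config

        config = Config('SDK_KEY', use_ldd=True, feature_store=my_redis_feature_store)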
For more information, read the LaunchDarkly documentation: https://docs.launchdarkly.com/home/relay-proxy/using#using-daemon-mode @@ -211,10 +211,10 @@ def __init__(self, with this configuration active will have these attributes removed. Each item can be either the name of an attribute ("email"), or a slash-delimited path ("/address/street") to mark a property within a JSON object value as private. - :param array private_attribute_names: Deprecated alias for `private_attributes` ("names" is no longer + :param array private_attribute_names: Deprecated alias for ``private_attributes`` ("names" is no longer strictly accurate because these could also be attribute reference paths). :param all_attributes_private: If true, all user attributes (other than the key) will be - private, not just the attributes specified in `private_attribute`. + private, not just the attributes specified in ``private_attributes``. :param feature_store: A FeatureStore implementation :param user_keys_capacity: The number of user keys that the event processor can remember at any one time, so that duplicate user details will not be sent in analytics events. @@ -235,8 +235,8 @@ def __init__(self, being used. This will be sent in HTTP headers during requests to the LaunchDarkly servers to allow recording metrics on the usage of these wrapper libraries. :param wrapper_version: For use by wrapper libraries to report the version of the library in - use. If `wrapper_name` is not set, this field will be ignored. Otherwise the version string will - be included in the HTTP headers along with the `wrapper_name` during requests to the LaunchDarkly + use. If ``wrapper_name`` is not set, this field will be ignored. Otherwise the version string will + be included in the HTTP headers along with the ``wrapper_name`` during requests to the LaunchDarkly servers. :param http: Optional properties for customizing the client's HTTP/HTTPS behavior. See :class:`HTTPConfig`. diff --git a/ldclient/context.py b/ldclient/context.py index d2e783c6..6b789318 100644 --- a/ldclient/context.py +++ b/ldclient/context.py @@ -40,8 +40,8 @@ class Context: To create a Context with multiple kinds (a multi-context), use :func:`create_multi()` or :func:`multi_builder()`. - A Context can be in an error state if it was built with invalid attributes. See :func:`valid()` - and :func:`error()`. + A Context can be in an error state if it was built with invalid attributes. See :attr:`valid` + and :attr:`error`. A Context is immutable once created. """ @@ -117,7 +117,7 @@ def __init__( self.__make_invalid(kind_error) return if key == '' and not allow_empty_key: - self.__make_invalid('context key must not be null or empty') + self.__make_invalid('context key must not be None or empty') return self.__key = key self.__kind = kind @@ -135,11 +135,14 @@ def create(cls, key: str, kind: Optional[str] = None) -> Context: """ Creates a single-kind Context with only the key and the kind specified. - If you omit the kind, it defaults to "user" (:const:`DEFAULT_KIND1). + If you omit the kind, it defaults to "user" (:const:`DEFAULT_KIND`). :param key: the context key :param kind: the context kind; if omitted, it is :const:`DEFAULT_KIND` ("user") :return: a context + + :see: :func:`builder()` + :see: :func:`create_multi()` """ return Context(kind, key, None, False, None, None, None, False) @@ -151,6 +154,9 @@ def create_multi(cls, *contexts: Context) -> Context: To create a Context for a single context kind, use :func:`create()` or :func:`builder()`. 
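As a small sketch (keys and kinds here are illustrative only), the following two multi-contexts come out identical: ::

        user = Context.create('user-key-123abc')                  # kind defaults to "user"
        org = Context.create('org-key-456def', 'organization')

        multi1 = Context.create_multi(user, org)
        multi2 = Context.multi_builder().add(user).add(org).build()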
+ You may use :func:`multi_builder()` instead if you want to add contexts one at a time + using a builder pattern. + For the returned Context to be valid, the contexts list must not be empty, and all of its elements must be valid Contexts. Otherwise, the returned Context will be invalid as reported by :func:`error()`. @@ -158,10 +164,13 @@ def create_multi(cls, *contexts: Context) -> Context: If only one context parameter is given, the method returns that same context. If a nested context is a multi-context, this is exactly equivalent to adding each of the - individual kinds from it separately. See :func:ldclient.context.ContextMultiBuilder.add()`. + individual kinds from it separately. See :func:`ldclient.ContextMultiBuilder.add()`. :param contexts: the individual contexts :return: a multi-context + + :see: :func:`create()` + :see: :func:`multi_builder()` """ # implementing this via multi_builder gives us the flattening behavior for free builder = ContextMultiBuilder() @@ -208,8 +217,8 @@ def builder(cls, key: str) -> ContextBuilder: You may use :class:`ldclient.ContextBuilder` methods to set additional attributes and/or change the context kind before calling :func:`ldclient.ContextBuilder.build()`. If you - do not change any values, the defaults for the Context are that its `kind` is :const:`DEFAULT_KIND`, - its `key` is set to the key parameter specified here, `anonymous` is False, and it has no values for + do not change any values, the defaults for the Context are that its ``kind`` is :const:`DEFAULT_KIND`, + its :attr:`key` is set to the key parameter specified here, :attr:`anonymous` is False, and it has no values for any other attributes. This method is for building a Context that has only a single kind. To define a multi-context, @@ -217,6 +226,10 @@ def builder(cls, key: str) -> ContextBuilder: :param key: the context key :return: a new builder + + :see: :func:`create()` + :see: :func:`create_multi()` + """ return ContextBuilder(key) @@ -245,6 +258,9 @@ def multi_builder(cls) -> ContextMultiBuilder: allows you to add contexts one at a time, if that is more convenient for your logic. :return: a new builder + + :see: :func:`builder()` + :see: :func:`create_multi()` """ return ContextMultiBuilder() @@ -257,13 +273,13 @@ def valid(self) -> bool: is missing necessary attributes or has invalid attributes, indicating an incorrect usage of the SDK API. The only ways for a context to be invalid are: - * The `kind` property had a disallowed value. See :func:`kind()`. - * For a single context, the `key` property was null or empty. + * The :attr:`kind` property had a disallowed value. See :func:`ldclient.ContextBuilder.kind()`. + * For a single context, the :attr:`key` property was None or empty. * You tried to create a multi-context without specifying any contexts. * You tried to create a multi-context using the same context kind more than once. * You tried to create a multi-context where at least one of the individual Contexts was invalid. - In any of these cases, `valid` will return false, and :func:`error()` will return a + In any of these cases, :attr:`valid` will be False, and :attr:`error` will return a description of the error. Since in normal usage it is easy for applications to be sure they are using context kinds @@ -273,7 +289,7 @@ def valid(self) -> bool: the context is invalid, the operation will fail in some well-defined way as described in the documentation for that method, and the SDK will generally log a warning as well. 
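For example, a builder with no other calls produces the documented defaults (a minimal sketch; the key is illustrative): ::

        c = Context.builder('user-key-123abc').build()
        # c.kind == 'user' (DEFAULT_KIND), c.key == 'user-key-123abc', c.anonymous is False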
But in any situation where you are not sure if you have a valid Context, you can check - :func:`valid()` or :func:`error()`. + :attr:`valid` or :attr:`error`. """ return self.__error is None @@ -282,7 +298,7 @@ def error(self) -> Optional[str]: """ Returns None for a valid Context, or an error message for an invalid one. - If this is None, then :func:`valid()` is True. If it is not None, then :func:`valid()` is + If this is None, then :attr:`valid` is True. If it is not None, then :attr:`valid` is False. """ return self.__error @@ -292,49 +308,59 @@ def multiple(self) -> bool: """ True if this is a multi-context. - If this value is True, then :func:`kind()` is guaranteed to be :const:`MULTI_KIND`, and + If this value is True, then :attr:`kind` is guaranteed to be :const:`MULTI_KIND`, and you can inspect the individual context for each kind with :func:`get_individual_context()`. - If this value is False, then :func:`kind()` is guaranteed to return a value that is not + If this value is False, then :attr:`kind` is guaranteed to return a value that is not :const:`MULTI_KIND`. + + :see: :func:`create_multi()` """ return self.__multi is not None @property def kind(self) -> str: """ - Returns the context's `kind` attribute. + Returns the context's ``kind`` attribute. Every valid context has a non-empty kind. For multi-contexts, this value is :const:`MULTI_KIND` and the kinds within the context can be inspected with :func:`get_individual_context()`. + + :see: :func:`ldclient.ContextBuilder.kind()` + :see: :func:`create()` """ return self.__kind @property def key(self) -> str: """ - Returns the context's `key` attribute. + Returns the context's ``key`` attribute. For a single context, this value is set by :func:`create`, or :func:`ldclient.ContextBuilder.key()`. - For a multi-context, there is no single value and :func:`key()` returns an empty string. Use - :func:`get_individual_context()` to get the context for a particular kind, then call :func:`key()` - on it. + For a multi-context, there is no single value and :attr:`key` returns an empty string. Use + :func:`get_individual_context()` to get the Context for a particular kind, then get the + :attr:`key` of that Context. + + :see: :func:`ldclient.ContextBuilder.key()` + :see: :func:`create()` """ return self.__key @property def name(self) -> Optional[str]: """ - Returns the context's `name` attribute. + Returns the context's ``name`` attribute. For a single context, this value is set by :func:`ldclient.ContextBuilder.name()`. It is None if no value was set. - For a multi-context, there is no single value and :func:`name()` returns null. Use - :func:`get_individual_context()` to get the context for a particular kind, then call :func:`name()` - on it. + For a multi-context, there is no single value and :attr:`name` returns None. Use + :func:`get_individual_context()` to get the Context for a particular kind, then get the + :attr:`name` of that Context. + + :see: :func:`ldclient.ContextBuilder.name()` """ return self.__name @@ -347,15 +373,17 @@ def anonymous(self) -> bool: The default value is False. False means that this Context represents an entity such as a user that you want to be able to see on the LaunchDarkly dashboard. - Setting `anonymous` to true excludes this context from the database that is + Setting ``anonymous`` to True excludes this context from the database that is used by the dashboard. 
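A quick sketch of the validity checks described above: ::

        c = Context.create('')   # empty key -> invalid
        assert not c.valid
        print(c.error)           # "context key must not be None or empty"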
It does not exclude it from analytics event data, so it is not the same as making attributes private; all non-private attributes will still be included in events and data export. There is no limitation on what other attributes - may be included (so, for instance, `anonymous` does not mean there is no `name`), - and the context will still have whatever `key` you have given it. + may be included (so, for instance, ``anonymous`` does not mean there is no :attr:`name`), + and the context will still have whatever :attr:`key` you have given it. This value is also addressable in evaluations as the attribute name "anonymous". It is always treated as a boolean true or false in evaluations. + + :see: :func:`ldclient.ContextBuilder.anonymous()` """ return self.__anonymous @@ -364,9 +392,9 @@ def get(self, attribute: str) -> Any: Looks up the value of any attribute of the context by name. For a single-kind context, the attribute name can be any custom attribute that was set - by :func:`ldclient.context.ContextBuilder.set()`. It can also be one of the built-in ones - like "kind", "key", or "name"; in such cases, it is equivalent to :func:`kind`, - :func:`key`, or :fund:`name`. + by :func:`ldclient.ContextBuilder.set()`. It can also be one of the built-in ones + like "kind", "key", or "name"; in such cases, it is equivalent to :attr:`kind`, + :attr:`key`, or :attr:`name`. For a multi-context, the only supported attribute name is "kind". Use :func:`get_individual_context()` to get the context for a particular kind and then get @@ -376,11 +404,13 @@ def get(self, attribute: str) -> Any: attribute, the return value is None. An attribute that actually exists cannot have a value of None. - Context has a `__getitem__` magic method equivalent to `get`, so `context['attr']` - behaves the same as `context.get('attr')`. + Context has a ``__getitem__`` magic method equivalent to ``get``, so ``context['attr']`` + behaves the same as ``context.get('attr')``. :param attribute: the desired attribute name :return: the attribute value, or None if there is no such attribute + + :see: :func:`ldclient.ContextBuilder.set()` """ if attribute == 'key': return self.__key @@ -403,6 +433,8 @@ def individual_context_count(self) -> int: of context kinds. For an invalid context, it returns zero. :return: the number of context kinds + + :see: :func:`get_individual_context()` """ if self.__error is not None: return 0 @@ -414,23 +446,25 @@ def get_individual_context(self, kind: Union[int, str]) -> Optional[Context]: """ Returns the single-kind Context corresponding to one of the kinds in this context. - The `kind` parameter can be either a number representing a zero-based index, or a string + The ``kind`` parameter can be either a number representing a zero-based index, or a string representing a context kind. If this method is called on a single-kind Context, then the only allowable value for - `kind` is either zero or the same value as the Context's :func:`kind`, and the return + ``kind`` is either zero or the same value as the Context's :attr:`kind`, and the return value on success is the same Context. - If the method is called on a multi-context, and `kind` is a number, it must be a - non-negative index that is less than the number of kinds (that is, less than the return - value of :func:`individual_context_count`), and the return value on success is one of - the individual Contexts within. 
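To illustrate the attribute lookup rules described above (the attribute names are illustrative): ::

        c = Context.builder('user-key-123abc').name('Sandy').set('email', 'sandy@example.com').build()
        c.get('email')          # 'sandy@example.com'
        c['name']               # 'Sandy', via the __getitem__ equivalent
        c.get('nonexistent')    # None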
Or, if `kind` is a string, it must match the context + If the method is called on a multi-context, and ``kind`` is a number, it must be a + non-negative index that is less than the number of kinds (that is, less than the value + of :attr:`individual_context_count`), and the return value on success is one of the + individual Contexts within. Or, if ``kind`` is a string, it must match the context kind of one of the individual contexts. - If there is no context corresponding to `kind`, the method returns null. + If there is no context corresponding to ``kind``, the method returns None. :param kind: the index or string value of a context kind - :return: the context corresponding to that index or kind, or null if none + :return: the context corresponding to that index or kind, or None + + :see: :attr:`individual_context_count` """ if self.__error is not None: return None @@ -489,10 +523,10 @@ def _private_attributes(self) -> Optional[list[str]]: @property def fully_qualified_key(self) -> str: """ - A string that describes the Context uniquely based on `kind` and `key` values. + A string that describes the Context uniquely based on ``kind`` and ``key`` values. This value is used whenever LaunchDarkly needs a string identifier based on all of the - `kind` and `key` values in the context. Applications typically do not need to use it. + :attr:`kind` and :attr:`key` values in the context. Applications typically do not need to use it. """ return self.__full_key @@ -520,7 +554,7 @@ def to_json_string(self) -> str: Returns the JSON representation of the context as a string, in the standard format used by LaunchDarkly SDKs. - This is equivalent to calling :func:`to_dict()` and then `json.dumps()`. + This is equivalent to calling :func:`to_dict()` and then ``json.dumps()``. :return: the JSON representation as a string """ @@ -611,8 +645,8 @@ def __repr__(self) -> str: For a valid Context, this is currently defined as being the same as the JSON representation, since that is the simplest way to represent all of the Context properties. However, application - code should not rely on `__repr__` always being the same as the JSON representation. If you - specifically want the latter, use :func:`to_json_string()`. For an invalid Context, `__repr__` + code should not rely on ``__repr__`` always being the same as the JSON representation. If you + specifically want the latter, use :func:`to_json_string()`. For an invalid Context, ``__repr__`` returns a description of why it is invalid. :return: a string representation @@ -715,8 +749,8 @@ def build(self) -> Context: It is possible to specify invalid attributes for a ContextBuilder, such as an empty key. Instead of throwing an exception, the ContextBuilder always returns an Context and you can - check :func:`ldclient.Context.valid()` or :func:`ldclient.Context.error()` to see if it has - an error. See :func:`ldclient.Context.valid()` for more information about invalid conditions. + check :attr:`ldclient.Context.valid` or :attr:`ldclient.Context.error` to see if it has + an error. See :attr:`ldclient.Context.valid` for more information about invalid conditions. If you pass an invalid Context to an SDK method, the SDK will detect this and will log a description of the error. @@ -746,13 +780,13 @@ def kind(self, kind: str) -> ContextBuilder: """ Sets the context's kind attribute. - Every context has a kind. Setting it to an empty string or null is equivalent to - :const:`ldclient.context.DEFAULT_KIND` ("user"). This value is case-sensitive. + Every context has a kind. 
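A short sketch of per-kind access on a multi-context (keys and kinds illustrative): ::

        multi = Context.create_multi(Context.create('user-key-123abc'),
                                     Context.create('org-key-456def', 'organization'))
        multi.individual_context_count                     # 2
        multi.get_individual_context('organization').key   # 'org-key-456def'
        multi.get_individual_context('device')             # None: no context of that kind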
Setting it to an empty string or None is equivalent to + :const:`ldclient.Context.DEFAULT_KIND` ("user"). This value is case-sensitive. The meaning of the context kind is completely up to the application. Validation rules are as follows: - * It may only contain letters, numbers, and the characters `.`, `_`, and `-`. + * It may only contain letters, numbers, and the characters ``.``, ``_``, and ``-``. * It cannot equal the literal string "kind". * For a single context, it cannot equal "multi". @@ -770,7 +804,7 @@ def name(self, name: Optional[str]) -> ContextBuilder: * Unlike most other attributes, it is always a string if it is specified. * The LaunchDarkly dashboard treats this attribute as the preferred display name for - contexts. + contexts. :param name: the context name (None to unset the attribute) :return: the builder @@ -786,18 +820,20 @@ def anonymous(self, anonymous: bool) -> ContextBuilder: The default value is False. False means that this Context represents an entity such as a user that you want to be able to see on the LaunchDarkly dashboard. - Setting `anonymous` to true excludes this context from the database that is + Setting ``anonymous`` to True excludes this context from the database that is used by the dashboard. It does not exclude it from analytics event data, so it is not the same as making attributes private; all non-private attributes will still be included in events and data export. There is no limitation on what other attributes - may be included (so, for instance, `anonymous` does not mean there is no `name`), - and the context will still have whatever `key` you have given it. + may be included (so, for instance, ``anonymous`` does not mean there is no ``name``), + and the context will still have whatever ``key`` you have given it. This value is also addressable in evaluations as the attribute name "anonymous". It is always treated as a boolean true or false in evaluations. :param anonymous: true if the context should be excluded from the LaunchDarkly database :return: the builder + + :see: :attr:`ldclient.Context.anonymous` """ self.__anonymous = anonymous return self @@ -807,7 +843,7 @@ def set(self, attribute: str, value: Any) -> ContextBuilder: Sets the value of any attribute for the context. This includes only attributes that are addressable in evaluations-- not metadata such - as :func:`private()`. If `attributeName` is `"private"`, you will be setting an attribute + as :func:`private()`. If ``attributeName`` is ``"private"``, you will be setting an attribute with that name which you can use in evaluations or to record data for your own purposes, but it will be unrelated to :func:`private()`. @@ -820,11 +856,11 @@ def set(self, attribute: str, value: Any) -> ContextBuilder: The following attribute names have special restrictions on their value types, and any value of an unsupported type will be ignored (leaving the attribute unchanged): - * `kind`, `key`: Must be a string. See :func:`kind()` and :func:`key()`. - * `name`: Must be a string or None. See :func:`name()`. - * `anonymous`: Must be a boolean. See :func:`anonymous()`. + * ``"kind"``, ``"key"``: Must be a string. See :func:`kind()` and :func:`key()`. + * ``"name"``: Must be a string or None. See :func:`name()`. + * ``"anonymous"``: Must be a boolean. See :func:`anonymous()`. 
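A hedged sketch of these restrictions: ::

        b = Context.builder('user-key-123abc')
        b.set('name', 'Sandy')       # equivalent to b.name('Sandy')
        b.set('anonymous', 'yes')    # ignored: value is not a boolean
        b.set('groups', ['admin'])   # custom attribute, addressable in evaluations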
- The attribute name "_meta" is not allowed, because it has special meaning in the + The attribute name ``"_meta"`` is not allowed, because it has special meaning in the JSON schema for contexts; any attempt to set an attribute with this name has no effect. @@ -942,8 +978,8 @@ def build(self) -> Context: It is possible for a ContextMultiBuilder to represent an invalid state. Instead of throwing an exception, the ContextMultiBuilder always returns a Context, and you can check - :func:`ldclient.Context.valid()` or :func:`ldclient.Context.error()` to see if it has an - error. See :func:`ldclient.Context.valid()` for more information about invalid context + :attr:`ldclient.Context.valid` or :attr:`ldclient.Context.error` to see if it has an + error. See :attr:`ldclient.Context.valid` for more information about invalid context conditions. If you pass an invalid context to an SDK method, the SDK will detect this and will log a description of the error. @@ -966,8 +1002,8 @@ def add(self, context: Context) -> ContextMultiBuilder: that is itself invalid. This error is detected when you call :func:`build()`. If the nested context is a multi-context, this is exactly equivalent to adding each of the - individual contexts from it separately. For instance, in the following example, `multi1` and - `multi2` end up being exactly the same: + individual contexts from it separately. For instance, in the following example, ``multi1`` and + ``multi2`` end up being exactly the same: :: c1 = Context.new("key1", "kind1") diff --git a/ldclient/integrations/test_data.py b/ldclient/integrations/test_data.py index 219dd9b1..ce9527eb 100644 --- a/ldclient/integrations/test_data.py +++ b/ldclient/integrations/test_data.py @@ -38,8 +38,8 @@ class TestData(): supports many of the ways a flag can be configured on the LaunchDarkly dashboard, but does not currently support 1. rule operators other than "in" and "not in", or 2. percentage rollouts. - If the same `TestData` instance is used to configure multiple `LDClient` instances, - any changes made to the data will propagate to all of the `LDClient` instances. + If the same ``TestData`` instance is used to configure multiple ``LDClient`` instances, + any changes made to the data will propagate to all of the ``LDClient`` instances. """ # Prevent pytest from treating this as a test class diff --git a/ldclient/interfaces.py b/ldclient/interfaces.py index 05f210ab..c3b1f2f7 100644 --- a/ldclient/interfaces.py +++ b/ldclient/interfaces.py @@ -139,7 +139,7 @@ def upsert_internal(self, kind: VersionedDataKind, item: dict) -> dict: the old one. It should return the final state of the item, i.e. if the update succeeded then it returns the item that was passed in, and if the update failed due to the version check then it returns the item that is currently in the data store (this ensures that - `CachingStoreWrapper` will update the cache correctly). + ``CachingStoreWrapper`` will update the cache correctly). :param kind: The kind of object to update :param item: The object to update or insert @@ -152,7 +152,7 @@ def initialized_internal(self) -> bool: Returns true if this store has been initialized. In a shared data store, it should be able to detect this even if initInternal was called in a different process, i.e. the test should be based on looking at what is in the data store. The method does not need to worry about caching - this value; `CachingStoreWrapper` will only call it when necessary. + this value; ``CachingStoreWrapper`` will only call it when necessary. 
""" @@ -263,7 +263,7 @@ def __init__(self, last_up_to_date: Optional[int]): @property def last_up_to_date(self) -> Optional[int]: """ - The Unix epoch millisecond timestamp of the last update to the `BigSegmentStore`. It is + The Unix epoch millisecond timestamp of the last update to the ``BigSegmentStore``. It is None if the store has never been updated. """ return self.__last_up_to_date @@ -297,7 +297,7 @@ def get_membership(self, context_hash: str) -> Optional[dict]: of how this is done, because it deals only with already-hashed keys, but the string can be assumed to only contain characters that are valid in base64. - The return value should be either a `dict`, or None if the context is not referenced in any big + The return value should be either a ``dict``, or None if the context is not referenced in any big segments. Each key in the dictionary is a "segment reference", which is how segments are identified in Big Segment data. This string is not identical to the segment key-- the SDK will add other information. The store implementation should not be concerned with the @@ -343,7 +343,7 @@ def available(self) -> bool: a valid database connection). In this case, the SDK will treat any reference to a Big Segment as if no users are included in that segment. Also, the :func:`ldclient.evaluation.EvaluationDetail.reason` associated with with any flag evaluation that references a Big Segment when the store is not - available will have a `bigSegmentsStatus` of `"STORE_ERROR"`. + available will have a ``bigSegmentsStatus`` of ``"STORE_ERROR"``. """ return self.__available @@ -357,7 +357,7 @@ def stale(self) -> bool: running or has become unable to receive fresh data from LaunchDarkly. Any feature flag evaluations that reference a Big Segment will be using the last known data, which may be out of date. Also, the :func:`ldclient.evaluation.EvaluationDetail.reason` associated with those evaluations - will have a `bigSegmentsStatus` of `"STALE"`. + will have a ``bigSegmentsStatus`` of ``"STALE"``. """ return self.__stale @@ -375,13 +375,13 @@ class BigSegmentStoreStatusProvider: Application code never needs to implement this interface. There are two ways to interact with the status. One is to simply get the current status; if its - `available` property is true, then the SDK is able to evaluate user membership in Big Segments, - and the `stale`` property indicates whether the data might be out of date. + ``available`` property is true, then the SDK is able to evaluate user membership in Big Segments, + and the ``stale`` property indicates whether the data might be out of date. The other way is to subscribe to status change notifications. Applications may wish to know if there is an outage in the Big Segment store, or if it has become stale (the Relay Proxy has stopped updating it with new data), since then flag evaluations that reference a Big Segment - might return incorrect values. Use `add_listener` to register a callback for notifications. + might return incorrect values. Use :func:`add_listener()` to register a callback for notifications. """ @abstractproperty @@ -399,7 +399,7 @@ def add_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> Non Subscribes for notifications of status changes. The listener is a function or method that will be called with a single parameter: the - new `BigSegmentStoreStatus`. + new ``BigSegmentStoreStatus``. 
:param listener: the listener to add """ @@ -410,7 +410,7 @@ def remove_listener(self, listener: Callable[[BigSegmentStoreStatus], None]) -> """ Unsubscribes from notifications of status changes. - :param listener: a listener that was previously added with `add_listener`; if it was not, + :param listener: a listener that was previously added with :func:`add_listener()`; if it was not, this method does nothing """ pass From db889df028bf71186be857c1e914f48ddee3a50d Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Fri, 23 Dec 2022 19:07:16 -0800 Subject: [PATCH 339/356] U2C configuration updates --- contract-tests/client_entity.py | 4 +- ldclient/config.py | 80 +++++++++++++-------- ldclient/impl/big_segments.py | 2 +- ldclient/impl/events/diagnostics.py | 4 +- ldclient/impl/events/event_processor.py | 4 +- testing/impl/events/test_diagnostics.py | 2 +- testing/impl/events/test_event_processor.py | 4 +- testing/impl/test_big_segments.py | 6 +- 8 files changed, 64 insertions(+), 42 deletions(-) diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index e461a6e4..dac8625f 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -43,8 +43,8 @@ def __init__(self, tag, config): "store": BigSegmentStoreFixture(big_params["callbackUri"]) } if big_params.get("userCacheSize") is not None: - big_config["user_cache_size"] = big_params["userCacheSize"] - _set_optional_time_prop(big_params, "userCacheTimeMs", big_config, "user_cache_time") + big_config["context_cache_size"] = big_params["userCacheSize"] + _set_optional_time_prop(big_params, "userCacheTimeMs", big_config, "context_cache_time") _set_optional_time_prop(big_params, "statusPollIntervalMs", big_config, "status_poll_interval") _set_optional_time_prop(big_params, "staleAfterMs", big_config, "stale_after") opts["big_segments"] = BigSegmentsConfig(**big_config) diff --git a/ldclient/config.py b/ldclient/config.py index 2288a7fd..3ae0c04c 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -17,7 +17,7 @@ class BigSegmentsConfig: """Configuration options related to Big Segments. - Big Segments are a specific type of user segments. For more information, read the LaunchDarkly + Big Segments are a specific type of segments. 
For more information, read the LaunchDarkly documentation: https://docs.launchdarkly.com/home/users/big-segments If your application uses Big Segments, you will need to create a ``BigSegmentsConfig`` that at a @@ -34,25 +34,29 @@ class BigSegmentsConfig: """ def __init__(self, store: Optional[BigSegmentStore] = None, - user_cache_size: int=1000, - user_cache_time: float=5, + context_cache_size: int=1000, + context_cache_time: float=5, + user_cache_size: Optional[int]=None, + user_cache_time: Optional[float]=None, status_poll_interval: float=5, stale_after: float=120): """ :param store: the implementation of :class:`ldclient.interfaces.BigSegmentStore` that will be used to query the Big Segments database - :param user_cache_size: the maximum number of users whose Big Segment state will be cached + :param context_cache_size: the maximum number of contexts whose Big Segment state will be cached by the SDK at any given time - :param user_cache_time: the maximum length of time (in seconds) that the Big Segment state - for a user will be cached by the SDK + :param context_cache_time: the maximum length of time (in seconds) that the Big Segment state + for a context will be cached by the SDK + :param user_cache_size: deprecated alias for `context_cache_size` + :param user_cache_time: deprecated alias for `context_cache_time` :param status_poll_interval: the interval (in seconds) at which the SDK will poll the Big Segment store to make sure it is available and to determine how long ago it was updated :param stale_after: the maximum length of time between updates of the Big Segments data before the data is considered out of date """ self.__store = store - self.__user_cache_size = user_cache_size - self.__user_cache_time = user_cache_time + self.__context_cache_size = context_cache_size if user_cache_size is None else user_cache_size + self.__context_cache_time = context_cache_time if user_cache_time is None else user_cache_time self.__status_poll_interval = status_poll_interval self.__stale_after = stale_after pass @@ -61,13 +65,23 @@ def __init__(self, def store(self) -> Optional[BigSegmentStore]: return self.__store + @property + def context_cache_size(self) -> int: + return self.__context_cache_size + + @property + def context_cache_time(self) -> float: + return self.__context_cache_time + @property def user_cache_size(self) -> int: - return self.__user_cache_size + """Deprecated alias for :attr:`context_cache_size`.""" + return self.context_cache_size @property def user_cache_time(self) -> float: - return self.__user_cache_time + """Deprecated alias for :attr:`context_cache_time`.""" + return self.context_cache_time @property def status_poll_interval(self) -> float: @@ -155,7 +169,6 @@ def __init__(self, initial_reconnect_delay: float=1, defaults: dict={}, send_events: Optional[bool]=None, - events_enabled: bool=True, update_processor_class: Optional[Callable[[str, 'Config', FeatureStore], UpdateProcessor]]=None, poll_interval: float=30, use_ldd: bool=False, @@ -166,8 +179,10 @@ def __init__(self, private_attribute_names: Set[str]=set(), all_attributes_private: bool=False, offline: bool=False, - user_keys_capacity: int=1000, - user_keys_flush_interval: float=300, + context_keys_capacity: int=1000, + context_keys_flush_interval: float=300, + user_keys_capacity: Optional[int] = None, + user_keys_flush_interval: Optional[float] = None, diagnostic_opt_out: bool=False, diagnostic_recording_interval: int=900, wrapper_name: Optional[str]=None, @@ -196,7 +211,6 @@ def __init__(self, :param send_events: 
Whether or not to send events back to LaunchDarkly. This differs from ``offline`` in that it affects only the sending of client-side events, not streaming or polling for events from the server. By default, events will be sent. - :param events_enabled: Obsolete name for ``send_events``. :param offline: Whether the client should be initialized in offline mode. In offline mode, default values are returned for all flags and no remote network requests are made. By default, this is false. @@ -216,10 +230,12 @@ def __init__(self, :param all_attributes_private: If true, all user attributes (other than the key) will be private, not just the attributes specified in ``private_attributes``. :param feature_store: A FeatureStore implementation - :param user_keys_capacity: The number of user keys that the event processor can remember at any - one time, so that duplicate user details will not be sent in analytics events. - :param user_keys_flush_interval: The interval in seconds at which the event processor will - reset its set of known user keys. + :param context_keys_capacity: The number of context keys that the event processor can remember at any + one time, so that duplicate context details will not be sent in analytics events. + :param context_keys_flush_interval: The interval in seconds at which the event processor will + reset its set of known context keys. + :param user_keys_capacity: Deprecated alias for ``context_keys_capacity``. + :param user_keys_flush_interval: Deprecated alias for ``context_keys_flush_interval``. :param feature_requester_class: A factory for a FeatureRequester implementation taking the sdk key and config :param event_processor_class: A factory for an EventProcessor implementation taking the config :param update_processor_class: A factory for an UpdateProcessor implementation taking the sdk key, @@ -259,12 +275,12 @@ def __init__(self, self.__defaults = defaults if offline is True: send_events = False - self.__send_events = events_enabled if send_events is None else send_events + self.__send_events = True if send_events is None else send_events self.__private_attributes = private_attributes or private_attribute_names self.__all_attributes_private = all_attributes_private self.__offline = offline - self.__user_keys_capacity = user_keys_capacity - self.__user_keys_flush_interval = user_keys_flush_interval + self.__context_keys_capacity = context_keys_capacity if user_keys_capacity is None else user_keys_capacity + self.__context_keys_flush_interval = context_keys_flush_interval if user_keys_flush_interval is None else user_keys_flush_interval self.__diagnostic_opt_out = diagnostic_opt_out self.__diagnostic_recording_interval = max(diagnostic_recording_interval, 60) self.__wrapper_name = wrapper_name @@ -296,8 +312,8 @@ def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': private_attributes=self.__private_attributes, all_attributes_private=self.__all_attributes_private, offline=self.__offline, - user_keys_capacity=self.__user_keys_capacity, - user_keys_flush_interval=self.__user_keys_flush_interval, + context_keys_capacity=self.__context_keys_capacity, + context_keys_flush_interval=self.__context_keys_flush_interval, diagnostic_opt_out=self.__diagnostic_opt_out, diagnostic_recording_interval=self.__diagnostic_recording_interval, wrapper_name=self.__wrapper_name, @@ -373,10 +389,6 @@ def event_processor_class(self) -> Optional[Callable[['Config'], EventProcessor] def feature_requester_class(self) -> Callable: return self.__feature_requester_class - @property - def 
events_enabled(self) -> bool: - return self.__send_events - @property def send_events(self) -> bool: return self.__send_events @@ -405,13 +417,23 @@ def all_attributes_private(self) -> bool: def offline(self) -> bool: return self.__offline + @property + def context_keys_capacity(self) -> int: + return self.__context_keys_capacity + + @property + def context_keys_flush_interval(self) -> float: + return self.__context_keys_flush_interval + @property def user_keys_capacity(self) -> int: - return self.__user_keys_capacity + """Deprecated name for :attr:`context_keys_capacity`.""" + return self.context_keys_capacity @property def user_keys_flush_interval(self) -> float: - return self.__user_keys_flush_interval + """Deprecated name for :attr:`context_keys_flush_interval`.""" + return self.context_keys_flush_interval @property def diagnostic_opt_out(self) -> bool: diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py index 149d5c2c..b4f4ce1e 100644 --- a/ldclient/impl/big_segments.py +++ b/ldclient/impl/big_segments.py @@ -61,7 +61,7 @@ def __init__(self, config: BigSegmentsConfig): self.__poll_task = None # type: Optional[RepeatingTask] if self.__store: - self.__cache = ExpiringDict(max_len = config.user_cache_size, max_age_seconds=config.user_cache_time) + self.__cache = ExpiringDict(max_len = config.context_cache_size, max_age_seconds=config.context_cache_time) self.__poll_task = RepeatingTask(config.status_poll_interval, 0, self.poll_store_and_update_status) self.__poll_task.start() diff --git a/ldclient/impl/events/diagnostics.py b/ldclient/impl/events/diagnostics.py index dace86e1..d6ce98c9 100644 --- a/ldclient/impl/events/diagnostics.py +++ b/ldclient/impl/events/diagnostics.py @@ -71,8 +71,8 @@ def _create_diagnostic_config_object(config): 'usingRelayDaemon': config.use_ldd, 'allAttributesPrivate': config.all_attributes_private, 'pollingIntervalMillis': config.poll_interval * 1000, - 'userKeysCapacity': config.user_keys_capacity, - 'userKeysFlushIntervalMillis': config.user_keys_flush_interval * 1000, + 'userKeysCapacity': config.context_keys_capacity, + 'userKeysFlushIntervalMillis': config.context_keys_flush_interval * 1000, 'diagnosticRecordingIntervalMillis': config.diagnostic_recording_interval * 1000, 'dataStoreType': _get_component_type_name(config.feature_store, config, 'memory')} diff --git a/ldclient/impl/events/event_processor.py b/ldclient/impl/events/event_processor.py index 8cf78bfc..bf246153 100644 --- a/ldclient/impl/events/event_processor.py +++ b/ldclient/impl/events/event_processor.py @@ -260,7 +260,7 @@ def __init__(self, inbox, config, http_client, diagnostic_accumulator=None): self._close_http = (http_client is None) # so we know whether to close it later self._disabled = False self._outbox = EventBuffer(config.events_max_pending) - self._context_keys = SimpleLRUCache(config.user_keys_capacity) + self._context_keys = SimpleLRUCache(config.context_keys_capacity) self._formatter = EventOutputFormatter(config) self._last_known_past_time = 0 self._deduplicated_contexts = 0 @@ -403,7 +403,7 @@ def __init__(self, config, http=None, dispatcher_class=None, diagnostic_accumula self._inbox = queue.Queue(config.events_max_pending) self._inbox_full = False self._flush_timer = RepeatingTask(config.flush_interval, config.flush_interval, self.flush) - self._contexts_flush_timer = RepeatingTask(config.user_keys_flush_interval, config.user_keys_flush_interval, self._flush_contexts) + self._contexts_flush_timer = RepeatingTask(config.context_keys_flush_interval, 
config.context_keys_flush_interval, self._flush_contexts) self._flush_timer.start() self._contexts_flush_timer.start() if diagnostic_accumulator is not None: diff --git a/testing/impl/events/test_diagnostics.py b/testing/impl/events/test_diagnostics.py index 079430f8..1de0566f 100644 --- a/testing/impl/events/test_diagnostics.py +++ b/testing/impl/events/test_diagnostics.py @@ -65,7 +65,7 @@ def test_create_diagnostic_config_custom(): test_config = Config("SDK_KEY", base_uri='https://test.com', events_uri='https://test.com', events_max_pending=10, flush_interval=1, stream_uri='https://test.com', stream=False, poll_interval=60, use_ldd=True, feature_store=test_store, - all_attributes_private=True, user_keys_capacity=10, user_keys_flush_interval=60, + all_attributes_private=True, context_keys_capacity=10, context_keys_flush_interval=60, http=HTTPConfig(http_proxy = 'proxy', read_timeout=1, connect_timeout=1), diagnostic_recording_interval=60) diag_config = _create_diagnostic_config_object(test_config) diff --git a/testing/impl/events/test_event_processor.py b/testing/impl/events/test_event_processor.py index 5f9636c1..15e95bba 100644 --- a/testing/impl/events/test_event_processor.py +++ b/testing/impl/events/test_event_processor.py @@ -97,7 +97,7 @@ def test_context_is_filtered_in_index_event(): check_summary_event(output[2]) def test_two_events_for_same_context_only_produce_one_index_event(): - with DefaultTestProcessor(user_keys_flush_interval = 300) as ep: + with DefaultTestProcessor(context_keys_flush_interval = 300) as ep: e0 = EventInputEvaluation(timestamp, context, flag.key, flag, 1, 'value1', None, 'default', None, True) e1 = EventInputEvaluation(timestamp, context, flag.key, flag, 2, 'value2', None, 'default', None, True) ep.send_event(e0) @@ -111,7 +111,7 @@ def test_two_events_for_same_context_only_produce_one_index_event(): check_summary_event(output[3]) def test_new_index_event_is_added_if_context_cache_has_been_cleared(): - with DefaultTestProcessor(user_keys_flush_interval = 0.1) as ep: + with DefaultTestProcessor(context_keys_flush_interval = 0.1) as ep: e0 = EventInputEvaluation(timestamp, context, flag.key, flag, 1, 'value1', None, 'default', None, True) e1 = EventInputEvaluation(timestamp, context, flag.key, flag, 2, 'value2', None, 'default', None, True) ep.send_event(e0) diff --git a/testing/impl/test_big_segments.py b/testing/impl/test_big_segments.py index 9cb8e3fd..2b880e4c 100644 --- a/testing/impl/test_big_segments.py +++ b/testing/impl/test_big_segments.py @@ -55,7 +55,7 @@ def test_membership_query_cache_can_expire(): store = MockBigSegmentStore() store.setup_metadata_always_up_to_date() store.setup_membership(user_hash, expected_membership) - manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, user_cache_time=0.005)) + manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, context_cache_time=0.005)) try: expected_result = (expected_membership, BigSegmentsStatus.HEALTHY) assert manager.get_user_membership(user_key) == expected_result @@ -89,7 +89,7 @@ def test_membership_query_stale_status_no_store_metadata(): finally: manager.stop() -def test_membership_query_least_recent_user_evicted_from_cache(): +def test_membership_query_least_recent_context_evicted_from_cache(): user_key_1, user_key_2, user_key_3 = 'userkey1', 'userkey2', 'userkey3' user_hash_1, user_hash_2, user_hash_3 = _hash_for_user_key(user_key_1), \ _hash_for_user_key(user_key_2), _hash_for_user_key(user_key_3) @@ -100,7 +100,7 @@ def 
test_membership_query_least_recent_user_evicted_from_cache(): store.setup_membership(user_hash_2, membership_2) store.setup_membership(user_hash_3, membership_3) - manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, user_cache_size=2)) + manager = BigSegmentStoreManager(BigSegmentsConfig(store=store, context_cache_size=2)) try: result1 = manager.get_user_membership(user_key_1) From a0ced672d552d7a4530858df86f247f235faf960 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 28 Dec 2022 12:12:17 -0800 Subject: [PATCH 340/356] update release metadata --- .ldrelease/config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.ldrelease/config.yml b/.ldrelease/config.yml index 9021210c..cc14b358 100644 --- a/.ldrelease/config.yml +++ b/.ldrelease/config.yml @@ -12,7 +12,8 @@ publications: branches: - name: main - description: 7.x + description: 8.x + - name: 7.x - name: 6.x jobs: From 0fe3f307ba050e0cfbdeb8d745453281e18831a4 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 28 Dec 2022 12:33:47 -0800 Subject: [PATCH 341/356] store flag/segment target lists as sets --- ldclient/impl/model/feature_flag.py | 6 +++--- ldclient/impl/model/segment.py | 14 +++++++------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/ldclient/impl/model/feature_flag.py b/ldclient/impl/model/feature_flag.py index b282e4b9..a53c7689 100644 --- a/ldclient/impl/model/feature_flag.py +++ b/ldclient/impl/model/feature_flag.py @@ -1,4 +1,4 @@ -from typing import Any, List, Optional +from typing import Any, List, Optional, Set from ldclient.impl.model.clause import Clause from ldclient.impl.model.entity import * @@ -27,7 +27,7 @@ class Target: def __init__(self, data: dict): self._context_kind = opt_str(data, 'contextKind') self._variation = req_int(data, 'variation') - self._values = req_str_list(data, 'values') + self._values = set(req_str_list(data, 'values')) @property def context_kind(self) -> Optional[str]: @@ -38,7 +38,7 @@ def variation(self) -> int: return self._variation @property - def values(self) -> List[str]: + def values(self) -> Set[str]: return self._values diff --git a/ldclient/impl/model/segment.py b/ldclient/impl/model/segment.py index f3ee1afd..f5c4fa2a 100644 --- a/ldclient/impl/model/segment.py +++ b/ldclient/impl/model/segment.py @@ -1,4 +1,4 @@ -from typing import Any, List, Optional +from typing import Any, List, Optional, Set from ldclient.impl.model.attribute_ref import AttributeRef, opt_attr_ref_with_opt_context_kind from ldclient.impl.model.clause import Clause @@ -10,14 +10,14 @@ class SegmentTarget: def __init__(self, data: dict, logger = None): self._context_kind = opt_str(data, 'contextKind') - self._values = req_str_list(data, 'values') + self._values = set(req_str_list(data, 'values')) @property def context_kind(self) -> Optional[str]: return self._context_kind @property - def values(self) -> List[str]: + def values(self) -> Set[str]: return self._values @@ -63,8 +63,8 @@ def __init__(self, data: dict): self._deleted = opt_bool(data, 'deleted') if self._deleted: return - self._included = opt_str_list(data, 'included') - self._excluded = opt_str_list(data, 'excluded') + self._included = set(opt_str_list(data, 'included')) + self._excluded = set(opt_str_list(data, 'excluded')) self._included_contexts = list(SegmentTarget(item) for item in opt_dict_list(data, 'includedContexts')) self._excluded_contexts = list(SegmentTarget(item) for item in opt_dict_list(data, 'excludedContexts')) self._rules = list(SegmentRule(item) for item in 
opt_dict_list(data, 'rules')) @@ -86,11 +86,11 @@ def deleted(self) -> bool: return self._deleted @property - def included(self) -> List[str]: + def included(self) -> Set[str]: return self._included @property - def excluded(self) -> List[str]: + def excluded(self) -> Set[str]: return self._excluded @property From 62c81f4f7169fb25d1b7479616fcb19db4a0c8af Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 28 Dec 2022 12:40:43 -0800 Subject: [PATCH 342/356] fix type hint --- ldclient/impl/evaluator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index 8fcc961a..c35d5e1a 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -423,7 +423,7 @@ def _bucketable_string_value(u_value) -> Optional[str]: return None -def _context_key_is_in_target_list(context: Context, context_kind: Optional[str], keys: Optional[List[str]]) -> bool: +def _context_key_is_in_target_list(context: Context, context_kind: Optional[str], keys: Set[str]) -> bool: if keys is None or len(keys) == 0: return False match_context = context.get_individual_context(context_kind or Context.DEFAULT_KIND) From bef4110633a37c05b6faaafecb99437857036116 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Wed, 28 Dec 2022 14:11:59 -0800 Subject: [PATCH 343/356] preprocess clause values for time/regex/semver operators --- ldclient/impl/evaluator.py | 23 ++-- ldclient/impl/model/clause.py | 41 +++++- ldclient/impl/model/value_parsing.py | 85 ++++++++++++ ldclient/impl/operators.py | 191 ++++++++------------------- testing/impl/test_model_decode.py | 47 +++++++ testing/impl/test_operators.py | 12 +- 6 files changed, 246 insertions(+), 153 deletions(-) create mode 100644 ldclient/impl/model/value_parsing.py create mode 100644 testing/impl/test_model_decode.py diff --git a/ldclient/impl/evaluator.py b/ldclient/impl/evaluator.py index c35d5e1a..0ceed119 100644 --- a/ldclient/impl/evaluator.py +++ b/ldclient/impl/evaluator.py @@ -202,10 +202,8 @@ def _rule_matches_context(self, rule: FlagRule, context: Context, state: EvalRes return True def _clause_matches_context(self, clause: Clause, context: Context, state: EvalResult) -> bool: - op = clause.op - clause_values = clause.values - if op == 'segmentMatch': - for seg_key in clause_values: + if clause.op == 'segmentMatch': + for seg_key in clause.values: segment = self.__get_segment(seg_key) if segment is not None and self._segment_matches_context(segment, context, state): return _maybe_negate(clause, True) @@ -226,10 +224,10 @@ def _clause_matches_context(self, clause: Clause, context: Context, state: EvalR # is the attr an array? 
if isinstance(context_value, (list, tuple)): for v in context_value: - if _match_single_context_value(op, v, clause_values): + if _match_single_context_value(clause, v): return _maybe_negate(clause, True) return _maybe_negate(clause, False) - return _maybe_negate(clause, _match_single_context_value(op, context_value, clause_values)) + return _maybe_negate(clause, _match_single_context_value(clause, context_value)) def _segment_matches_context(self, segment: Segment, context: Context, state: EvalResult) -> bool: if state.segment_stack is not None and segment.key in state.segment_stack: @@ -447,12 +445,14 @@ def _get_context_value_by_attr_ref(context: Context, attr: AttributeRef) -> Any: i += 1 return value -def _match_single_context_value(op: str, context_value: Any, values: List[Any]) -> bool: - op_fn = operators.ops.get(op) +def _match_single_context_value(clause: Clause, context_value: Any) -> bool: + op_fn = operators.ops.get(clause.op) if op_fn is None: return False - for v in values: - if op_fn(context_value, v): + values_preprocessed = clause.values_preprocessed + for i, v in enumerate(clause.values): + preprocessed = None if values_preprocessed is None else values_preprocessed[i] + if op_fn(context_value, v, preprocessed): return True return False @@ -460,10 +460,9 @@ def _match_clause_by_kind(clause: Clause, context: Context) -> bool: # If attribute is "kind", then we treat operator and values as a match expression against a list # of all individual kinds in the context. That is, for a multi-kind context with kinds of "org" # and "user", it is a match if either of those strings is a match with Operator and Values. - op = clause.op for i in range(context.individual_context_count): c = context.get_individual_context(i) - if c is not None and _match_single_context_value(op, c.kind, clause.values): + if c is not None and _match_single_context_value(clause, c.kind): return True return False diff --git a/ldclient/impl/model/clause.py b/ldclient/impl/model/clause.py index 8c476f13..19ab52c5 100644 --- a/ldclient/impl/model/clause.py +++ b/ldclient/impl/model/clause.py @@ -1,10 +1,44 @@ +from re import Pattern +from semver import VersionInfo from typing import Any, List, Optional from ldclient.impl.model.attribute_ref import AttributeRef, req_attr_ref_with_opt_context_kind from ldclient.impl.model.entity import * +from ldclient.impl.model.value_parsing import parse_regex, parse_semver, parse_time +class ClausePreprocessedValue: + __slots__ = ['_as_time', '_as_regex', '_as_semver'] + + def __init__(self, as_time: Optional[float]=None, as_regex: Optional[Pattern]=None, as_semver: Optional[VersionInfo]=None): + self._as_time = as_time + self._as_regex = as_regex + self._as_semver = as_semver + + @property + def as_time(self) -> Optional[float]: + return self._as_time + + @property + def as_regex(self) -> Optional[Pattern]: + return self._as_regex + + @property + def as_semver(self) -> Optional[VersionInfo]: + return self._as_semver + + +def _preprocess_clause_values(op: str, values: List[Any]) -> Optional[List[ClausePreprocessedValue]]: + if op == 'matches': + return list(ClausePreprocessedValue(as_regex=parse_regex(value)) for value in values) + if op == 'before' or op == 'after': + return list(ClausePreprocessedValue(as_time=parse_time(value)) for value in values) + if op == 'semVerEqual' or op == 'semVerGreaterThan' or op == 'semVerLessThan': + return list(ClausePreprocessedValue(as_semver=parse_semver(value)) for value in values) + return None + + class Clause: - __slots__ = 
['_context_kind', '_attribute', '_op', '_values', '_negate'] + __slots__ = ['_context_kind', '_attribute', '_op', '_negate', '_values', '_values_preprocessed'] def __init__(self, data: dict): self._context_kind = opt_str(data, 'contextKind') @@ -12,6 +46,7 @@ def __init__(self, data: dict): self._negate = opt_bool(data, 'negate') self._op = req_str(data, 'op') self._values = req_list(data, 'values') + self._values_preprocessed = _preprocess_clause_values(self._op, self._values) @property def attribute(self) -> AttributeRef: @@ -32,3 +67,7 @@ def op(self) -> str: @property def values(self) -> List[Any]: return self._values + + @property + def values_preprocessed(self) -> Optional[List[ClausePreprocessedValue]]: + return self._values_preprocessed diff --git a/ldclient/impl/model/value_parsing.py b/ldclient/impl/model/value_parsing.py new file mode 100644 index 00000000..8fb843a0 --- /dev/null +++ b/ldclient/impl/model/value_parsing.py @@ -0,0 +1,85 @@ +import re +from re import Pattern +from semver import VersionInfo +from datetime import tzinfo, timedelta, datetime +from numbers import Number +from typing import Any, Optional + +import pyrfc3339 + +_ZERO = timedelta(0) + +# A UTC class. + +class _UTC(tzinfo): + """UTC""" + + def utcoffset(self, dt): + return _ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return _ZERO + +_epoch = datetime.utcfromtimestamp(0).replace(tzinfo=_UTC()) + + +def is_number(input: Any) -> bool: + # bool is a subtype of int, and we don't want to try and treat it as a number. + return isinstance(input, Number) and not isinstance(input, bool) + + +def parse_regex(input: Any) -> Optional[Pattern]: + if isinstance(input, str): + try: + return re.compile(input) + except Exception: + return None + return None + + +def parse_time(input: Any) -> Optional[float]: + """ + :param input: Either a number as milliseconds since Unix Epoch, or a string as a valid RFC3339 timestamp + :return: milliseconds since Unix epoch, or None if input was invalid. + """ + + if is_number(input): + return float(input) + + if isinstance(input, str): + try: + parsed_time = pyrfc3339.parse(input) + timestamp = (parsed_time - _epoch).total_seconds() + return timestamp * 1000.0 + except Exception as e: + return None + + return None + +def parse_semver(input: Any) -> Optional[VersionInfo]: + if not isinstance(input, str): + return None + try: + return VersionInfo.parse(input) + except TypeError: + return None + except ValueError as e: + try: + input = _add_zero_version_component(input) + return VersionInfo.parse(input) + except ValueError as e: + try: + input = _add_zero_version_component(input) + return VersionInfo.parse(input) + except ValueError as e: + return None + +def _add_zero_version_component(input): + m = re.search("^([0-9.]*)(.*)", input) + if m is None: + return input + ".0" + return m.group(1) + ".0" + m.group(2) diff --git a/ldclient/impl/operators.py b/ldclient/impl/operators.py index 0fb45c68..dbde8f65 100644 --- a/ldclient/impl/operators.py +++ b/ldclient/impl/operators.py @@ -1,179 +1,96 @@ -""" -Implementation details of feature flag evaluation. 
-""" -# currently excluded from documentation - see docs/README.md +from ldclient.impl.model.clause import ClausePreprocessedValue +from ldclient.impl.model.value_parsing import is_number, parse_semver, parse_time -import logging -import re -from semver import VersionInfo -import sys -from datetime import tzinfo, timedelta, datetime from collections import defaultdict from numbers import Number +from semver import VersionInfo +from typing import Any, Callable, Optional -import pyrfc3339 - -log = logging.getLogger(sys.modules[__name__].__name__) - - -def _string_operator(u, c, fn): - return fn(u, c) if isinstance(u, str) and isinstance(c, str) else False - -def _numeric_operator(u, c, fn): - # bool is a subtype of int, and we don't want to try and compare it as a number. - if isinstance(u, bool): - return False - - if isinstance(u, Number): - if isinstance(c, Number): - return fn(u, c) - return False - - -def _parse_time(input): - """ - :param input: Either a number as milliseconds since Unix Epoch, or a string as a valid RFC3339 timestamp - :return: milliseconds since Unix epoch, or None if input was invalid. - """ - - # bool is a subtype of int, and we don't want to try and compare it as a time. - if isinstance(input, bool): - log.warning("Got unexpected bool type when attempting to parse time") - return None - - if isinstance(input, Number): - return float(input) - - if isinstance(input, str): - try: - parsed_time = pyrfc3339.parse(input) - timestamp = (parsed_time - epoch).total_seconds() - return timestamp * 1000.0 - except Exception as e: - log.warning("Couldn't parse timestamp:" + str(input) + " with message: " + str(e)) - return None - - log.warning("Got unexpected type: " + str(type(input)) + " with value: " + str(input) + " when attempting to parse time") - return None - -def _time_operator(u, c, fn): - u_time = _parse_time(u) - if u_time is not None: - c_time = _parse_time(c) - if c_time is not None: - return fn(u_time, c_time) - return False - -def _parse_semver(input): - try: - VersionInfo.parse(input) - return input - except TypeError: - return None - except ValueError as e: - try: - input = _add_zero_version_component(input) - VersionInfo.parse(input) - return input - except ValueError as e: - try: - input = _add_zero_version_component(input) - VersionInfo.parse(input) - return input - except ValueError as e: - return None - -def _add_zero_version_component(input): - m = re.search("^([0-9.]*)(.*)", input) - if m is None: - return input + ".0" - return m.group(1) + ".0" + m.group(2) - -def _semver_operator(u, c, fn): - u_ver = _parse_semver(u) - c_ver = _parse_semver(c) - if u_ver is not None and c_ver is not None: - return fn(u_ver, c_ver) - return False +def _string_operator(context_value: Any, clause_value: Any, fn: Callable[[str, str], bool]) -> bool: + return isinstance(context_value, str) and isinstance(clause_value, str) and fn(context_value, clause_value) -def _in(u, c): - if u == c: - return True - return False +def _numeric_operator(context_value: Any, clause_value: Any, fn: Callable[[float, float], bool]) -> bool: + return is_number(context_value) and is_number(clause_value) and fn(float(context_value), float(clause_value)) -def _starts_with(u, c): - return _string_operator(u, c, lambda u, c: u.startswith(c)) +def _time_operator(clause_preprocessed: Optional[ClausePreprocessedValue], + context_value: Any, fn: Callable[[float, float], bool]) -> bool: + clause_time = None if clause_preprocessed 
is None else clause_preprocessed.as_time + if clause_time is None: + return False + context_time = parse_time(context_value) + return context_time is not None and fn(context_time, clause_time) -def _ends_with(u, c): - return _string_operator(u, c, lambda u, c: u.endswith(c)) +def _semver_operator(clause_preprocessed: Optional[ClausePreprocessedValue], + context_value: Any, fn: Callable[[VersionInfo, VersionInfo], bool]) -> bool: + clause_ver = None if clause_preprocessed is None else clause_preprocessed.as_semver + if clause_ver is None: + return False + context_ver = parse_semver(context_value) + return context_ver is not None and fn(context_ver, clause_ver) -def _contains(u, c): - return _string_operator(u, c, lambda u, c: c in u) +def _in(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]) -> bool: + return context_value == clause_value -def _matches(u, c): - return _string_operator(u, c, lambda u, c: re.search(c, u) is not None) +def _starts_with(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]) -> bool: + return _string_operator(context_value, clause_value, lambda a, b: a.startswith(b)) -def _less_than(u, c): - return _numeric_operator(u, c, lambda u, c: u < c) +def _ends_with(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _string_operator(context_value, clause_value, lambda a, b: a.endswith(b)) -def _less_than_or_equal(u, c): - return _numeric_operator(u, c, lambda u, c: u <= c) +def _contains(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _string_operator(context_value, clause_value, lambda a, b: b in a) -def _greater_than(u, c): - return _numeric_operator(u, c, lambda u, c: u > c) +def _matches(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + clause_regex = None if clause_preprocessed is None else clause_preprocessed.as_regex + if clause_regex is None: + return False + return isinstance(clause_value, str) and clause_regex.search(context_value) is not None -def _greater_than_or_equal(u, c): - return _numeric_operator(u, c, lambda u, c: u >= c) +def _less_than(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _numeric_operator(context_value, clause_value, lambda a, b: a < b) -def _before(u, c): - return _time_operator(u, c, lambda u, c: u < c) +def _less_than_or_equal(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _numeric_operator(context_value, clause_value, lambda a, b: a <= b) -def _after(u, c): - return _time_operator(u, c, lambda u, c: u > c) +def _greater_than(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _numeric_operator(context_value, clause_value, lambda a, b: a > b) -def _semver_equal(u, c): - return _semver_operator(u, c, lambda u, c: VersionInfo.parse(u).compare(c) == 0) +def _greater_than_or_equal(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _numeric_operator(context_value, clause_value, lambda a, b: a >= b) -def _semver_less_than(u, c): - return _semver_operator(u, c, lambda u, c: VersionInfo.parse(u).compare(c) < 0) +def _before(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _time_operator(clause_preprocessed, context_value, lambda a, 
b: a < b) -def _semver_greater_than(u, c): - return _semver_operator(u, c, lambda u, c: VersionInfo.parse(u).compare(c) > 0) +def _after(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _time_operator(clause_preprocessed, context_value, lambda a, b: a > b) -_ZERO = timedelta(0) -_HOUR = timedelta(hours=1) -# A UTC class. +def _semver_equal(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _semver_operator(clause_preprocessed, context_value, lambda a, b: a.compare(b) == 0) -class _UTC(tzinfo): - """UTC""" - def utcoffset(self, dt): - return _ZERO +def _semver_less_than(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _semver_operator(clause_preprocessed, context_value, lambda a, b: a.compare(b) < 0) - def tzname(self, dt): - return "UTC" - def dst(self, dt): - return _ZERO +def _semver_greater_than(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): + return _semver_operator(clause_preprocessed, context_value, lambda a, b: a.compare(b) > 0) -epoch = datetime.utcfromtimestamp(0).replace(tzinfo=_UTC()) ops = { "in": _in, @@ -192,4 +109,4 @@ def dst(self, dt): "semVerGreaterThan": _semver_greater_than } -ops = defaultdict(lambda: lambda l, r: False, ops) +ops = defaultdict(lambda: lambda l, r, p: False, ops) diff --git a/testing/impl/test_model_decode.py b/testing/impl/test_model_decode.py new file mode 100644 index 00000000..a5e0d44a --- /dev/null +++ b/testing/impl/test_model_decode.py @@ -0,0 +1,47 @@ +import pytest +import re +from semver import VersionInfo + +from ldclient.impl.model import * + +from testing.builders import * + + +def test_flag_targets_are_stored_as_sets(): + flag = FlagBuilder("key") \ + .target(0, "a", "b") \ + .context_target("kind1", 0, "c", "d") \ + .build() + assert flag.targets[0].values == {"a", "b"} + assert flag.context_targets[0].values == {"c", "d"} + +def test_segment_targets_are_stored_as_sets(): + segment = SegmentBuilder("key") \ + .included("a", "b") \ + .excluded("c", "d") \ + .included_contexts("kind1", "e", "f") \ + .excluded_contexts("kind2", "g", "h") \ + .build() + assert segment.included == {"a", "b"} + assert segment.excluded == {"c", "d"} + assert segment.included_contexts[0].values == {"e", "f"} + assert segment.excluded_contexts[0].values == {"g", "h"} + +def test_clause_values_preprocessed_with_regex_operator(): + pattern_str = "^[a-z]*$" + pattern = re.compile(pattern_str) + flag = make_boolean_flag_with_clauses(make_clause(None, "attr", "matches", pattern_str, "?", True)) + assert flag.rules[0].clauses[0]._values == [pattern_str, "?", True] + assert list(x.as_regex for x in flag.rules[0].clauses[0]._values_preprocessed) == [pattern, None, None] + +@pytest.mark.parametrize('op', ['semVerEqual', 'semVerGreaterThan', 'semVerLessThan']) +def test_clause_values_preprocessed_with_semver_operator(op): + flag = make_boolean_flag_with_clauses(make_clause(None, "attr", op, "1.2.3", 1, True)) + assert flag.rules[0].clauses[0]._values == ["1.2.3", 1, True] + assert list(x.as_semver for x in flag.rules[0].clauses[0]._values_preprocessed) == [VersionInfo(1, 2, 3), None, None] + +@pytest.mark.parametrize('op', ['before', 'after']) +def test_clause_values_preprocessed_with_time_operator(op): + flag = make_boolean_flag_with_clauses(make_clause(None, "attr", op, 1000, "1970-01-01T00:00:02Z", True)) + assert flag.rules[0].clauses[0]._values == [1000, 
"1970-01-01T00:00:02Z", True] + assert list(x.as_time for x in flag.rules[0].clauses[0]._values_preprocessed) == [1000, 2000, None] diff --git a/testing/impl/test_operators.py b/testing/impl/test_operators.py index 7b9d7294..4ed56e79 100644 --- a/testing/impl/test_operators.py +++ b/testing/impl/test_operators.py @@ -2,8 +2,10 @@ from ldclient.impl import operators +from testing.builders import * -@pytest.mark.parametrize("op,value1,value2,expected", [ + +@pytest.mark.parametrize("op,context_value,clause_value,expected", [ # numeric comparisons [ "in", 99, 99, True ], [ "in", 99.0001, 99.0001, True ], @@ -86,5 +88,9 @@ [ "semVerLessThan", "2.0.1", "xbad%ver", False ], [ "semVerGreaterThan", "2.0.1", "xbad%ver", False ] ]) -def test_operator(op, value1, value2, expected): - assert operators.ops.get(op)(value1, value2) == expected + +def test_operator(op, context_value, clause_value, expected): + flag = make_boolean_flag_with_clauses(make_clause(None, 'attr', op, clause_value)) + preprocessed = flag.rules[0].clauses[0].values_preprocessed + result = operators.ops.get(op)(context_value, clause_value, None if preprocessed is None else preprocessed[0]) + assert result == expected From 48fac418c335ed096df45d2545120774f6590bb1 Mon Sep 17 00:00:00 2001 From: Eli Bishop Date: Thu, 29 Dec 2022 12:22:30 -0800 Subject: [PATCH 344/356] fix type checking for matches operator --- ldclient/impl/operators.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ldclient/impl/operators.py b/ldclient/impl/operators.py index dbde8f65..bf4b5578 100644 --- a/ldclient/impl/operators.py +++ b/ldclient/impl/operators.py @@ -53,7 +53,7 @@ def _matches(context_value: Any, clause_value: Any, clause_preprocessed: Optiona clause_regex = None if clause_preprocessed is None else clause_preprocessed.as_regex if clause_regex is None: return False - return isinstance(clause_value, str) and clause_regex.search(context_value) is not None + return isinstance(context_value, str) and clause_regex.search(context_value) is not None def _less_than(context_value: Any, clause_value: Any, clause_preprocessed: Optional[ClausePreprocessedValue]): From 9b6096387dbda8856598edbc25bf4b51a2025032 Mon Sep 17 00:00:00 2001 From: "Matthew M. 
Keeler" Date: Tue, 17 Jan 2023 13:22:42 -0500 Subject: [PATCH 345/356] Add application info support (#214) --- contract-tests/client_entity.py | 7 +++++++ contract-tests/service.py | 1 + ldclient/config.py | 20 ++++++++++++++++++-- ldclient/impl/http.py | 20 ++++++++++++++++++++ ldclient/util.py | 22 ++++++++++++++++++++++ testing/test_config.py | 27 +++++++++++++++++++++++++++ testing/test_event_processor.py | 16 ++++++++++++++++ testing/test_feature_requester.py | 14 ++++++++++++++ testing/test_streaming.py | 16 ++++++++++++++++ 9 files changed, 141 insertions(+), 2 deletions(-) diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index 5d2d5220..e835e4f4 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -12,6 +12,13 @@ def __init__(self, tag, config): self.log = logging.getLogger(tag) opts = {"sdk_key": config["credential"]} + tags = config.get('tags', {}) + if tags: + opts['application'] = { + 'id': tags.get('applicationId', ''), + 'version': tags.get('applicationVersion', ''), + } + if config.get("streaming") is not None: streaming = config["streaming"] if streaming.get("baseUri") is not None: diff --git a/contract-tests/service.py b/contract-tests/service.py index d9f8e0a5..79b0e621 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -63,6 +63,7 @@ def status(): 'all-flags-with-reasons', 'all-flags-client-side-only', 'all-flags-details-only-for-tracked-flags', + 'tags', ] } return (json.dumps(body), 200, {'Content-type': 'application/json'}) diff --git a/ldclient/config.py b/ldclient/config.py index dfe1a29a..faf6fcc1 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -7,7 +7,7 @@ from typing import Optional, Callable, List, Any, Set from ldclient.feature_store import InMemoryFeatureStore -from ldclient.util import log +from ldclient.util import log, validate_application_info from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor, FeatureRequester GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' @@ -173,7 +173,8 @@ def __init__(self, wrapper_name: Optional[str]=None, wrapper_version: Optional[str]=None, http: HTTPConfig=HTTPConfig(), - big_segments: Optional[BigSegmentsConfig]=None): + big_segments: Optional[BigSegmentsConfig]=None, + application: Optional[dict]=None): """ :param sdk_key: The SDK key for your LaunchDarkly account. This is always required. :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default @@ -239,6 +240,7 @@ def __init__(self, servers. :param http: Optional properties for customizing the client's HTTP/HTTPS behavior. See :class:`HTTPConfig`. + :param application: Optional properties for setting application metadata. See :py:attr:`~application` """ self.__sdk_key = sdk_key @@ -271,6 +273,7 @@ def __init__(self, self.__wrapper_version = wrapper_version self.__http = http self.__big_segments = BigSegmentsConfig() if not big_segments else big_segments + self.__application = validate_application_info(application or {}, log) def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. @@ -441,3 +444,16 @@ def big_segments(self) -> BigSegmentsConfig: def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key == '': log.warning("Missing or blank sdk_key.") + + @property + def application(self) -> dict: + """ + An object that allows configuration of application metadata. 
+ + Application metadata may be used in LaunchDarkly analytics or other + product features, but does not affect feature flag evaluations. + + If you want to set non-default values for any of these fields, provide + the appropriately configured dict to the {Config} object. + """ + return self.__application diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py index ef36c8ba..858fd371 100644 --- a/ldclient/impl/http.py +++ b/ldclient/impl/http.py @@ -3,14 +3,34 @@ from os import environ import urllib3 +def _application_header_value(application: dict) -> str: + parts = [] + id = application.get('id', '') + version = application.get('version', '') + + if id: + parts.append("application-id/%s" % id) + + if version: + parts.append("application-version/%s" % version) + + return " ".join(parts) + + def _base_headers(config): headers = {'Authorization': config.sdk_key or '', 'User-Agent': 'PythonClient/' + VERSION} + + app_value = _application_header_value(config.application) + if app_value: + headers['X-LaunchDarkly-Tags'] = app_value + if isinstance(config.wrapper_name, str) and config.wrapper_name != "": wrapper_version = "" if isinstance(config.wrapper_version, str) and config.wrapper_version != "": wrapper_version = "/" + config.wrapper_version headers.update({'X-LaunchDarkly-Wrapper': config.wrapper_name + wrapper_version}) + return headers def _http_factory(config): diff --git a/ldclient/util.py b/ldclient/util.py index 66c0c70b..042f33dc 100644 --- a/ldclient/util.py +++ b/ldclient/util.py @@ -4,10 +4,12 @@ # currently excluded from documentation - see docs/README.md import logging +import re from os import environ import sys import urllib3 +from typing import Any from ldclient.impl.http import HTTPFactory, _base_headers log = logging.getLogger(sys.modules[__name__].__name__) @@ -25,6 +27,26 @@ _retryable_statuses = [400, 408, 429] +def validate_application_info(application: dict, logger: logging.Logger) -> dict: + return { + "id": validate_application_value(application.get("id", ""), "id", logger), + "version": validate_application_value(application.get("version", ""), "version", logger), + } + +def validate_application_value(value: Any, name: str, logger: logging.Logger) -> str: + if not isinstance(value, str): + return "" + + if len(value) > 64: + logger.warning('Value of application[%s] was longer than 64 characters and was discarded' % name) + return "" + + if re.search(r"[^a-zA-Z0-9._-]", value): + logger.warning('Value of application[%s] contained invalid characters and was discarded' % name) + return "" + + return value + def _headers(config): base_headers = _base_headers(config) base_headers.update({'Content-Type': "application/json"}) diff --git a/testing/test_config.py b/testing/test_config.py index 701e70e5..7c5e342d 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1,4 +1,5 @@ from ldclient.config import Config +import pytest def test_copy_config(): @@ -40,3 +41,29 @@ def test_trims_trailing_slashes_on_uris(): assert config.base_uri == "https://launchdarkly.com" assert config.events_uri == "https://docs.launchdarkly.com/bulk" assert config.stream_base_uri == "https://blog.launchdarkly.com" + +def application_can_be_set_and_read(): + application = {"id": "my-id", "version": "abcdef"} + config = Config(sdk_key = "SDK_KEY", application = application) + assert config.application == {"id": "my-id", "version": "abcdef"} + +def application_can_handle_non_string_values(): + application = {"id": 1, "version": 2} + config = Config(sdk_key = "SDK_KEY", application 
= application) + assert config.application == {"id": "1", "version": "2"} + +def application_will_ignore_invalid_keys(): + application = {"invalid": 1, "key": 2} + config = Config(sdk_key = "SDK_KEY", application = application) + assert config.application == {"id": "", "version": ""} + +@pytest.fixture(params = [ + " ", + "@", + ":", + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-a" +]) +def application_will_drop_invalid_values(value): + application = {"id": value, "version": value} + config = Config(sdk_key = "SDK_KEY", application = application) + assert config.application == {"id": "", "version": ""} diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py index 363d980e..ec777812 100644 --- a/testing/test_event_processor.py +++ b/testing/test_event_processor.py @@ -484,6 +484,22 @@ def test_wrapper_header_sent_without_version(): assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask" +def test_application_header_not_sent_when_not_set(): + with DefaultTestProcessor() as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + assert mock_http.request_headers.get('X-LaunchDarkly-Tags') is None + +def test_application_header_sent_when_set(): + with DefaultTestProcessor(wrapper_name = "Flask", application = {"id": "my-id", "version": "my-version"}) as ep: + ep.send_event({ 'kind': 'identify', 'user': user }) + ep.flush() + ep._wait_until_inactive() + + assert mock_http.request_headers.get('X-LaunchDarkly-Tags') == "application-id/my-id application-version/my-version" + def test_event_schema_set_on_event_send(): with DefaultTestProcessor() as ep: ep.send_event({ 'kind': 'identify', 'user': user }) diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py index db18f555..031167dc 100644 --- a/testing/test_feature_requester.py +++ b/testing/test_feature_requester.py @@ -35,6 +35,7 @@ def test_get_all_data_sends_headers(): assert req.headers['Authorization'] == 'sdk-key' assert req.headers['User-Agent'] == 'PythonClient/' + VERSION assert req.headers.get('X-LaunchDarkly-Wrapper') is None + assert req.headers.get('X-LaunchDarkly-Tags') is None def test_get_all_data_sends_wrapper_header(): with start_server() as server: @@ -62,6 +63,19 @@ def test_get_all_data_sends_wrapper_header_without_version(): req = server.require_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' +def test_get_all_data_sends_tags_header(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri, + application = {"id": "my-id", "version": "my-version"}) + fr = FeatureRequesterImpl(config) + + resp_data = { 'flags': {}, 'segments': {} } + server.for_path('/sdk/latest-all', JsonResponse(resp_data)) + + fr.get_all_data() + req = server.require_request() + assert req.headers.get('X-LaunchDarkly-Tags') == 'application-id/my-id application-version/my-version' + def test_get_all_data_can_use_cached_data(): with start_server() as server: config = Config(sdk_key = 'sdk-key', base_uri = server.uri) diff --git a/testing/test_streaming.py b/testing/test_streaming.py index 1838e500..82700b4d 100644 --- a/testing/test_streaming.py +++ b/testing/test_streaming.py @@ -38,6 +38,7 @@ def test_request_properties(): assert req.headers.get('Authorization') == 'sdk-key' assert req.headers.get('User-Agent') == 'PythonClient/' + VERSION assert req.headers.get('X-LaunchDarkly-Wrapper') is None + assert req.headers.get('X-LaunchDarkly-Tags') is None def 
test_sends_wrapper_header(): store = InMemoryFeatureStore() @@ -69,6 +70,21 @@ def test_sends_wrapper_header_without_version(): req = server.await_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' +def test_sends_tag_header(): + store = InMemoryFeatureStore() + ready = Event() + + with start_server() as server: + with stream_content(make_put_event()) as stream: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, + application = {"id": "my-id", "version": "my-version"}) + server.for_path('/all', stream) + + with StreamingUpdateProcessor(config, store, ready, None) as sp: + sp.start() + req = server.await_request() + assert req.headers.get('X-LaunchDarkly-Tags') == 'application-id/my-id application-version/my-version' + def test_receives_put_event(): store = InMemoryFeatureStore() ready = Event() From 885eeb9d5b0b0a7fa83c118b86e54985dd3cbf09 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Fri, 20 Jan 2023 11:20:22 -0500 Subject: [PATCH 346/356] Add application info support (#214) (#215) --- contract-tests/client_entity.py | 7 ++++ contract-tests/service.py | 3 +- ldclient/config.py | 33 ++++++++++++++----- ldclient/impl/http.py | 20 +++++++++++ ldclient/impl/util.py | 22 +++++++++++++ .../impl/datasource/test_feature_requester.py | 14 ++++++++ testing/impl/datasource/test_streaming.py | 16 +++++++++ testing/test_config.py | 27 +++++++++++++++ 8 files changed, 132 insertions(+), 10 deletions(-) diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index dac8625f..702a6a90 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -18,6 +18,13 @@ def __init__(self, tag, config): self.log = logging.getLogger(tag) opts = {"sdk_key": config["credential"]} + tags = config.get('tags', {}) + if tags: + opts['application'] = { + 'id': tags.get('applicationId', ''), + 'version': tags.get('applicationVersion', ''), + } + if config.get("streaming") is not None: streaming = config["streaming"] if streaming.get("baseUri") is not None: diff --git a/contract-tests/service.py b/contract-tests/service.py index e455d8ad..16a078ad 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -67,6 +67,7 @@ def status(): 'big-segments', 'context-type', 'secure-mode-hash', + 'tags', ] } return (json.dumps(body), 200, {'Content-type': 'application/json'}) @@ -131,7 +132,7 @@ def post_client_command(id): response = client.get_big_segment_store_status() else: return ('', 400) - + if response is None: return ('', 201) return (json.dumps(response), 200) diff --git a/ldclient/config.py b/ldclient/config.py index 3ae0c04c..540928c1 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -7,7 +7,7 @@ from typing import Optional, Callable, List, Set from ldclient.feature_store import InMemoryFeatureStore -from ldclient.impl.util import log +from ldclient.impl.util import log, validate_application_info from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' @@ -60,15 +60,15 @@ def __init__(self, self.__status_poll_interval = status_poll_interval self.__stale_after = stale_after pass - + @property def store(self) -> Optional[BigSegmentStore]: return self.__store - + @property def context_cache_size(self) -> int: return self.__context_cache_size - + @property def context_cache_time(self) -> float: return self.__context_cache_time @@ -77,7 +77,7 @@ def context_cache_time(self) -> float: def user_cache_size(self) -> int: """Deprecated 
alias for :attr:`context_cache_size`.""" return self.context_cache_size - + @property def user_cache_time(self) -> float: """Deprecated alias for :attr:`context_cache_time`.""" @@ -86,7 +86,7 @@ def user_cache_time(self) -> float: @property def status_poll_interval(self) -> float: return self.__status_poll_interval - + @property def stale_after(self) -> float: return self.__stale_after @@ -169,7 +169,7 @@ def __init__(self, initial_reconnect_delay: float=1, defaults: dict={}, send_events: Optional[bool]=None, - update_processor_class: Optional[Callable[[str, 'Config', FeatureStore], UpdateProcessor]]=None, + update_processor_class: Optional[Callable[[str, 'Config', FeatureStore], UpdateProcessor]]=None, poll_interval: float=30, use_ldd: bool=False, feature_store: Optional[FeatureStore]=None, @@ -188,7 +188,8 @@ def __init__(self, wrapper_name: Optional[str]=None, wrapper_version: Optional[str]=None, http: HTTPConfig=HTTPConfig(), - big_segments: Optional[BigSegmentsConfig]=None): + big_segments: Optional[BigSegmentsConfig]=None, + application: Optional[dict]=None): """ :param sdk_key: The SDK key for your LaunchDarkly account. This is always required. :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default @@ -256,6 +257,7 @@ def __init__(self, servers. :param http: Optional properties for customizing the client's HTTP/HTTPS behavior. See :class:`HTTPConfig`. + :param application: Optional properties for setting application metadata. See :py:attr:`~application` """ self.__sdk_key = sdk_key @@ -287,6 +289,7 @@ def __init__(self, self.__wrapper_version = wrapper_version self.__http = http self.__big_segments = BigSegmentsConfig() if not big_segments else big_segments + self.__application = validate_application_info(application or {}, log) def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config': """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key. @@ -459,9 +462,21 @@ def http(self) -> HTTPConfig: def big_segments(self) -> BigSegmentsConfig: return self.__big_segments + @property + def application(self) -> dict: + """ + An object that allows configuration of application metadata. + + Application metadata may be used in LaunchDarkly analytics or other + product features, but does not affect feature flag evaluations. + + If you want to set non-default values for any of these fields, provide + the appropriately configured dict to the {Config} object. 
+ """ + return self.__application + def _validate(self): if self.offline is False and self.sdk_key is None or self.sdk_key == '': log.warning("Missing or blank sdk_key.") - __all__ = ['Config', 'BigSegmentsConfig', 'HTTPConfig'] diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py index ef36c8ba..858fd371 100644 --- a/ldclient/impl/http.py +++ b/ldclient/impl/http.py @@ -3,14 +3,34 @@ from os import environ import urllib3 +def _application_header_value(application: dict) -> str: + parts = [] + id = application.get('id', '') + version = application.get('version', '') + + if id: + parts.append("application-id/%s" % id) + + if version: + parts.append("application-version/%s" % version) + + return " ".join(parts) + + def _base_headers(config): headers = {'Authorization': config.sdk_key or '', 'User-Agent': 'PythonClient/' + VERSION} + + app_value = _application_header_value(config.application) + if app_value: + headers['X-LaunchDarkly-Tags'] = app_value + if isinstance(config.wrapper_name, str) and config.wrapper_name != "": wrapper_version = "" if isinstance(config.wrapper_version, str) and config.wrapper_version != "": wrapper_version = "/" + config.wrapper_version headers.update({'X-LaunchDarkly-Wrapper': config.wrapper_name + wrapper_version}) + return headers def _http_factory(config): diff --git a/ldclient/impl/util.py b/ldclient/impl/util.py index 47820100..6cf63195 100644 --- a/ldclient/impl/util.py +++ b/ldclient/impl/util.py @@ -1,7 +1,9 @@ import logging +import re import sys import time +from typing import Any from ldclient.impl.http import _base_headers @@ -24,6 +26,26 @@ def current_time_millis() -> int: _retryable_statuses = [400, 408, 429] +def validate_application_info(application: dict, logger: logging.Logger) -> dict: + return { + "id": validate_application_value(application.get("id", ""), "id", logger), + "version": validate_application_value(application.get("version", ""), "version", logger), + } + +def validate_application_value(value: Any, name: str, logger: logging.Logger) -> str: + if not isinstance(value, str): + return "" + + if len(value) > 64: + logger.warning('Value of application[%s] was longer than 64 characters and was discarded' % name) + return "" + + if re.search(r"[^a-zA-Z0-9._-]", value): + logger.warning('Value of application[%s] contained invalid characters and was discarded' % name) + return "" + + return value + def _headers(config): base_headers = _base_headers(config) base_headers.update({'Content-Type': "application/json"}) diff --git a/testing/impl/datasource/test_feature_requester.py b/testing/impl/datasource/test_feature_requester.py index b6eacc1e..2deb47b2 100644 --- a/testing/impl/datasource/test_feature_requester.py +++ b/testing/impl/datasource/test_feature_requester.py @@ -32,6 +32,7 @@ def test_get_all_data_sends_headers(): assert req.headers['Authorization'] == 'sdk-key' assert req.headers['User-Agent'] == 'PythonClient/' + VERSION assert req.headers.get('X-LaunchDarkly-Wrapper') is None + assert req.headers.get('X-LaunchDarkly-Tags') is None def test_get_all_data_sends_wrapper_header(): with start_server() as server: @@ -59,6 +60,19 @@ def test_get_all_data_sends_wrapper_header_without_version(): req = server.require_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' +def test_get_all_data_sends_tags_header(): + with start_server() as server: + config = Config(sdk_key = 'sdk-key', base_uri = server.uri, + application = {"id": "my-id", "version": "my-version"}) + fr = FeatureRequesterImpl(config) + + resp_data = { 
'flags': {}, 'segments': {} } + server.for_path('/sdk/latest-all', JsonResponse(resp_data)) + + fr.get_all_data() + req = server.require_request() + assert req.headers.get('X-LaunchDarkly-Tags') == 'application-id/my-id application-version/my-version' + def test_get_all_data_can_use_cached_data(): with start_server() as server: config = Config(sdk_key = 'sdk-key', base_uri = server.uri) diff --git a/testing/impl/datasource/test_streaming.py b/testing/impl/datasource/test_streaming.py index 5bf3ba38..b017b9a8 100644 --- a/testing/impl/datasource/test_streaming.py +++ b/testing/impl/datasource/test_streaming.py @@ -40,6 +40,7 @@ def test_request_properties(): assert req.headers.get('Authorization') == 'sdk-key' assert req.headers.get('User-Agent') == 'PythonClient/' + VERSION assert req.headers.get('X-LaunchDarkly-Wrapper') is None + assert req.headers.get('X-LaunchDarkly-Tags') is None def test_sends_wrapper_header(): store = InMemoryFeatureStore() @@ -71,6 +72,21 @@ def test_sends_wrapper_header_without_version(): req = server.await_request() assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask' +def test_sends_tag_header(): + store = InMemoryFeatureStore() + ready = Event() + + with start_server() as server: + with stream_content(make_put_event()) as stream: + config = Config(sdk_key = 'sdk-key', stream_uri = server.uri, + application = {"id": "my-id", "version": "my-version"}) + server.for_path('/all', stream) + + with StreamingUpdateProcessor(config, store, ready, None) as sp: + sp.start() + req = server.await_request() + assert req.headers.get('X-LaunchDarkly-Tags') == 'application-id/my-id application-version/my-version' + def test_receives_put_event(): store = InMemoryFeatureStore() ready = Event() diff --git a/testing/test_config.py b/testing/test_config.py index 701e70e5..7c5e342d 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1,4 +1,5 @@ from ldclient.config import Config +import pytest def test_copy_config(): @@ -40,3 +41,29 @@ def test_trims_trailing_slashes_on_uris(): assert config.base_uri == "https://launchdarkly.com" assert config.events_uri == "https://docs.launchdarkly.com/bulk" assert config.stream_base_uri == "https://blog.launchdarkly.com" + +def application_can_be_set_and_read(): + application = {"id": "my-id", "version": "abcdef"} + config = Config(sdk_key = "SDK_KEY", application = application) + assert config.application == {"id": "my-id", "version": "abcdef"} + +def application_can_handle_non_string_values(): + application = {"id": 1, "version": 2} + config = Config(sdk_key = "SDK_KEY", application = application) + assert config.application == {"id": "1", "version": "2"} + +def application_will_ignore_invalid_keys(): + application = {"invalid": 1, "key": 2} + config = Config(sdk_key = "SDK_KEY", application = application) + assert config.application == {"id": "", "version": ""} + +@pytest.fixture(params = [ + " ", + "@", + ":", + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-a" +]) +def application_will_drop_invalid_values(value): + application = {"id": value, "version": value} + config = Config(sdk_key = "SDK_KEY", application = application) + assert config.application == {"id": "", "version": ""} From 60049c80c2beb49bcc1b73816a8db4e7b35b0522 Mon Sep 17 00:00:00 2001 From: "Matthew M. Keeler" Date: Mon, 30 Jan 2023 09:16:51 -0500 Subject: [PATCH 347/356] Upgrade pip to fix failing CI build (#216) The CI build was failing because pip had an outdated list of available wheels for installation. 
Since it couldn't find a match, it was trying to build a package from source, which requires the rust compiler, which in turn isn't present on some of the docker images. By updating pip we get the updated list of available wheels, thereby allowing us to bypass source building and the need for the rust compiler entirely. --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 007b5fb2..075baf29 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -60,6 +60,7 @@ jobs: - run: name: install requirements command: | + pip install --upgrade pip pip install -r test-requirements.txt; pip install -r test-filesource-optional-requirements.txt; pip install -r consul-requirements.txt; From c06f80f2ae47121ef6479264b41df76f9a5497da Mon Sep 17 00:00:00 2001 From: LaunchDarklyReleaseBot <86431345+LaunchDarklyReleaseBot@users.noreply.github.com> Date: Tue, 31 Jan 2023 07:32:09 -0800 Subject: [PATCH 348/356] prepare 7.6.0 release (#192) * comment * add end-to-end unit tests for proxy config * indents * add 3.8 build * image name * fail on SyntaxWarning * typo * command syntax * pin expiringdict dependency for Python 3.3 compatibility * add Windows CircleCI job * periods are no longer valid in CircleCI job names * syntax fix * install Python in Windows * set path * move command * turn off debug logging * Py3 in Windows * config param * rm redundant step * choco switch * refactor Linux jobs using CircleCI 2.1 features * set log level before anything else * rm Azure config * use yaml.safe_load() to avoid code execution vulnerability in file data source * Initial work on wrapper_name, wrapper_version, diagnostic config options and start of diagnostic config event creation. * Python 2 compat changes. * More event generation code and starting to integrate tracking diagnostic values. * Add minimum diagnostic recording interval. Fix diagnostic.py to be importable. Add more diagnostic event fields. * don't let user fall outside of last bucket in rollout * fixing conditional logic * Add docstrings for diagnostic configuration options. * fix off-by-1 error * avoid redundant dict lookups * add unit tests for basic bucketing logic and edge case * Stream init tracking. Feeding of accumulator object through SDK. Various fixes. * Track events in last batch. * Fix sdk version field, some stylistic improvements. * Last of diagnostic configuration object fields. * Fill out rest of platform fields. * Cleanup and failed stream initialization tracking. * Add diagnostic config option test. * Add tests for diagnostics.py * Testing rest of diagnostic fields. * Test that streaming update processor records successful and unsuccessful connection attempts in the diagnostic accumulator when available. * Improvements to testability of event processor. * Rest of event processor tests. * Remove janky reflection. * Test change to filesource optional test requirements. * [ch61092] Add event payload ID on event requests. 
* normalize data store type and OS name in diagnostic events * gitignore * copyedit to diagnostic event config property comment * fix spurious error after sending diagnostic event * make verify_ssl=False turn off certificate verification too (#129) * add more TLS config options and collect HTTP/HTTPS config options in a class (#130) * make stream retry/backoff/jitter behavior consistent with other SDKs + improve testing (#131) * streams shouldn't use the same read timeout as the rest of the SDK (#132) * implement our own retry logic & logging for event posts, don't use urllib3.Retry (#133) * remove support for indirect/patch and indirect/put * remove unused logic for individual flag/segment poll for indirect/patch * Ehaisley/84082/remove python2 (#136) * remove all references to six and remove queue fallback imports * remove NullHandler logger backwards compat * update circleci config to remove python 2.7 tests * remove ordereddict backwards compat * update setup.py to no longer list python 2.7 as compatible * no longer inherit from object for python 2 backwards compat * update readme and manifest to reflect python 2.7 removal * remove unicode type compatibility * remove 2.7 support from circleci * Allow authenticating with proxy This commit allows for authenticating with a proxy configured with the `http_proxy` environment variable. Authentication requires passing a header, and is not parsed by urllib3 from the proxy_url. * reimplement proxy tests for DRY and add test of proxy auth params * doc comment on auth params in proxy URL * add type hints to some of the public facing api. update some docs * Revert "add type hints to some of the public facing api." This reverts commit c35fa6184ce1a274fd5c6d226cb3f1f7a795901a. * Ehaisley/ch86857/type hints (#138) * add typehints to the public API * validate typehints in the public api and tests with mypy * remove all current deprecations (#139) * remove all currently deprecated classes, methods, arguments, and tests * also update semver usage to remove calls to deprecated functions and classes * remove global set_sdk_key, make SDK key required in Config (#140) * Removed the guides link * Pinning mypy and running it against different python versions (#141) * fix time zone mishandling that could make event debugging not work (#142) * fix 6.x build (#143) * fix time zone mishandling that could make event debugging not work (6.x) (#144) * prepare 6.13.3 release (#154) * Releasing version 6.13.3 * [ch99756] Add alias events (#145) * add support for experiment rollouts * fix unit test * address PR comments * use Releaser v2 config * Use newer docker images (#147) * Updates docs URLs * Add support for 3.10 (#150) * started work on FlagBuilder as part of test data source implementation * finished FlagBuilder implementation and added FlagRuleBuilder implementation * added initial TestData interface and updated tests to not rely on test data internals * started data source implementation * changed FlagBuilder to public class; changed FlagBuilder attributes to be initialized in __init__ and eliminated use of try ... 
except: pass for handling empty attributes * (big segments 1) add public config/interface types * added implementation of test data source * docstring * formatting * ensure property doesn't return None * (big segments 2) implement evaluation, refactor eval logic & modules * linting * (big segments 3) implement big segment status tracking, wire up components * typing fixes * typing fixes * implement SSE contract tests * fix CI * fix CI again * fix CI * disable SSE tests in Python 3.5 * make test service port configurable * better SSE implementation that fixes linefeed and multi-byte char issues * fix constructor parameters in test service * comment * test improvements * rm obsolete default config logic * (big segments 4) implement big segment stores in Redis+DynamoDB, refactor db tests (#158) * converted ldclient.integrations module from file to directory; started moving public classes out of ldclient.impl.integrations.test_data* and instead into ldclient.integrations.test_data*; started adding TestData documentation * removed setup/teardown functions leftover from test scaffold * added TestData, FlagBuilder, and FlagRuleBuilder documentation; minor adjustments to implementation details * removed warning suppression from TestData tests * fix big segments user hash algorithm to use SHA256 * update mypy version * updates to tests and related bug fixes * always cache Big Segment query result even if it's None * fix test assertion * lint * fix big segment ref format * fix big segments cache TTL being set to wrong value * fixed structure of fallthrough variation in result of FlagBuilder.build() * moved __test__ attribute into TestData class definition to prevent mypy from complaining about a missing class attribute * minor doc comment fix * Apply suggestions related to Sphinx docstring formatting from code review Co-authored-by: Eli Bishop * fixed errors in the implementation of FlagBuilder's fallthrough_variation and off_variation when passing boolean variation values; updated tests to assert the expected behavior * added missing value_for_all_users() method to FlagBuilder class * Fix operator parsing errors (#169) * identify should not emit event if user key is empty (#164) * secondary should be treated as built-in attribute (#168) * URIs should have trailing slashes trimmed (#165) * all_flags_state should always include flag version (#166) * output event should not include a null prereqOf key (#167) * Account for traffic allocation on all flags (#171) * Add SDK contract tests (#170) * misc fixes to test data docs + add type hints * more type hints * remove some methods from the public test_data API * can't use "x|y" shortcut in typehints in older Pythons; use Union * fix misc type mistakes because I forgot to run the linter * update CONTRIBUTING.md and provide make targets * fixed a bug with flag rule clause builder internals; added unit test to verify rule evaluation * added ready argument to _TestDataSource class and indicated ready upon start to avoid delays in TestData initialization * Update contract tests to latest flask version (#176) Our contract tests depend on flask v1, which in turn depends on Jinja 2. Both of these are terribly dated and no longer supported. Jinja depends on markupsafe. markupsafe recently updated its code to no longer provide soft_unicode which in turn broke Jinja. Updating to the latest flask keeps all transitive dependencies better aligned and addresses this mismatch. 
* Adds link to Relay Proxy docs * Handle explicit None values in test payload (#179) The test harness may send explicit None values which should be treated the same as if the value was omitted entirely. * Fix "unhandled response" error in test harness (#180) When we return a `('', 204)` response from the flask handler, [Werkzeug intentionally removes the 'Content-Type' header][1], which causes the response to be created as a chunked response. The test harness is likely seeing a 204 response and isn't trying to read anything more from the stream. But since we are re-using connections, the next time it reads from the stream, it sees the `0\r\n\r\n` chunk and outputs an error: > 2022/04/20 14:23:39 Unsolicited response received on idle HTTP channel starting with "0\r\n\r\n"; err= Changing this response to 202 causes Werkzeug to return an empty response and silences the error. [1]: https://github.com/pallets/werkzeug/blob/560dd5f320bff318175f209595d42f5a80045417/src/werkzeug/wrappers/response.py#L540 * Exclude booleans when getting bucketable value (#181) When calculating a bucket, we get the bucketable value from the specified bucket-by attribute. If this value is a string or an int, we can use it. Otherwise, we return None. Python considers a bool an instance of an int, which isn't what we want. So we need to add an explicit exclusion for this. * master -> main (#182) * Loosen restriction on expiringdict (#183) Originally this was pinned to a max version to deal with the incompatibility of Python 3.3 and the `typing` package. See [this PR][1]. Now that we only support >=3.5, we can safely relax this restriction again. [1]: https://github.com/launchdarkly/python-server-sdk-private/pull/120 * Fix mypy type checking (#184) A [customer requested][original-pr] that we start including a py.typed file in our repository. This would enable mypy to take advantage of our typehints. Unfortunately, this didn't completely solve the customer's issue. A [second pr][second-pr] was opened to address the missing step of including the py.typed file in the `Manifest.in` file. However, this change alone is not sufficient. According to the [documentation][include_package_data], you must also include the `include_package_data=True` directive so that files specified in the `Manifest.in` file are included in the distribution. [original-pr]: https://github.com/launchdarkly/python-server-sdk/pull/166 [second-pr]: https://github.com/launchdarkly/python-server-sdk/pull/172 [include_package_data]: https://setuptools.pypa.io/en/latest/userguide/datafiles.html#include-package-data * Add support for extra Redis connection parameters (#185) * Include wheel artifact when publishing package (#186) * skip tests that use a self-signed TLS cert in Python 3.7 * remove warn-level logging done for every Big Segments query (#190) * remove warn-level logging done for every Big Segments query * skip tests that use a self-signed TLS cert in Python 3.7 * update release metadata * Add application info support (#214) (see the sketch after this list) * Upgrade pip to fix failing CI build (#216) The CI build was failing because pip had an outdated list of available wheels for installation. Since it couldn't find a match, it was trying to build a package from source, which requires the rust compiler, which in turn isn't present on some of the docker images. By updating pip we get the updated list of available wheels, thereby allowing us to bypass source building and the need for the rust compiler entirely. 
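As a quick illustration of the application info support referenced in the list above: the `application` dict is sanitized once at `Config` construction time and then serialized into the `X-LaunchDarkly-Tags` header. The sketch below condenses the `validate_application_value` and `_application_header_value` code from the diffs in this series, not the SDK's exact source; `_validate` and `tags_header` are names invented for this example.

import logging
import re

log = logging.getLogger("demo")

def _validate(value, name):
    # Condenses validate_application_value: keep only strings of at most 64
    # characters drawn from [a-zA-Z0-9._-]; anything else is discarded as "".
    if not isinstance(value, str) or len(value) > 64 or re.search(r"[^a-zA-Z0-9._-]", value):
        log.warning("Value of application[%s] was discarded" % name)
        return ""
    return value

def tags_header(application):
    # Condenses _application_header_value: one "<name>/<value>" pair per
    # non-empty field, joined with spaces.
    parts = []
    if application.get("id"):
        parts.append("application-id/%s" % application["id"])
    if application.get("version"):
        parts.append("application-version/%s" % application["version"])
    return " ".join(parts)

app = {"id": _validate("my-id", "id"), "version": _validate("my version!", "version")}
print(tags_header(app))  # -> "application-id/my-id" ("my version!" fails validation and is dropped)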
--------- Co-authored-by: Eli Bishop Co-authored-by: LaunchDarklyCI Co-authored-by: Ben Woskow Co-authored-by: Gavin Whelan Co-authored-by: Elliot <35050275+Apache-HB@users.noreply.github.com> Co-authored-by: Gabor Angeli Co-authored-by: Elliot Co-authored-by: Ben Woskow <48036130+bwoskow-ld@users.noreply.github.com> Co-authored-by: LaunchDarklyCI Co-authored-by: hroederld Co-authored-by: Robert J. Neal Co-authored-by: Robert J. Neal Co-authored-by: Ember Stevens Co-authored-by: ember-stevens <79482775+ember-stevens@users.noreply.github.com> Co-authored-by: Matthew M. Keeler Co-authored-by: charukiewicz Co-authored-by: LaunchDarklyReleaseBot Co-authored-by: Christian Charukiewicz Co-authored-by: Matthew M. Keeler --- .circleci/config.yml | 1 + .ldrelease/config.yml | 3 ++- contract-tests/client_entity.py | 7 +++++++ contract-tests/service.py | 1 + ldclient/config.py | 20 ++++++++++++++++++-- ldclient/impl/big_segments.py | 1 - ldclient/impl/http.py | 20 ++++++++++++++++++++ ldclient/util.py | 22 ++++++++++++++++++++++ testing/test_config.py | 27 +++++++++++++++++++++++++++ testing/test_event_processor.py | 16 ++++++++++++++++ testing/test_feature_requester.py | 14 ++++++++++++++ testing/test_ldclient_end_to_end.py | 12 ++++++------ testing/test_streaming.py | 16 ++++++++++++++++ 13 files changed, 150 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 007b5fb2..075baf29 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -60,6 +60,7 @@ jobs: - run: name: install requirements command: | + pip install --upgrade pip pip install -r test-requirements.txt; pip install -r test-filesource-optional-requirements.txt; pip install -r consul-requirements.txt; diff --git a/.ldrelease/config.yml b/.ldrelease/config.yml index 9021210c..cc14b358 100644 --- a/.ldrelease/config.yml +++ b/.ldrelease/config.yml @@ -12,7 +12,8 @@ publications: branches: - name: main - description: 7.x + description: 8.x + - name: 7.x - name: 6.x jobs: diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py index 5d2d5220..e835e4f4 100644 --- a/contract-tests/client_entity.py +++ b/contract-tests/client_entity.py @@ -12,6 +12,13 @@ def __init__(self, tag, config): self.log = logging.getLogger(tag) opts = {"sdk_key": config["credential"]} + tags = config.get('tags', {}) + if tags: + opts['application'] = { + 'id': tags.get('applicationId', ''), + 'version': tags.get('applicationVersion', ''), + } + if config.get("streaming") is not None: streaming = config["streaming"] if streaming.get("baseUri") is not None: diff --git a/contract-tests/service.py b/contract-tests/service.py index d9f8e0a5..79b0e621 100644 --- a/contract-tests/service.py +++ b/contract-tests/service.py @@ -63,6 +63,7 @@ def status(): 'all-flags-with-reasons', 'all-flags-client-side-only', 'all-flags-details-only-for-tracked-flags', + 'tags', ] } return (json.dumps(body), 200, {'Content-type': 'application/json'}) diff --git a/ldclient/config.py b/ldclient/config.py index dfe1a29a..faf6fcc1 100644 --- a/ldclient/config.py +++ b/ldclient/config.py @@ -7,7 +7,7 @@ from typing import Optional, Callable, List, Any, Set from ldclient.feature_store import InMemoryFeatureStore -from ldclient.util import log +from ldclient.util import log, validate_application_info from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor, FeatureRequester GET_LATEST_FEATURES_PATH = '/sdk/latest-flags' @@ -173,7 +173,8 @@ def __init__(self, wrapper_name: Optional[str]=None, 
* master -> main (#182)
* Loosen restriction on expiringdict (#183)

Originally this was pinned to a max version to deal with the incompatibility of Python 3.3 and the `typing` package. See [this PR][1]. Now that we only support >=3.5, we can safely relax this restriction again.

[1]: https://github.com/launchdarkly/python-server-sdk-private/pull/120

* Fix mypy type checking (#184)

A [customer requested][original-pr] that we start including a py.typed file in our repository. This would enable mypy to take advantage of our typehints. Unfortunately, this didn't completely solve the customer's issue. A [second PR][second-pr] was opened to address the missing step of including the py.typed file in the `Manifest.in` file.

However, this change alone is not sufficient. According to the [documentation][include_package_data], you must also include the `include_package_data=True` directive so that files specified in the `Manifest.in` file are included in the distribution.

[original-pr]: https://github.com/launchdarkly/python-server-sdk/pull/166
[second-pr]: https://github.com/launchdarkly/python-server-sdk/pull/172
[include_package_data]: https://setuptools.pypa.io/en/latest/userguide/datafiles.html#include-package-data

* Add support for extra Redis connection parameters (#185)
* Include wheel artifact when publishing package (#186)
* skip tests that use a self-signed TLS cert in Python 3.7
* remove warn-level logging done for every Big Segments query (#190)
* remove warn-level logging done for every Big Segments query
* skip tests that use a self-signed TLS cert in Python 3.7
* update release metadata
* Add application info support (#214)
* Upgrade pip to fix failing CI build (#216)

The CI build was failing because pip had an outdated list of available wheels for installation. Since it couldn't find a match, it was trying to build a package from source, which requires the Rust compiler, which in turn isn't present on some of the Docker images.

By updating pip we get the updated list of available wheels, thereby allowing us to bypass source building and the need for the Rust compiler entirely.

---------

Co-authored-by: Eli Bishop
Co-authored-by: LaunchDarklyCI
Co-authored-by: Ben Woskow
Co-authored-by: Gavin Whelan
Co-authored-by: Elliot <35050275+Apache-HB@users.noreply.github.com>
Co-authored-by: Gabor Angeli
Co-authored-by: Elliot
Co-authored-by: Ben Woskow <48036130+bwoskow-ld@users.noreply.github.com>
Co-authored-by: LaunchDarklyCI
Co-authored-by: hroederld
Co-authored-by: Robert J. Neal
Co-authored-by: Robert J. Neal
Co-authored-by: Ember Stevens
Co-authored-by: ember-stevens <79482775+ember-stevens@users.noreply.github.com>
Co-authored-by: Matthew M. Keeler
Co-authored-by: charukiewicz
Co-authored-by: LaunchDarklyReleaseBot
Co-authored-by: Christian Charukiewicz
Co-authored-by: Matthew M. Keeler
---
 .circleci/config.yml                |  1 +
 .ldrelease/config.yml               |  3 ++-
 contract-tests/client_entity.py     |  7 +++++++
 contract-tests/service.py           |  1 +
 ldclient/config.py                  | 20 ++++++++++++++++++--
 ldclient/impl/big_segments.py       |  1 -
 ldclient/impl/http.py               | 20 ++++++++++++++++++++
 ldclient/util.py                    | 22 ++++++++++++++++++++++
 testing/test_config.py              | 27 +++++++++++++++++++++++++++
 testing/test_event_processor.py     | 16 ++++++++++++++++
 testing/test_feature_requester.py   | 14 ++++++++++++++
 testing/test_ldclient_end_to_end.py | 12 ++++++------
 testing/test_streaming.py           | 16 ++++++++++++++++
 13 files changed, 150 insertions(+), 10 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 007b5fb2..075baf29 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -60,6 +60,7 @@ jobs:
       - run:
           name: install requirements
           command: |
+            pip install --upgrade pip
             pip install -r test-requirements.txt;
             pip install -r test-filesource-optional-requirements.txt;
             pip install -r consul-requirements.txt;
diff --git a/.ldrelease/config.yml b/.ldrelease/config.yml
index 9021210c..cc14b358 100644
--- a/.ldrelease/config.yml
+++ b/.ldrelease/config.yml
@@ -12,7 +12,8 @@ publications:
 
 branches:
   - name: main
-    description: 7.x
+    description: 8.x
+  - name: 7.x
   - name: 6.x
 
 jobs:
diff --git a/contract-tests/client_entity.py b/contract-tests/client_entity.py
index 5d2d5220..e835e4f4 100644
--- a/contract-tests/client_entity.py
+++ b/contract-tests/client_entity.py
@@ -12,6 +12,13 @@ def __init__(self, tag, config):
         self.log = logging.getLogger(tag)
 
         opts = {"sdk_key": config["credential"]}
+        tags = config.get('tags', {})
+        if tags:
+            opts['application'] = {
+                'id': tags.get('applicationId', ''),
+                'version': tags.get('applicationVersion', ''),
+            }
+
         if config.get("streaming") is not None:
             streaming = config["streaming"]
             if streaming.get("baseUri") is not None:
diff --git a/contract-tests/service.py b/contract-tests/service.py
index d9f8e0a5..79b0e621 100644
--- a/contract-tests/service.py
+++ b/contract-tests/service.py
@@ -63,6 +63,7 @@ def status():
             'all-flags-with-reasons',
             'all-flags-client-side-only',
             'all-flags-details-only-for-tracked-flags',
+            'tags',
         ]
     }
     return (json.dumps(body), 200, {'Content-type': 'application/json'})
diff --git a/ldclient/config.py b/ldclient/config.py
index dfe1a29a..faf6fcc1 100644
--- a/ldclient/config.py
+++ b/ldclient/config.py
@@ -7,7 +7,7 @@
 from typing import Optional, Callable, List, Any, Set
 
 from ldclient.feature_store import InMemoryFeatureStore
-from ldclient.util import log
+from ldclient.util import log, validate_application_info
 from ldclient.interfaces import BigSegmentStore, EventProcessor, FeatureStore, UpdateProcessor, FeatureRequester
 
 GET_LATEST_FEATURES_PATH = '/sdk/latest-flags'
@@ -173,7 +173,8 @@ def __init__(self,
                  wrapper_name: Optional[str]=None,
                  wrapper_version: Optional[str]=None,
                  http: HTTPConfig=HTTPConfig(),
-                 big_segments: Optional[BigSegmentsConfig]=None):
+                 big_segments: Optional[BigSegmentsConfig]=None,
+                 application: Optional[dict]=None):
         """
         :param sdk_key: The SDK key for your LaunchDarkly account. This is always required.
         :param base_uri: The base URL for the LaunchDarkly server. Most users should use the default
@@ -239,6 +240,7 @@ def __init__(self,
             servers.
         :param http: Optional properties for customizing the client's HTTP/HTTPS behavior. See
             :class:`HTTPConfig`.
+        :param application: Optional properties for setting application metadata. See :py:attr:`~application`
         """
         self.__sdk_key = sdk_key
@@ -271,6 +273,7 @@ def __init__(self,
         self.__wrapper_version = wrapper_version
         self.__http = http
         self.__big_segments = BigSegmentsConfig() if not big_segments else big_segments
+        self.__application = validate_application_info(application or {}, log)
 
     def copy_with_new_sdk_key(self, new_sdk_key: str) -> 'Config':
         """Returns a new ``Config`` instance that is the same as this one, except for having a different SDK key.
@@ -441,3 +444,16 @@ def big_segments(self) -> BigSegmentsConfig:
     def _validate(self):
         if self.offline is False and self.sdk_key is None or self.sdk_key == '':
             log.warning("Missing or blank sdk_key.")
+
+    @property
+    def application(self) -> dict:
+        """
+        An object that allows configuration of application metadata.
+
+        Application metadata may be used in LaunchDarkly analytics or other
+        product features, but does not affect feature flag evaluations.
+
+        If you want to set non-default values for any of these fields, provide
+        the appropriately configured dict to the {Config} object.
+        """
+        return self.__application
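For reference, a usage sketch of the new option (the id and version strings below are placeholders):

    # Sketch of configuring application metadata; values are placeholders.
    from ldclient.config import Config

    config = Config(
        sdk_key='SDK_KEY',
        # Each value must be at most 64 characters from [a-zA-Z0-9._-];
        # anything else is discarded with a logged warning.
        application={'id': 'my-service', 'version': '1.2.3'},
    )
    print(config.application)  # {'id': 'my-service', 'version': '1.2.3'}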
diff --git a/ldclient/impl/big_segments.py b/ldclient/impl/big_segments.py
index b6a013d3..bcd6e2b8 100644
--- a/ldclient/impl/big_segments.py
+++ b/ldclient/impl/big_segments.py
@@ -81,7 +81,6 @@ def get_user_membership(self, user_key: str) -> Tuple[Optional[dict], str]:
         membership = self.__cache.get(user_key)
         if membership is None:
             user_hash = _hash_for_user_key(user_key)
-            log.warn("*** querying Big Segments for user hash: %s" % user_hash)
             try:
                 membership = self.__store.get_membership(user_hash)
                 if membership is None:
diff --git a/ldclient/impl/http.py b/ldclient/impl/http.py
index ef36c8ba..858fd371 100644
--- a/ldclient/impl/http.py
+++ b/ldclient/impl/http.py
@@ -3,14 +3,34 @@
 from os import environ
 import urllib3
 
+def _application_header_value(application: dict) -> str:
+    parts = []
+    id = application.get('id', '')
+    version = application.get('version', '')
+
+    if id:
+        parts.append("application-id/%s" % id)
+
+    if version:
+        parts.append("application-version/%s" % version)
+
+    return " ".join(parts)
+
+
 def _base_headers(config):
     headers = {'Authorization': config.sdk_key or '',
                'User-Agent': 'PythonClient/' + VERSION}
+
+    app_value = _application_header_value(config.application)
+    if app_value:
+        headers['X-LaunchDarkly-Tags'] = app_value
+
     if isinstance(config.wrapper_name, str) and config.wrapper_name != "":
         wrapper_version = ""
         if isinstance(config.wrapper_version, str) and config.wrapper_version != "":
            wrapper_version = "/" + config.wrapper_version
         headers.update({'X-LaunchDarkly-Wrapper': config.wrapper_name + wrapper_version})
+
     return headers
 
 def _http_factory(config):
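Given a populated `application` dict, the header value is a space-delimited tag list. A self-contained restatement of the helper (mirroring the diff above so it runs standalone) shows the expected shapes:

    # Mirrors _application_header_value from the diff above, for illustration.
    def application_header_value(application: dict) -> str:
        parts = []
        if application.get('id', ''):
            parts.append("application-id/%s" % application['id'])
        if application.get('version', ''):
            parts.append("application-version/%s" % application['version'])
        return " ".join(parts)

    assert application_header_value({'id': 'my-id', 'version': 'my-version'}) \
        == 'application-id/my-id application-version/my-version'
    assert application_header_value({}) == ''  # header omitted entirely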
diff --git a/ldclient/util.py b/ldclient/util.py
index 66c0c70b..042f33dc 100644
--- a/ldclient/util.py
+++ b/ldclient/util.py
@@ -4,10 +4,12 @@
 # currently excluded from documentation - see docs/README.md
 
 import logging
+import re
 from os import environ
 import sys
 import urllib3
 
+from typing import Any
 from ldclient.impl.http import HTTPFactory, _base_headers
 
 log = logging.getLogger(sys.modules[__name__].__name__)
@@ -25,6 +27,26 @@
 
 _retryable_statuses = [400, 408, 429]
 
+def validate_application_info(application: dict, logger: logging.Logger) -> dict:
+    return {
+        "id": validate_application_value(application.get("id", ""), "id", logger),
+        "version": validate_application_value(application.get("version", ""), "version", logger),
+    }
+
+def validate_application_value(value: Any, name: str, logger: logging.Logger) -> str:
+    if not isinstance(value, str):
+        return ""
+
+    if len(value) > 64:
+        logger.warning('Value of application[%s] was longer than 64 characters and was discarded' % name)
+        return ""
+
+    if re.search(r"[^a-zA-Z0-9._-]", value):
+        logger.warning('Value of application[%s] contained invalid characters and was discarded' % name)
+        return ""
+
+    return value
+
 def _headers(config):
     base_headers = _base_headers(config)
     base_headers.update({'Content-Type': "application/json"})
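The validation rules above boil down to: strings only, at most 64 characters, limited to `[a-zA-Z0-9._-]`. A standalone sketch of the same rules:

    # Standalone illustration of the validation rules added above.
    import re

    def is_valid_application_value(value) -> bool:
        return (isinstance(value, str)
                and len(value) <= 64
                and not re.search(r"[^a-zA-Z0-9._-]", value))

    assert is_valid_application_value('my-id_1.2.3')
    assert not is_valid_application_value('has spaces')  # invalid character
    assert not is_valid_application_value('a' * 65)      # too long
    assert not is_valid_application_value(123)           # not a string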
diff --git a/testing/test_config.py b/testing/test_config.py
index 701e70e5..7c5e342d 100644
--- a/testing/test_config.py
+++ b/testing/test_config.py
@@ -1,4 +1,5 @@
 from ldclient.config import Config
+import pytest
 
 
 def test_copy_config():
@@ -40,3 +41,29 @@ def test_trims_trailing_slashes_on_uris():
     assert config.base_uri == "https://launchdarkly.com"
     assert config.events_uri == "https://docs.launchdarkly.com/bulk"
     assert config.stream_base_uri == "https://blog.launchdarkly.com"
+
+def application_can_be_set_and_read():
+    application = {"id": "my-id", "version": "abcdef"}
+    config = Config(sdk_key = "SDK_KEY", application = application)
+    assert config.application == {"id": "my-id", "version": "abcdef"}
+
+def application_can_handle_non_string_values():
+    application = {"id": 1, "version": 2}
+    config = Config(sdk_key = "SDK_KEY", application = application)
+    assert config.application == {"id": "1", "version": "2"}
+
+def application_will_ignore_invalid_keys():
+    application = {"invalid": 1, "key": 2}
+    config = Config(sdk_key = "SDK_KEY", application = application)
+    assert config.application == {"id": "", "version": ""}
+
+@pytest.fixture(params = [
+    " ",
+    "@",
+    ":",
+    "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789._-a"
+])
+def application_will_drop_invalid_values(value):
+    application = {"id": value, "version": value}
+    config = Config(sdk_key = "SDK_KEY", application = application)
+    assert config.application == {"id": "", "version": ""}
diff --git a/testing/test_event_processor.py b/testing/test_event_processor.py
index 363d980e..ec777812 100644
--- a/testing/test_event_processor.py
+++ b/testing/test_event_processor.py
@@ -484,6 +484,22 @@ def test_wrapper_header_sent_without_version():
 
         assert mock_http.request_headers.get('X-LaunchDarkly-Wrapper') == "Flask"
 
+def test_application_header_not_sent_when_not_set():
+    with DefaultTestProcessor() as ep:
+        ep.send_event({ 'kind': 'identify', 'user': user })
+        ep.flush()
+        ep._wait_until_inactive()
+
+        assert mock_http.request_headers.get('X-LaunchDarkly-Tags') is None
+
+def test_application_header_sent_when_set():
+    with DefaultTestProcessor(wrapper_name = "Flask", application = {"id": "my-id", "version": "my-version"}) as ep:
+        ep.send_event({ 'kind': 'identify', 'user': user })
+        ep.flush()
+        ep._wait_until_inactive()
+
+        assert mock_http.request_headers.get('X-LaunchDarkly-Tags') == "application-id/my-id application-version/my-version"
+
 def test_event_schema_set_on_event_send():
     with DefaultTestProcessor() as ep:
         ep.send_event({ 'kind': 'identify', 'user': user })
diff --git a/testing/test_feature_requester.py b/testing/test_feature_requester.py
index db18f555..031167dc 100644
--- a/testing/test_feature_requester.py
+++ b/testing/test_feature_requester.py
@@ -35,6 +35,7 @@ def test_get_all_data_sends_headers():
         assert req.headers['Authorization'] == 'sdk-key'
         assert req.headers['User-Agent'] == 'PythonClient/' + VERSION
         assert req.headers.get('X-LaunchDarkly-Wrapper') is None
+        assert req.headers.get('X-LaunchDarkly-Tags') is None
 
 def test_get_all_data_sends_wrapper_header():
     with start_server() as server:
@@ -62,6 +63,19 @@ def test_get_all_data_sends_wrapper_header_without_version():
         req = server.require_request()
         assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask'
 
+def test_get_all_data_sends_tags_header():
+    with start_server() as server:
+        config = Config(sdk_key = 'sdk-key', base_uri = server.uri,
+                        application = {"id": "my-id", "version": "my-version"})
+        fr = FeatureRequesterImpl(config)
+
+        resp_data = { 'flags': {}, 'segments': {} }
+        server.for_path('/sdk/latest-all', JsonResponse(resp_data))
+
+        fr.get_all_data()
+        req = server.require_request()
+        assert req.headers.get('X-LaunchDarkly-Tags') == 'application-id/my-id application-version/my-version'
+
 def test_get_all_data_can_use_cached_data():
     with start_server() as server:
         config = Config(sdk_key = 'sdk-key', base_uri = server.uri)
diff --git a/testing/test_ldclient_end_to_end.py b/testing/test_ldclient_end_to_end.py
index 7003805a..3f550d0f 100644
--- a/testing/test_ldclient_end_to_end.py
+++ b/testing/test_ldclient_end_to_end.py
@@ -102,12 +102,12 @@ def test_client_sends_diagnostics():
         data = json.loads(r.body)
         assert data['kind'] == 'diagnostic-init'
 
-# The TLS tests are skipped in Python 3.3 because the embedded HTTPS server does not work correctly, causing
+# The TLS tests are skipped in Python 3.7 because the embedded HTTPS server does not work correctly, causing
 # a TLS handshake failure on the client side. It's unclear whether this is a problem with the self-signed
 # certificate we are using or with some other server settings, but it does not appear to be a client-side
-# problem.
+# problem since we know that the SDK is able to connect to secure LD endpoints.
 
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_cannot_connect_with_selfsigned_cert_by_default():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
@@ -120,7 +120,7 @@ def test_cannot_connect_with_selfsigned_cert_by_default():
         with LDClient(config = config, start_wait = 1.5) as client:
             assert not client.is_initialized()
 
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
@@ -134,7 +134,7 @@ def test_can_connect_with_selfsigned_cert_if_ssl_verify_is_false():
         with LDClient(config = config) as client:
             assert client.is_initialized()
 
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
@@ -148,7 +148,7 @@ def test_can_connect_with_selfsigned_cert_if_disable_ssl_verification_is_true():
         with LDClient(config = config) as client:
             assert client.is_initialized()
 
-@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 3, reason = "test is skipped in Python 3.3")
+@pytest.mark.skipif(sys.version_info.major == 3 and sys.version_info.minor == 7, reason = "test is skipped in Python 3.7")
 def test_can_connect_with_selfsigned_cert_by_setting_ca_certs():
     with start_secure_server() as server:
         server.for_path('/sdk/latest-all', poll_content())
diff --git a/testing/test_streaming.py b/testing/test_streaming.py
index 1838e500..82700b4d 100644
--- a/testing/test_streaming.py
+++ b/testing/test_streaming.py
@@ -38,6 +38,7 @@ def test_request_properties():
             assert req.headers.get('Authorization') == 'sdk-key'
             assert req.headers.get('User-Agent') == 'PythonClient/' + VERSION
             assert req.headers.get('X-LaunchDarkly-Wrapper') is None
+            assert req.headers.get('X-LaunchDarkly-Tags') is None
 
 def test_sends_wrapper_header():
     store = InMemoryFeatureStore()
@@ -69,6 +70,21 @@ def test_sends_wrapper_header_without_version():
             req = server.await_request()
             assert req.headers.get('X-LaunchDarkly-Wrapper') == 'Flask'
 
+def test_sends_tag_header():
+    store = InMemoryFeatureStore()
+    ready = Event()
+
+    with start_server() as server:
+        with stream_content(make_put_event()) as stream:
+            config = Config(sdk_key = 'sdk-key', stream_uri = server.uri,
+                            application = {"id": "my-id", "version": "my-version"})
+            server.for_path('/all', stream)
+
+            with StreamingUpdateProcessor(config, store, ready, None) as sp:
+                sp.start()
+                req = server.await_request()
+                assert req.headers.get('X-LaunchDarkly-Tags') == 'application-id/my-id application-version/my-version'
+
 def test_receives_put_event():
     store = InMemoryFeatureStore()
     ready = Event()
From 18f62be05795fb96f2e008a78cf2e5bbf8a81094 Mon Sep 17 00:00:00 2001
From: LaunchDarklyReleaseBot
Date: Tue, 31 Jan 2023 15:32:51 +0000
Subject: [PATCH 349/356] Releasing version 7.6.0

---
 CHANGELOG.md        | 4 ++++
 ldclient/version.py | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index c905eecf..f29fd0f6 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@
 All notable changes to the LaunchDarkly Python SDK will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org).
 
+## [7.6.0] - 2023-01-31
+### Added:
+- Introduced support for an `application` config property which sets application metadata that may be used in LaunchDarkly analytics or other product features. . This does not affect feature flag evaluations.
+
 ## [7.5.1] - 2022-09-29
 ### Added:
 - Publishing this package now includes a pre-built wheel distribution in addition to the customary source distribution.
diff --git a/ldclient/version.py b/ldclient/version.py
index 2d14753a..1d8b1488 100644
--- a/ldclient/version.py
+++ b/ldclient/version.py
@@ -1 +1 @@
-VERSION = "7.5.1"
+VERSION = "7.6.0"

From 76c4d96722395dc16345ad72d200f342a370cf2b Mon Sep 17 00:00:00 2001
From: "Matthew M. Keeler"
Date: Mon, 6 Feb 2023 15:15:29 -0500
Subject: [PATCH 350/356] Fix double spaces in changelog (#217)

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f29fd0f6..a2199947 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,7 @@ All notable changes to the LaunchDarkly Python SDK will be documented in this fi
 
 ## [7.6.0] - 2023-01-31
 ### Added:
-- Introduced support for an `application` config property which sets application metadata that may be used in LaunchDarkly analytics or other product features. . This does not affect feature flag evaluations.
+- Introduced support for an `application` config property which sets application metadata that may be used in LaunchDarkly analytics or other product features. This does not affect feature flag evaluations.
 
 ## [7.5.1] - 2022-09-29
 ### Added:

From 966f75e7bb7ad091c6d26c6823c0c63aea89efdb Mon Sep 17 00:00:00 2001
From: "Matthew M. Keeler"
Date: Mon, 6 Feb 2023 16:40:13 -0500
Subject: [PATCH 351/356] Fix unsafe access to flag's trackEvents field (#218)

When using the test data source, the trackEvents field may not be present. Additionally, when receiving this information from LD, there is no guarantee that the field will be provided. Instead of assuming that the field always exists on the flag model, we should retrieve it with a default value to prevent key errors.
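The underlying failure is plain dict indexing; a minimal illustration (the flag dict below is a made-up example, not real SDK data):

    # Made-up flag data with no 'trackEvents' field.
    flag = {'key': 'my-flag', 'version': 1}

    try:
        flag['trackEvents']  # the old access pattern
    except KeyError:
        pass                 # this is the error users hit

    # The fix switches to .get() with a default instead:
    assert flag.get('trackEvents', False) is False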
---
 ldclient/client.py                            |  2 +-
 testing/integrations/test_test_data_source.py | 25 +++++++++++++++++++
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/ldclient/client.py b/ldclient/client.py
index 86a45e06..98d937ab 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -408,7 +408,7 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState:
                     'variation': detail.variation_index,
                     'reason': detail.reason,
                     'version': flag['version'],
-                    'trackEvents': flag['trackEvents'] or requires_experiment_data,
+                    'trackEvents': flag.get('trackEvents', False) or requires_experiment_data,
                     'trackReason': requires_experiment_data,
                     'debugEventsUntilDate': flag.get('debugEventsUntilDate', None),
                 }
diff --git a/testing/integrations/test_test_data_source.py b/testing/integrations/test_test_data_source.py
index 47f0d025..fbf09266 100644
--- a/testing/integrations/test_test_data_source.py
+++ b/testing/integrations/test_test_data_source.py
@@ -329,3 +329,28 @@ def test_flag_can_evaluate_rules():
     assert eval2.variation_index == 1
     assert eval2.reason['kind'] == 'FALLTHROUGH'
 
+def test_flag_can_evaluate_all_flags():
+    td = TestData.data_source()
+    store = InMemoryFeatureStore()
+
+    client = LDClient(config=Config('SDK_KEY',
+                                    update_processor_class = td,
+                                    send_events = False,
+                                    feature_store = store))
+
+    td.update(td.flag(key='test-flag')
+              .fallthrough_variation(False)
+              .if_match('firstName', 'Mike')
+              .and_not_match('country', 'gb')
+              .then_return(True))
+
+    user1 = { 'key': 'user1', 'firstName': 'Mike', 'country': 'us' }
+    flags_state = client.all_flags_state(user1, with_reasons=True)
+
+    assert flags_state.valid
+
+    value = flags_state.get_flag_value('test-flag')
+    reason = flags_state.get_flag_reason('test-flag') or {}
+
+    assert value == True
+    assert reason.get('kind', None) == 'RULE_MATCH'

From 0121ddb6446e2cc819767c445820405709b667c5 Mon Sep 17 00:00:00 2001
From: LaunchDarklyReleaseBot <86431345+LaunchDarklyReleaseBot@users.noreply.github.com>
Date: Tue, 7 Feb 2023 10:47:58 -0800
Subject: [PATCH 352/356] prepare 7.6.1 release (#195)

* add 3.8 build
* image name
* fail on SyntaxWarning
* typo
* command syntax
* pin expiringdict dependency for Python 3.3 compatibility
* add Windows CircleCI job
* periods are no longer valid in CircleCI job names
* syntax fix
* install Python in Windows
* set path
* move command
* turn off debug logging
* Py3 in Windows
* config param
* rm redundant step
* choco switch
* refactor Linux jobs using CircleCI 2.1 features
* set log level before anything else
* rm Azure config
* use yaml.safe_load() to avoid code execution vulnerability in file data source
* Initial work on wrapper_name, wrapper_version, diagnostic config options and start of diagnostic config event creation.
* Python 2 compat changes.
* More event generation code and starting to integrate tracking diagnostic values.
* Add minimum diagnostic recording interval. Fix diagnostic.py to be importable. Add more diagnostic event fields.
* don't let user fall outside of last bucket in rollout
* fixing conditional logic
* Add docstrings for diagnostic configuration options.
* fix off-by-1 error
* avoid redundant dict lookups
* add unit tests for basic bucketing logic and edge case
* Stream init tracking. Feeding of accumulator object through SDK. Various fixes.
* Track events in last batch.
* Fix sdk version field, some stylistic improvements.
* Last of diagnostic configuration object fields.
* Fill out rest of platform fields.
* Cleanup and failed stream initialization tracking.
* Add diagnostic config option test.
* Add tests for diagnostics.py
* Testing rest of diagnostic fields.
* Test that streaming update processor records successful and unsuccessful connection attempts in the diagnostic accumulator when available.
* Improvements to testability of event processor.
* Rest of event processor tests.
* Remove janky reflection.
* Test change to filesource optional test requirements.
* [ch61092] Add event payload ID on event requests.
* normalize data store type and OS name in diagnostic events
* gitignore
* copyedit to diagnostic event config property comment
* fix spurious error after sending diagnostic event
* make verify_ssl=False turn off certificate verification too (#129)
* add more TLS config options and collect HTTP/HTTPS config options in a class (#130)
* make stream retry/backoff/jitter behavior consistent with other SDKs + improve testing (#131)
* streams shouldn't use the same read timeout as the rest of the SDK (#132)
* implement our own retry logic & logging for event posts, don't use urllib3.Retry (#133)
* remove support for indirect/patch and indirect/put
* remove unused logic for individual flag/segment poll for indirect/patch
* Ehaisley/84082/remove python2 (#136)
* remove all references to six and remove queue fallback imports
* remove NullHandler logger backwards compat
* update circleci config to remove python 2.7 tests
* remove ordereddict backwards compat
* update setup.py to no longer list python 2.7 as compatible
* no longer inherit from object for python 2 backwards compat
* update readme and manifest to reflect python 2.7 removal
* remove unicode type compatibility
* remove 2.7 support from circleci
* Allow authenticating with proxy

This commit allows for authenticating with a proxy configured with the `http_proxy` environment variable. Authentication requires passing a header, and is not parsed by urllib3 from the proxy_url.

* reimplement proxy tests for DRY and add test of proxy auth params
* doc comment on auth params in proxy URL
* add type hints to some of the public facing api. update some docs
* Revert "add type hints to some of the public facing api."

This reverts commit c35fa6184ce1a274fd5c6d226cb3f1f7a795901a.
* Ehaisley/ch86857/type hints (#138)
* add typehints to the public API
* validate typehints in the public api and tests with mypy
* remove all current deprecations (#139)
* remove all currently deprecated classes, methods, arguments, and tests
* also update semver usage to remove calls to deprecated functions and classes
* remove global set_sdk_key, make SDK key required in Config (#140)
* Removed the guides link
* Pinning mypy and running it against different python versions (#141)
* fix time zone mishandling that could make event debugging not work (#142)
* fix 6.x build (#143)
* fix time zone mishandling that could make event debugging not work (6.x) (#144)
* prepare 6.13.3 release (#154)
* Releasing version 6.13.3
* [ch99756] Add alias events (#145)
* add support for experiment rollouts
* fix unit test
* address PR comments
* use Releaser v2 config
* Use newer docker images (#147)
* Updates docs URLs
* Add support for 3.10 (#150)
* started work on FlagBuilder as part of test data source implementation
* finished FlagBuilder implementation and added FlagRuleBuilder implementation
* added initial TestData interface and updated tests to not rely on test data internals
* started data source implementation
* changed FlagBuilder to public class; changed FlagBuilder attributes to be initialized in __init__ and eliminated use of try ... except: pass for handling empty attributes
* (big segments 1) add public config/interface types
* added implementation of test data source
* docstring
* formatting
* ensure property doesn't return None
* (big segments 2) implement evaluation, refactor eval logic & modules
* linting
* (big segments 3) implement big segment status tracking, wire up components
* typing fixes
* typing fixes
* implement SSE contract tests
* fix CI
* fix CI again
* fix CI
* disable SSE tests in Python 3.5
* make test service port configurable
* better SSE implementation that fixes linefeed and multi-byte char issues
* fix constructor parameters in test service
* comment
* test improvements
* rm obsolete default config logic
* (big segments 4) implement big segment stores in Redis+DynamoDB, refactor db tests (#158)
* converted ldclient.integrations module from file to directory; started moving public classes out of ldclient.impl.integrations.test_data* and instead into ldclient.integrations.test_data*; started adding TestData documentation
* removed setup/teardown functions leftover from test scaffold
* added TestData, FlagBuilder, and FlagRuleBuilder documentation; minor adjustments to implementation details
* removed warning suppression from TestData tests
* fix big segments user hash algorithm to use SHA256
* update mypy version
* updates to tests and related bug fixes
* always cache Big Segment query result even if it's None
* fix test assertion
* lint
* fix big segment ref format
* fix big segments cache TTL being set to wrong value
* fixed structure of fallthrough variation in result of FlagBuilder.build()
* moved __test__ attribute into TestData class definition to prevent mypy from complaining about a missing class attribute
* minor doc comment fix
* Apply suggestions related to Sphinx docstring formatting from code review

Co-authored-by: Eli Bishop

* fixed errors in the implementation of FlagBuilder's fallthrough_variation and off_variation when passing boolean variation values; updated tests to assert the expected behavior
* added missing value_for_all_users() method to FlagBuilder class
* Fix operator parsing errors (#169)
* identify should not emit event if user key is empty (#164)
* secondary should be treated as built-in attribute (#168)
* URIs should have trailing slashes trimmed (#165)
* all_flags_state should always include flag version (#166)
* output event should not include a null prereqOf key (#167)
* Account for traffic allocation on all flags (#171)
* Add SDK contract tests (#170)
* misc fixes to test data docs + add type hints
* more type hints
* remove some methods from the public test_data API
* can't use "x|y" shortcut in typehints in older Pythons; use Union
* fix misc type mistakes because I forgot to run the linter
* update CONTRIBUTING.md and provide make targets
* fixed a bug with flag rule clause builder internals; added unit test to verify rule evaluation
* added ready argument to _TestDataSource class and indicated ready upon start to avoid delays in TestData initialization
* Update contract tests to latest flask version (#176)

Our contract tests depend on flask v1, which in turn depends on Jinja 2. Both of these are terribly dated and no longer supported. Jinja depends on markupsafe. markupsafe recently updated its code to no longer provide soft_unicode, which in turn broke Jinja. Updating to the latest flask keeps all transitive dependencies better aligned and addresses this mismatch.

* Adds link to Relay Proxy docs
* Handle explicit None values in test payload (#179)

The test harness may send explicit None values which should be treated the same as if the value was omitted entirely.

* Fix "unhandled response" error in test harness (#180)

When we return a `('', 204)` response from the flask handler, [Werkzeug intentionally removes the 'Content-Type' header][1], which causes the response to be created as a chunked response.

The test harness is likely seeing a 204 response and isn't trying to read anything more from the stream. But since we are re-using connections, the next time it reads from the stream, it sees the `0\r\n\r\n` chunk and outputs an error:

> 2022/04/20 14:23:39 Unsolicited response received on idle HTTP channel starting with "0\r\n\r\n"; err=

Changing this response to 202 causes Werkzeug to return an empty response and silences the error.

[1]: https://github.com/pallets/werkzeug/blob/560dd5f320bff318175f209595d42f5a80045417/src/werkzeug/wrappers/response.py#L540

* Exclude booleans when getting bucketable value (#181)

When calculating a bucket, we get the bucketable value from the specified bucket-by attribute. If this value is a string or an int, we can use it. Otherwise, we return None.

Python considers a bool an instance of an int, which isn't what we want, so we need to add an explicit exclusion for it.

* master -> main (#182)
* Loosen restriction on expiringdict (#183)

Originally this was pinned to a max version to deal with the incompatibility of Python 3.3 and the `typing` package. See [this PR][1]. Now that we only support >=3.5, we can safely relax this restriction again.

[1]: https://github.com/launchdarkly/python-server-sdk-private/pull/120

* Fix mypy type checking (#184)

A [customer requested][original-pr] that we start including a py.typed file in our repository. This would enable mypy to take advantage of our typehints. Unfortunately, this didn't completely solve the customer's issue. A [second PR][second-pr] was opened to address the missing step of including the py.typed file in the `Manifest.in` file.

However, this change alone is not sufficient.
According to the [documentation][include_package_data], you must also include the `include_package_data=True` directive so that files specified in the `Manifest.in` file are included in the distribution.

[original-pr]: https://github.com/launchdarkly/python-server-sdk/pull/166
[second-pr]: https://github.com/launchdarkly/python-server-sdk/pull/172
[include_package_data]: https://setuptools.pypa.io/en/latest/userguide/datafiles.html#include-package-data

* Add support for extra Redis connection parameters (#185)
* Include wheel artifact when publishing package (#186)
* skip tests that use a self-signed TLS cert in Python 3.7
* remove warn-level logging done for every Big Segments query (#190)
* remove warn-level logging done for every Big Segments query
* skip tests that use a self-signed TLS cert in Python 3.7
* update release metadata
* Add application info support (#214)
* Upgrade pip to fix failing CI build (#216)

The CI build was failing because pip had an outdated list of available wheels for installation. Since it couldn't find a match, it was trying to build a package from source, which requires the Rust compiler, which in turn isn't present on some of the Docker images. By updating pip we get the updated list of available wheels, thereby allowing us to bypass source building and the need for the Rust compiler entirely.

* Fix double spaces in changelog (#217)
* Fix unsafe access to flag's trackEvents field (#218)

When using the test data source, the trackEvents field may not be present. Additionally, when receiving this information from LD, there is no guarantee that the field will be provided. Instead of assuming that the field always exists on the flag model, we should retrieve it with a default value to prevent key errors.

---------

Co-authored-by: Eli Bishop
Co-authored-by: LaunchDarklyCI
Co-authored-by: Ben Woskow
Co-authored-by: Gavin Whelan
Co-authored-by: Elliot <35050275+Apache-HB@users.noreply.github.com>
Co-authored-by: Gabor Angeli
Co-authored-by: Elliot
Co-authored-by: Ben Woskow <48036130+bwoskow-ld@users.noreply.github.com>
Co-authored-by: LaunchDarklyCI
Co-authored-by: hroederld
Co-authored-by: Robert J. Neal
Co-authored-by: Robert J. Neal
Co-authored-by: Ember Stevens
Co-authored-by: ember-stevens <79482775+ember-stevens@users.noreply.github.com>
Co-authored-by: Matthew M. Keeler
Co-authored-by: charukiewicz
Co-authored-by: LaunchDarklyReleaseBot
Co-authored-by: Christian Charukiewicz
Co-authored-by: Matthew M. Keeler
---
 CHANGELOG.md                                  |  2 +-
 ldclient/client.py                            |  2 +-
 testing/integrations/test_test_data_source.py | 25 +++++++++++++++++++
 3 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f29fd0f6..a2199947 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,7 @@ All notable changes to the LaunchDarkly Python SDK will be documented in this fi
 
 ## [7.6.0] - 2023-01-31
 ### Added:
-- Introduced support for an `application` config property which sets application metadata that may be used in LaunchDarkly analytics or other product features. . This does not affect feature flag evaluations.
+- Introduced support for an `application` config property which sets application metadata that may be used in LaunchDarkly analytics or other product features. This does not affect feature flag evaluations.
 ## [7.5.1] - 2022-09-29
 ### Added:
diff --git a/ldclient/client.py b/ldclient/client.py
index 86a45e06..98d937ab 100644
--- a/ldclient/client.py
+++ b/ldclient/client.py
@@ -408,7 +408,7 @@ def all_flags_state(self, user: dict, **kwargs) -> FeatureFlagsState:
                     'variation': detail.variation_index,
                     'reason': detail.reason,
                     'version': flag['version'],
-                    'trackEvents': flag['trackEvents'] or requires_experiment_data,
+                    'trackEvents': flag.get('trackEvents', False) or requires_experiment_data,
                     'trackReason': requires_experiment_data,
                     'debugEventsUntilDate': flag.get('debugEventsUntilDate', None),
                 }
diff --git a/testing/integrations/test_test_data_source.py b/testing/integrations/test_test_data_source.py
index 47f0d025..fbf09266 100644
--- a/testing/integrations/test_test_data_source.py
+++ b/testing/integrations/test_test_data_source.py
@@ -329,3 +329,28 @@ def test_flag_can_evaluate_rules():
     assert eval2.variation_index == 1
     assert eval2.reason['kind'] == 'FALLTHROUGH'
 
+def test_flag_can_evaluate_all_flags():
+    td = TestData.data_source()
+    store = InMemoryFeatureStore()
+
+    client = LDClient(config=Config('SDK_KEY',
+                                    update_processor_class = td,
+                                    send_events = False,
+                                    feature_store = store))
+
+    td.update(td.flag(key='test-flag')
+              .fallthrough_variation(False)
+              .if_match('firstName', 'Mike')
+              .and_not_match('country', 'gb')
+              .then_return(True))
+
+    user1 = { 'key': 'user1', 'firstName': 'Mike', 'country': 'us' }
+    flags_state = client.all_flags_state(user1, with_reasons=True)
+
+    assert flags_state.valid
+
+    value = flags_state.get_flag_value('test-flag')
+    reason = flags_state.get_flag_reason('test-flag') or {}
+
+    assert value == True
+    assert reason.get('kind', None) == 'RULE_MATCH'

From 88eca594d6cd3ee4ab6638ffda913d92aa8e0434 Mon Sep 17 00:00:00 2001
From: LaunchDarklyReleaseBot
Date: Tue, 7 Feb 2023 18:48:41 +0000
Subject: [PATCH 353/356] Releasing version 7.6.1

---
 CHANGELOG.md        | 4 ++++
 ldclient/version.py | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a2199947..931abab5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@
 All notable changes to the LaunchDarkly Python SDK will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org).
 
+## [7.6.1] - 2023-02-07
+### Fixed:
+- Fixed indexing error raised by calling `all_flags_state` while using the `TestData` data source.
+
 ## [7.6.0] - 2023-01-31
 ### Added:
 - Introduced support for an `application` config property which sets application metadata that may be used in LaunchDarkly analytics or other product features. This does not affect feature flag evaluations.
diff --git a/ldclient/version.py b/ldclient/version.py
index 1d8b1488..67ded196 100644
--- a/ldclient/version.py
+++ b/ldclient/version.py
@@ -1 +1 @@
-VERSION = "7.6.0"
+VERSION = "7.6.1"

From e99c1881bc7e1a1a5987940966bc6b8d039bdcb2 Mon Sep 17 00:00:00 2001
From: "Matthew M. Keeler"
Date: Fri, 17 Feb 2023 09:28:55 -0500
Subject: [PATCH 354/356] docs: Fix sphinx documentation failures (#219)

Our documentation file had a configuration issue, which emitted a warning when building with sphinx. This caused the build process on readthedocs.org to fail because they treat warnings as errors.

We didn't catch this because:

1. Warnings are treated only as warnings when generating documentation locally.
2. We don't try to generate the documentation as part of our CI process.
This commit should resolve both the underlying issue and the deficiencies that prevented us from catching this initially.

---
 .circleci/config.yml | 11 +++++++++++
 docs/Makefile        |  2 +-
 docs/api-main.rst    |  2 --
 docs/conf.py         |  2 +-
 4 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index ac781205..107c0079 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -9,6 +9,7 @@ workflows:
       - test-linux:
           name: Python 3.7
           docker-image: cimg/python:3.7
+          test-build-docs: true
       - test-linux:
           name: Python 3.8
           docker-image: cimg/python:3.8
@@ -33,6 +34,9 @@ jobs:
       test-packaging:
         type: boolean
         default: true
+      test-build-docs:
+        type: boolean
+        default: false
       test-with-codeclimate:
         type: boolean
         default: false
@@ -95,6 +99,13 @@ jobs:
           command: |
             export PATH="/home/circleci/.local/bin:$PATH"
             make lint
+      - when:
+          condition: << parameters.test-build-docs >>
+          steps:
+            - run:
+                name: verify docs can be built successfully
+                command: |
+                  make docs
       - unless:
           condition: << parameters.test-packaging >>
diff --git a/docs/Makefile b/docs/Makefile
index fb0093da..91f52ddc 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -3,7 +3,7 @@
 
 .PHONY: help install html
 
-SPHINXOPTS    =
+SPHINXOPTS    = -W --keep-going
 SPHINXBUILD   = sphinx-build
 SPHINXPROJ    = launchdarkly-server-sdk
 SOURCEDIR     = .
diff --git a/docs/api-main.rst b/docs/api-main.rst
index 514dc698..0947fa6a 100644
--- a/docs/api-main.rst
+++ b/docs/api-main.rst
@@ -6,14 +6,12 @@ ldclient module
 
 .. automodule:: ldclient
    :members: Context,ContextBuilder,ContextMultiBuilder,get,set_config
-   :special-members: __init__
 
 ldclient.client module
 ----------------------
 
 .. automodule:: ldclient.client
    :members: LDClient
-   :special-members: __init__
 
 ldclient.config module
 ----------------------
diff --git a/docs/conf.py b/docs/conf.py
index 6d5e9556..e2dfde12 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -68,7 +68,7 @@
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.

From eee1bb2b7df6b55a3b65b8357b45ad4d94da2214 Mon Sep 17 00:00:00 2001
From: "Matthew M. Keeler"
Date: Fri, 10 Mar 2023 12:38:48 -0500
Subject: [PATCH 355/356] Fix documentation generation (#197) (#220)

From 0f4c00433d31052d655398a91bcb9d29f4bab026 Mon Sep 17 00:00:00 2001
From: prpnmac <95777763+prpnmac@users.noreply.github.com>
Date: Fri, 28 Apr 2023 09:42:24 -0400
Subject: [PATCH 356/356] fix: Restrict urllib3 to <2.0.0 (#202)

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 078df9e0..d4258c10 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,4 @@ certifi>=2018.4.16
 expiringdict>=1.1.4
 pyRFC3339>=1.0
 semver>=2.10.2,<3.0.0
-urllib3>=1.22.0
+urllib3>=1.22.0,<2.0.0