diff --git a/.github/workflows/python3Test.yml b/.github/workflows/python3Test.yml new file mode 100644 index 0000000..25a3a23 --- /dev/null +++ b/.github/workflows/python3Test.yml @@ -0,0 +1,92 @@ +name: Test python package dpath-python with regexp extension + # ------------------------------------------------------------ + # (C) Alain Lichnewsky, 2021, 2022, 2023 + # + # For running under Github's Actions + # + # ------------------------------------------------------------ + + # ***************************** + # ADDED FOR TESTING PRIOR TO PR + # REMOVE FROM PR submission + # ***************************** + +on: + workflow_dispatch: + # Allows manual dispatch from the Actions tab + +jobs: + test-python3: + + timeout-minutes: 60 + + runs-on: ubuntu-latest + + strategy: + matrix: + # Match versions specified in tox.ini and tox-set-rex.ini + python-version: ['3.8', '3.11', 'pypy3.7', 'pypy3.9'] + + steps: + - name: Checkout code + uses: actions/checkout@main + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@main + with: + python-version: ${{ matrix.python-version }} + architecture: 'x64' + + - name: Ascertain configuration + # + # Collect information concerning $HOME and the location of + # file(s) loaded from Github/ + run: | + echo Working dir: $(pwd) + echo Files at this location: + ls -ltha + echo HOME: ${HOME} + echo LANG: ${LANG} SHELL: ${SHELL} + which python + echo LD_LIBRARY_PATH: ${LD_LIBRARY_PATH} + echo PYTHONPATH: \'${PYTHONPATH}\' + + - name: Install dependencies + shell: bash + if: always() + # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + # requirements install the test framework, which is not + # required by the package in setup.py + # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + run: | + python -m pip install --upgrade pip setuptools wheel \ + nose2 hypothesis + if [ -f requirements.txt ]; then + pip install -r requirements.txt; + fi + python setup.py install + pip install tox + echo "Installed tox" + + - name: Tox test with default DPATH_ALLOW_REGEX not set + shell: bash + if: always() + # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + # tox testing, here tox.ini is used + # DPATH_ALLOW_REGEX not set + # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + run: | + tox + echo "Ran tox" + + - name: Tox test with DPATH_ALLOW_REGEX = TRUE + shell: bash + if: always() + # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + # tox testing, here tox-set-rex.ini is used + # DPATH_ALLOW_REGEX = TRUE + # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + run: | + tox -c tox-set-rex.ini + echo "Ran tox -c tox-set-rex.ini" + \ No newline at end of file diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 52fd738..0000000 --- a/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -/MANIFEST -/.tox -/build -/env -.hypothesis -*.pyc -.vscode -venv_39 -.idea/ -dpath.egg-info/ -dist/ -tests/.hypothesis \ No newline at end of file diff --git a/README.rst b/README.rst index 0ad3ad2..6360ddf 100644 --- a/README.rst +++ b/README.rst @@ -11,8 +11,8 @@ A python library for accessing and searching dictionaries via Basically it lets you glob over a dictionary as if it were a filesystem. It allows you to specify globs (ala the bash eglob syntax, through some -advanced fnmatch.fnmatch magic) to access dictionary elements, and -provides some facility for filtering those results. 
+advanced fnmatch.fnmatch magic, or using Python's `re`regular expressions ) +to access dictionary elements, and provides some facility for filtering those results. sdists are available on pypi: http://pypi.python.org/pypi/dpath @@ -111,6 +111,9 @@ elements in ``x['a']['b']`` where the key is equal to the glob ``'[cd]'``. Okay. } } +**Note** : Using Python's `re` regular expressions instead of globs is explained +below re_regexp_; defining your own string matcher objects is shown in generalized_string_match_ below. + ... Wow that was easy. What if I want to iterate over the results, and not get a merged view? @@ -438,6 +441,165 @@ To get around this, you can sidestep the whole "filesystem path" style, and aban >>> dpath.get(['a', 'b/c']) 0 +.. _re_regexp: + +Globs too imprecise? Use Python's `re` Regular Expressions +========================================================== + +Python's `re` regular expressions PythonRe_ may be used as follows: + + .. _PythonRe: https://docs.python.org/3/library/re.html + + - The recognition of such regular expressions in strings is disabled by default, but may be easily + enabled ( Set up this way for backwards compatibility in the cases where a path + expression component would start with '{' and end in '}'). + - Irrespective of this setting, the user can use `re` regular expressions in the list form of + paths (see below). + + .. code-block:: python + + >>> import dpath + >>> # enable + >>> dpath.options.ALLOW_REGEX = True + >>> # disable + >>> dpath.options.ALLOW_REGEX = False + + - Now a path component may also be specified : + + - in a path expression, as {} where `` is a regular expression + accepted by the standard Python module `re`. For example: + + .. code-block:: python + + >>> selPath = 'Config/{(Env|Cmd)}' + >>> x = dpath.search(js.lod, selPath) + + .. code-block:: python + + >>> selPath = '{(Config|Graph)}/{(Env|Cmd|Data)}' + >>> x = dpath.search(js.lod, selPath) + + - When using the list form for a path, a list element can also + be expressed as + + - a string as above + - the output of :: `re.compile( args )`` + + An example: + + .. code-block:: python + + >>> selPath = [ re.compile('(Config|Graph)') , re.compile('(Env|Cmd|Data)') ] + >>> x = dpath.search(js.lod, selPath) + + More examples from a realistic json context: + + +-----------------------------------------+--------------------------------------+ + + **Extended path glob** | **Designates** + + +-----------------------------------------+--------------------------------------+ + + "\*\*/{[^A-Za-z]{2}$}" | "Id" + + +-----------------------------------------+--------------------------------------+ + + r"\*/{[A-Z][A-Za-z\\d]*$}" | "Name","Id","Created", "Scope",... 
+ + +-----------------------------------------+--------------------------------------+ + + r"\*\*/{[A-Z][A-Za-z\\d]*\d$}" | EnableIPv6" + + +-----------------------------------------+--------------------------------------+ + + r"\*\*/{[A-Z][A-Za-z\\d]*Address$}" | "Containers/199c5/MacAddress" + + +-----------------------------------------+--------------------------------------+ + + With Python's character string conventions, required backslashes in the `re` syntax + can be entered either in raw strings or using double backslashes, thus + the following are equivalent: + + +-----------------------------------------+----------------------------------------+ + + *with raw strings* | *equivalent* with double backslash + + +-----------------------------------------+----------------------------------------+ + + r"\*\*/{[A-Z][A-Za-z\\d]*\\d$}" | "\*\*/{[A-Z][A-Za-z\\\\d]*\\\\d$}" + + +-----------------------------------------+----------------------------------------+ + + r"\*\*/{[A-Z][A-Za-z\\d]*Address$}" | "\*\*/{[A-Z][A-Za-z\\\\d]*Address$}" + + +-----------------------------------------+----------------------------------------+ + +.. _generalized_string_match: + +Need still more customization ? Roll your own match method! +=========================================================== + +We provide the following abstract types, where `StringMatcher` is allowed in Glob in the +sequence form (definitions in `dpath.types`) : + +- `StringMatcher` (descriptive Union type ), + +- `Duck_StringMatcher`: which will accept a class as a **subtype**, provided it offers a `match` method. Instances may then be used as components in the list form of paths. This method of structural subtyping is explained in PEP 544 [https://peps.python.org/pep-0544/]. + + +- `Basic_StringMatcher`: an abstract base class, enabling your derived class to be recognized and participate in a match. + +**Notes:** + - It is required that the `match` method: `match(self, str) -> Optional[object]`, + returns `None` to reject the match. + - Using `Duck_StringMatcher` requires a version of Python and Pypy not less than 3.8, + otherwise you should derive from base class `Basic_StringMatcher`. The + variable `dpath.options.PEP544_PROTOCOL_AVAILABLE` indicates when duck typing is possible. + +Then it is up to you... Examples are provided in `tests/test_duck_typing.py`, + including: + + - *match anagrams*: + + .. code-block:: python + + class Anagram(): + def __init__(self, s): + self.ref = "".join(sorted(s)) + + def match(self, st): + retval = True if "".join(sorted(st)) == self.ref else None + return retval + + mydict = TestBasics.mydict + + r1 = dpath.search(mydict, "**/label") + r2 = dpath.search(mydict, [ '**', Anagram("bella")]) + + assert r1 == r2 + +- and *approximate match* (requires `rapidfuzz` https://maxbachmann.github.io/RapidFuzz/): + + .. code-block:: python + + class Approx(): + def __init__(self, s, quality=90): + self.ref = s + self.quality=quality + + def match(self, st): + fratio = rapidfuzz.fuzz.ratio(st, self.ref) + retval = True if fratio > self.quality else None + return retval + + mydict = TestBasics.mydict + + + r1 = dpath.search(mydict, "**/placeholder") + r2 = dpath.search(mydict, [ '**', Approx("placecolder")]) + r3 = dpath.search(mydict, [ '**', Approx("acecolder",75)]) + assert r1 == r2 + assert r1 == r3 + +For comparison, we show now the first example reimplemented to avoid duck typing: + + .. 
code-block:: python + + if not dpath.options.PEP544_PROTOCOL_AVAILABLE: + class Anagram(dpath.types.Basic_StringMatcher): + def __init__(self, s): + self.ref = "".join(sorted(s)) + + def match(self, st): + retval = True if "".join(sorted(st)) == self.ref else None + return retval + + dpath.search(mydict, ['**', Anagram("bella")]) + dpath.segments : The Low-Level Backend ====================================== diff --git a/dpath/__init__.py b/dpath/__init__.py index 9f56e6b..23da12c 100644 --- a/dpath/__init__.py +++ b/dpath/__init__.py @@ -20,31 +20,55 @@ "Creator", ] +import re from collections.abc import MutableMapping, MutableSequence from typing import Union, List, Any, Callable, Optional from dpath import segments, options -from dpath.exceptions import InvalidKeyName, PathNotFound +from dpath.exceptions import InvalidKeyName, PathNotFound, InvalidRegex from dpath.types import MergeType, PathSegment, Creator, Filter, Glob, Path, Hints _DEFAULT_SENTINEL = object() -def _split_path(path: Path, separator: Optional[str] = "/") -> Union[List[PathSegment], PathSegment]: +def _split_path(path: Glob, separator: Optional[str] = "/") -> Union[List[PathSegment], PathSegment]: """ - Given a path and separator, return a tuple of segments. If path is - already a non-leaf thing, return it. + Given a path and separator, return a tuple of segments. + + If path is already a non-leaf thing, return it: this covers sequences of strings + and re.Patterns. Note that a string path with the separator at index[0] will have the separator stripped off. If you pass a list path, the separator is ignored, and is assumed to be part of each key glob. It will not be - stripped. + stripped (i.e. a first list element can be an empty string). + + If RegEx support is enabled then str segments which are wrapped with curly braces will be handled as regular + expressions. These segments will be compiled using re.compile. + Errors during RegEx compilation will raise an InvalidRegex exception. 
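+
+    As a minimal sketch of the intended behaviour (assuming dpath.options.ALLOW_REGEX
+    has already been enabled; the path below is purely illustrative):
+
+        >>> _split_path("a/{[cd]}/e")
+        ['a', re.compile('[cd]'), 'e']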
""" if not segments.leaf(path): split_segments = path + elif isinstance(path, re.Pattern): + # Handle paths which are comprised of a single re.Pattern + split_segments = (path,) else: split_segments = path.lstrip(separator).split(separator) + if options.ALLOW_REGEX: + # Handle RegEx segments + + def compile_regex_segment(segment: PathSegment): + if isinstance(segment, str) and segment.startswith("{") and segment.endswith("}"): + try: + return re.compile(segment[1:-1]) + except re.error as re_err: + raise InvalidRegex(f"Could not compile RegEx in path segment '{segment}' ({re_err})") + + return segment + + split_segments = list(map(compile_regex_segment, split_segments)) + return split_segments diff --git a/dpath/exceptions.py b/dpath/exceptions.py index 3b1a7da..cc85313 100644 --- a/dpath/exceptions.py +++ b/dpath/exceptions.py @@ -3,16 +3,21 @@ class InvalidGlob(Exception): pass +class InvalidRegex(Exception): + """Invalid regular expression in path segment.""" + pass + + class PathNotFound(Exception): - """One or more elements of the requested path did not exist in the object""" + """One or more elements of the requested path did not exist in the object.""" pass class InvalidKeyName(Exception): - """This key contains the separator character or another invalid character""" + """This key contains the separator character or another invalid character.""" pass class FilteredValue(Exception): - """Unable to return a value, since the filter rejected it""" + """Unable to return a value, since the filter rejected it.""" pass diff --git a/dpath/options.py b/dpath/options.py index 41f35c4..2f0b55d 100644 --- a/dpath/options.py +++ b/dpath/options.py @@ -1 +1,41 @@ +from os import environ + ALLOW_EMPTY_STRING_KEYS = False + +ALLOW_REGEX = "ALLOW_REGEX" in environ +"""Enables regular expression support. + +Enabling this feature will allow usage of regular expressions as part of paths. +Regular expressions must be wrapped in curly brackets. For example: "a/b/{[cd]}". +Expressions will be compiled using the standard library re.compile function. +""" + +# -------------------------------------------------------------------- +# Language processor and library variations +# -------------------------------------------------------------------- + +# PEP544_PROTOCOL_AVAILABLE indicates that the language processor permits duck typing +# PEP-0544 by defining typing.Protocol. (Introduced in Python 3.8) +# See https://peps.python.org/pep-0544/. +# Otherwise provide a fall back using a derived class technique. +try: + from typing import Protocol + PEP544_PROTOCOL_AVAILABLE = True + assert type(Protocol) != int # inserted to quiesce flake8 (F401) !!! +except Exception: + PEP544_PROTOCOL_AVAILABLE = False + +# -------------------------------------------------------------------- +# Language processor and library variations +# -------------------------------------------------------------------- + +# PEP544_PROTOCOL_AVAILABLE indicates that the language processor permits duck typing +# PEP-0544 by defining typing.Protocol. (Introduced in Python 3.8) +# See https://peps.python.org/pep-0544/. +# Otherwise provide a fall back using a derived class technique. +try: + from typing import Protocol + PEP544_PROTOCOL_AVAILABLE = True + assert type(Protocol) != int # inserted to quiesce flake8 (F401) !!! 
+except Exception: + PEP544_PROTOCOL_AVAILABLE = False diff --git a/dpath/segments.py b/dpath/segments.py index c3c9846..f56acd7 100644 --- a/dpath/segments.py +++ b/dpath/segments.py @@ -1,10 +1,12 @@ from copy import deepcopy from fnmatch import fnmatchcase +from re import Pattern from typing import Sequence, Tuple, Iterator, Any, Union, Optional, MutableMapping, MutableSequence from dpath import options from dpath.exceptions import InvalidGlob, InvalidKeyName, PathNotFound from dpath.types import PathSegment, Creator, Hints, Glob, Path, SymmetricInt +from dpath.types import StringMatcher_astuple def make_walkable(node) -> Iterator[Tuple[PathSegment, Any]]: @@ -36,7 +38,7 @@ def leaf(thing): """ Return True if thing is a leaf, otherwise False. """ - leaves = (bytes, str, int, float, bool, type(None)) + leaves = (bytes, str, int, float, bool, type(None), Pattern) return isinstance(thing, leaves) @@ -182,9 +184,13 @@ def match(segments: Path, glob: Glob): or more star segments and the type will be coerced to match that of the segment. - A segment is considered to match a glob if the function - fnmatch.fnmatchcase returns True. If fnmatchcase returns False or - throws an exception the result will be False. + A segment is considered to match a glob when either: + - the glob is a String : the function fnmatch.fnmatchcase returns True. + If fnmatchcase returns False or throws an exception the result will be False. + - or, the glob is a re.Pattern (result of re.compile) and re.Pattern.match returns + a match + - or, the glob is a generalized match object (duck typed if available, derivative + of class Basic_StringMatcher (always available)), and the method match return is not 'None'. match(segments, glob) -> bool """ @@ -234,6 +240,7 @@ def match(segments: Path, glob: Glob): # If search path segment (s) is an int then assume currently evaluated index (g) might be a sequence # index as well. Try converting it to an int. if isinstance(s, int) and s == int(g): + continue except: # Will reach this point if g can't be converted to an int (e.g. when g is a RegEx pattern). @@ -241,10 +248,13 @@ def match(segments: Path, glob: Glob): s = str(s) try: - # Let's see if the glob matches. We will turn any kind of - # exception while attempting to match into a False for the - # match. - if not fnmatchcase(s, g): + # Let's see if the glob, regular expression or the generalized match object matches. + # We will turn any kind of exception while attempting to match into a False for the match. 
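+            # Note: a StringMatcher (re.Pattern or a user-supplied matcher) signals
+            # rejection by returning None from match(); any other return value,
+            # including False, is treated as a successful match.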
+ if isinstance(g, StringMatcher_astuple): + mobj = g.match(s) + if mobj is None: + return False + elif not fnmatchcase(s, g): return False except: return False diff --git a/dpath/types.py b/dpath/types.py index c4a4a56..89d7471 100644 --- a/dpath/types.py +++ b/dpath/types.py @@ -1,5 +1,17 @@ from enum import IntFlag, auto from typing import Union, Any, Callable, Sequence, Tuple, List, Optional, MutableMapping +from re import Pattern +from abc import ABC +from dpath.options import PEP544_PROTOCOL_AVAILABLE + +if PEP544_PROTOCOL_AVAILABLE: + try: + # Use PEP544 for Duck Typing style generalized match objects + # Requires Python 3.8 + from typing import Protocol, runtime_checkable + from abc import abstractmethod + except Exception: + PEP544_PROTOCOL_AVAILABLE = False class SymmetricInt(int): @@ -46,7 +58,7 @@ class MergeType(IntFlag): replaces the destination in this situation.""" -PathSegment = Union[int, str, bytes] +PathSegment = Union[int, str, bytes, Pattern] """Type alias for dict path segments where integers are explicitly casted.""" Filter = Callable[[Any], bool] @@ -54,10 +66,73 @@ class MergeType(IntFlag): (Any) -> bool""" -Glob = Union[str, Sequence[str]] -"""Type alias for glob parameters.""" -Path = Union[str, Sequence[PathSegment]] +class Basic_StringMatcher (ABC): + """ Base class to be used when typing.Protocol is not available, or if the user so chooses. + (always supported). + In this case, a derived class defining the method match can be used to match path components. + (see examples) + """ + + def __init__(self): + raise RuntimeError("This is a pseudo abstract class") + + def match(self, str): + """ This must be provided by the user derived class to define a custom matcher + Args: + str ( str): the string to be matched + Returns: + None: the string does not match + otherwise: the match is accepted, in particular False is not a rejection. + """ + pass + + +if PEP544_PROTOCOL_AVAILABLE: + # Introduced in Python 3.8 + + @runtime_checkable + class Duck_StringMatcher(Protocol): + """ Permits match component matching using duck typing (see examples): + The user must provide and object that defines the match method to + implement the generalized matcher. + + Uses PEP 544: Protocols: Structural subtyping (static duck typing) + to define requirements for a string matcher that can be used in + an extended glob. + + Requirement: + - match(str) -> Optional (Object) + """ + @abstractmethod + def match(self, str) -> Optional[object]: + """ Requirement for match function, must return None if matching + rejected. False is not a rejection ! + """ + pass + + StringMatcher = Union[Pattern, Duck_StringMatcher, Basic_StringMatcher] + + # for use with isinstance + StringMatcher_astuple = (Pattern, Basic_StringMatcher, Duck_StringMatcher) +else: + + Duck_StringMatcher = Basic_StringMatcher + + StringMatcher = Union[Pattern, Basic_StringMatcher] + """ Either a re.Pattern or a type that satisfies duck typing requirements + for matching strings + """ + + # For use with isinstance. 
Apparently, isinstance ability to deal with Union + # is not available before Python 3.10, see https://bugs.python.org/issue44529 + # and https://www.python.org/dev/peps/pep-0604/#isinstance-and-issubclass + StringMatcher_astuple = (Pattern, Basic_StringMatcher) + +"""Type alias for glob parameters, allows re.Pattern and generalized matchers""" +Glob = Union[str, Pattern, Sequence[Union[str, Pattern, StringMatcher]]] + +Path = Union[str, Pattern, Sequence[PathSegment]] """Type alias for path parameters.""" Hints = Sequence[Tuple[PathSegment, type]] diff --git a/tests/regexpTestLib.py b/tests/regexpTestLib.py new file mode 100644 index 0000000..0778938 --- /dev/null +++ b/tests/regexpTestLib.py @@ -0,0 +1,179 @@ +# -*- coding: utf-8 -*- +# -*- mode: Python -*- +# +# (C) Alain Lichnewsky, 2023 +# +# These classes are used to simplify testing code in test_regexp_exts_simple.py. +# +# They allow: +# 1) to iterate over a sequence of test-cases specifying: +# a) a glob expression describing a list of paths +# b) a list of expected results +# 2) apply a function on each test-case +# 3) check that all outputs are withing the specified results and that all +# expected results have been produced at least once +# +# The currently implemented classes: do not check the order of the output results, +# or the multiplicity (beyond "at least once") +# +# Examples are shown in file test_regexp_ext_simple.py. +# ------------------------------------------------------------------------ + +import sys + +from typing import Sequence, Union, Dict +from pprint import PrettyPrinter + +# single point of parametrization of output stream +_print_file = sys.stderr + + +def show(*posArgs, **keyArgs): + print(*posArgs, file=_print_file, **keyArgs) + + +class Loop: + """ Given a dict and a specification table (containing a path/glob specification + and a list of expected result), apply a function to each spec and verify the + result wrt. the expected result. + + The run method checks that all results are in the expected list and that each + expected result has been produced once. No consideration is given to multiplicity and order. + """ + def __init__(self, data: Union[Sequence, Dict], specs: Sequence): + """ Defines the data dict/sequence to which functions are applied, and + a sequence of test cases specified with tuples of an input and a sequence of outputs. 
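+
+        For instance (borrowed from tests/test_regexp.py), a single spec entry pairing
+        a list-form glob with its expected search paths may read:
+
+            (["**", re.compile("[a-z]+\\d+$")], ("a001", "a001/b2"))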
+ + Args: + data (Union[Sequence, Dict]): the dict to which dpath functions are applied + specs (Sequence): Each entry is a tuple: (test specification, output) + """ + self.data = data + self.specs = specs + self.verbose = False + self.indent = 12 + self.pretty = PrettyPrinter(indent=self.indent, width=120) + self.pp = lambda x: x + + def setVerbose(self, v=True): + """set the verbosity level, if true all tests cases and results are listed + + Args: + v (bool, optional):Defaults to True + + Returns: self, for chaining methods + """ + self.verbose = v + return self + + def setPrettyPrint(self): + """Set pretty printing mode + + Returns: self, for chaining methods + """ + self.pp = self._pretty + return self + + def _pretty(self, x): + """Internal method for returning : + - if PrettyPrint is set: a pretty printed/indented result + - otherwise : the unchanged input + + Args: + x (Any): object which can be processed by Python's pretty printer + + Returns: a pretty string + """ + def do_NL(x): + if "\n" in x: + return "\n" + " " * self.indent + x + else: + return x + + return do_NL(self.pretty.pformat(x)) + + def _validate_collect(self, result, expected, found): + """ (internal) Checks that the result produced is in the 'expected' field of the test + specification table. No exception is expected, but the user may have special + result strings to denote exception. (see examples in test_regexp_ext_simple) + + The result is collected for later determination of missing results wrt. expected. + + Args: + result : the result to be tested or an Exception instance + expected : sequence of expected results + found ( set): set used to collect results + """ + if result is None: + assert expected is None + elif expected is None: + show(f"Error: Expected result: None, result={result}") + assert result is None + else: + # this simplifies specs tables when a single output is expected + if isinstance(expected, (dict, bool, str)): + expected = (expected,) + assert result in expected + found.append(result) + + def _validate_collection(self, expected, found, spec, specCount): + """ (internal) Checks that the found sequence covers all expected values. + Args: + expected (Sequence): expected results + found ( Set): observed results + spec (Sequence): dpath parameter (Glob) + specCount (int): position in specification table, printed to facilitate identification + of diagnostics. + """ + if expected is not None: + if isinstance(expected, (dict, bool, str)): + expected = (expected,) + + # compute difference between found and expected + diff = [x for x in expected if (x not in found)] + if len(found) == 0: + found = "None" + + # tell the user + if len(diff) != 0: + if not self.verbose: + show(f"\t{specCount:2} spec:{spec}") + show(f"Error\t(Sets) Found:{self.pp(found)}\n\tExpected:{self.pp(expected)}") + show(f"Expected values missing : {self.pp(diff)}") + assert len(diff) == 0 + else: + if len(found) > 0: + if not self.verbose: + show(f"\t{specCount:2} spec:{self.pp(spec)},\n\t expected={self.pp(expected)},\n\tself.pp(found)") + assert len(found) == 0 + + def run(self, func): + """For each tuple in the specification table, apply function func with + arguments (data, specification) and check that the result is valid. + + If verbose set, outputs test case and sequence of results. + + The set of results of function application is collected and analyzed. + + Args: + func (data, spec) -> result: function called with arguments data and test case + specification, returns result to be monitored. 
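+
+            A typical invocation, mirroring the test classes in tests/test_regexp.py,
+            might look like:
+
+                Loop(data, specs).setVerbose(True).run(
+                    lambda d, s: dpath.search(d, s, yielded=True))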
+ """ + specCount = 0 + for (spec, expected) in self.specs: + specCount += 1 + + if isinstance(expected, str): + expected = (expected,) + + if self.verbose: + show(f"\t{specCount:2} spec:{self.pp(spec)},\t expected={self.pp(expected)}") + + found = [] + for result, value in func(self.data, spec): + if self.verbose: + show(f"\t\tpath:{result}\tvalue:{self.pp(value)}\texpected:{self.pp(expected)}") + + self._validate_collect(result, expected, found) + + self._validate_collection(expected, found, spec, specCount) diff --git a/tests/test_duck_typing.py b/tests/test_duck_typing.py new file mode 100644 index 0000000..1801d0c --- /dev/null +++ b/tests/test_duck_typing.py @@ -0,0 +1,275 @@ +#!/usr / bin / env python3 +# -*- coding: utf-8 -*- +# -*- mode: Python -*- +# +# (C) Alain Lichnewsky, 2022, 2023 +# +# Tests of generalized path segment match using ad-hoc matcher objects +# + +import dpath as DP +import unittest +import re +import sys +import rapidfuzz + + +def _sprint(*args, **kwdargs): + print(*args, **kwdargs, file=sys.stderr) + + +# check that how the options have been set +_sprint(f"At entry in test_path_ext ALLOW_REGEX = {DP.options.ALLOW_REGEX}") + +if not DP.options.ALLOW_REGEX: + _sprint("This test only works with ALLOW_REGEX = True") + DP.options.ALLOW_REGEX = True # enable re.regexp support in path expr. + +if DP.options.PEP544_PROTOCOL_AVAILABLE: + _sprint(f"\tPEP544_PROTOCOL_AVAILABLE={DP.options.PEP544_PROTOCOL_AVAILABLE}") + _sprint("\tWhen True, this permits duck typing, only available since Python3.8") + + +class TestLangProc(unittest.TestCase): + """This tests the language processor support of 'typing' module (Protocol, + runtime_checkable), and the abilty to test types """ + + def test1(self): + if not DP.options.PEP544_PROTOCOL_AVAILABLE: + _sprint("Test1 PEPS544 Protocol not available") + + class Anagram(): + def __init__(self, s): + self.ref = "".join(sorted(s)) + + def match(self, st): + retval = True if "".join(sorted(st)) == self.ref else None + return retval + + ana = Anagram("sire") + test_cases = [ + (ana, Anagram, True), + ("ana", Anagram, False), + ("ana", DP.types.StringMatcher_astuple, False), + (re.compile("ana"), DP.types.StringMatcher_astuple, True), + (re.compile("ana"), DP.types.Basic_StringMatcher, False)] + + if DP.options.PEP544_PROTOCOL_AVAILABLE: + # these require Python > 3.7 + test_cases.extend([ + (ana, DP.types.Duck_StringMatcher, True), + (ana, DP.types.StringMatcher_astuple, True), + ("ana", DP.types.Duck_StringMatcher, False), + (re.compile("ana"), DP.types.Duck_StringMatcher, True)]) + else: + # This executes when Python < 3.7, but rejects object ana + # since it is neither a re.Pattern, nor derived from BasicStringMatcher. + # Shows that object ana requires non available duck typing! 
+ test_cases.extend([(ana, DP.types.StringMatcher_astuple, False)]) + + success = True + for (o, c, e) in test_cases: + r = isinstance(o, c) + exp = "OK" if (e == r) else "Unexpected" + _sprint(f"isinstance({o},{c}) returns {r} expected {e}: {exp}") + if (e != r): + success = False + assert success + _sprint("Performed TestLangProc.test1") + + +class TestBasics(unittest.TestCase): + """ This tests mixing lists with dicts, inspired by Issue #178 + """ + + tble = [{'info': {'label': 'a', + 'placeholder': 'A', + 'value': 'some text'} + }, + {'info': {'label': 'b', + 'placeholder': 'B', + 'value': ''}}, + {'info': {'label': 'c', + 'placeholder': 'C', + 'value': ''}}, + {2: [{'a': "A", 'b': "B"}, + {'aa': "AA", 'bb': "BB"}]} + ] + + mydict = {"first": [{'info': {'label': 'a', + 'placeholder': 'A', + 'value': 'some text'} + }, + {'info': {'label': 'b', + 'placeholder': 'B', + 'value': ''}}, + {'info': {'label': 'c', + 'placeholder': 'C', + 'value': ''}}], + 2: [{'a': "A", 'b': "B"}, + {'aa': "AA", 'bb': "BB"} + ] + } + + def testDuckStringMatcher(self): + """ Test types + """ + _sprint(f"PEP544_PROTOCOL_AVAILABLE={DP.options.PEP544_PROTOCOL_AVAILABLE}") + if not DP.options.PEP544_PROTOCOL_AVAILABLE: + _sprint("skipping TestBasics.testDuckStringMatcher, not available for Python <= 3.7") + return + _sprint("Entered TestBasics.testDuckStringMatcher") + + str1 = "a string" + if isinstance(str1, DP.types.Duck_StringMatcher): + raise RuntimeError("A string should not be accepted as a StringMatcher") + + rex = re.compile(r"\d+") + assert isinstance(rex, DP.types.Duck_StringMatcher) + + class Anagram(): + def __init__(self, s): + self.ref = "".join(sorted(s)) + + def match(self, st): + retval = True if "".join(sorted(st)) == self.ref else None + return retval + + class Weird(): + def __init__(self, s): + self.s = s + + class Bad(): + pass + + ana = Anagram("tryit") + catAna = isinstance(ana, DP.types.Duck_StringMatcher) + _sprint(f"Anagram is instance Duck_StringMatcher={catAna}") + assert catAna + + catWeird = isinstance(Weird("oh"), DP.types.Duck_StringMatcher) + _sprint(f"Weird is instance Duck_StringMatcher={catWeird}") + assert not catWeird + + catBad = isinstance(Bad(), DP.types.Duck_StringMatcher) + _sprint(f"Bad is instance Duck_StringMatcher={catBad}") + assert not catBad + + def test1(self): + """ Test1: reference, test extended glob with embedded re.regex + """ + _sprint("Entered test1") + + mydict = TestBasics.mydict + + r1 = DP.search(mydict, '**/placeholder') + r2 = DP.search(mydict, '**/{plac\\S+r$}') + assert r1 == r2 + _sprint("TestBasics.test1 : PASSED") + + def test2(self): + """ Test2: using a StringMatcher duck typed class + """ + if not DP.options.PEP544_PROTOCOL_AVAILABLE: + _sprint("TestBasics.test2 disabled, cannot use PEPS544 Protocol with this Python") + return + + _sprint("Entered test2") + + class MySM(): + def match(self, st): + return st == "placeholder" + + mydict = TestBasics.mydict + + r1 = DP.search(mydict, '**/placeholder', afilter=lambda x: 'C' == x) + r2 = DP.search(mydict, ['**', MySM()], afilter=lambda x: 'C' == x) + r3 = DP.search(mydict, '**/{plac\\S+r$}', afilter=lambda x: 'C' == x) + + assert r1 == r2 + assert r1 == r3 + _sprint("TestBasics.test2 : PASSED") + + def test3(self): + """ Test3: using a StringMatcher (duck typed or derivative) class, according to + Python version's ability. 
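+        The list-form search ['**', Anagram("bella")] is expected to return the same
+        result as the plain glob "**/label".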
+ """ + # This test corresponds to example in README.rst + + _sprint("Entered test3 (anagram)") + _sprint(f"PEP544_PROTOCOL_AVAILABLE={DP.options.PEP544_PROTOCOL_AVAILABLE}") + + if DP.options.PEP544_PROTOCOL_AVAILABLE: + class Anagram(): + def __init__(self, s): + self.ref = "".join(sorted(s)) + + def match(self, st): + retval = True if "".join(sorted(st)) == self.ref else None + return retval + else: + class Anagram(DP.types.Basic_StringMatcher): + def __init__(self, s): + self.ref = "".join(sorted(s)) + + def match(self, st): + retval = True if "".join(sorted(st)) == self.ref else None + return retval + + mydict = TestBasics.mydict + + r1 = DP.search(mydict, "**/label") + r2 = DP.search(mydict, ['**', Anagram("bella")]) + _sprint(f"Explicit {r1}") + _sprint(f"Anagram {r2}") + expected = {'first': [{'info': {'label': 'a'}}, + {'info': {'label': 'b'}}, + {'info': {'label': 'c'}}]} + assert r1 == r2 + assert r1 == expected + _sprint("TestBasics.test3 : PASSED") + + def test4(self): + """ Test4: using a StringMatcher (duck typed or derivative) class (with RapidFuzz pkg + https://github.com/maxbachmann/RapidFuzz) + """ + # This test corresponds to example in README.rst + + _sprint("Entered test4") + + if DP.options.PEP544_PROTOCOL_AVAILABLE: + class Approx(): + def __init__(self, s, quality=90): + self.ref = s + self.quality = quality + + def match(self, st): + fratio = rapidfuzz.fuzz.ratio(st, self.ref) + retval = True if fratio > self.quality else None + return retval + else: + class Approx(DP.types.Basic_StringMatcher): + def __init__(self, s, quality=90): + self.ref = s + self.quality = quality + + def match(self, st): + fratio = rapidfuzz.fuzz.ratio(st, self.ref) + retval = True if fratio > self.quality else None + return retval + + mydict = TestBasics.mydict + + r1 = DP.search(mydict, "**/placeholder") + r2 = DP.search(mydict, ['**', Approx("placecolder")]) + r3 = DP.search(mydict, ['**', Approx("acecolder", 75)]) + _sprint(f"Explicit {r1}") + _sprint(f"Approx {r2}") + _sprint(f"Approx {r3}") + expected = {'first': [{'info': {'placeholder': 'A'}}, + {'info': {'placeholder': 'B'}}, + {'info': {'placeholder': 'C'}}]} + assert r1 == r2 + assert r1 == r3 + assert r1 == expected + _sprint("TestBasics.test4 : PASSED") diff --git a/tests/test_regexp.py b/tests/test_regexp.py new file mode 100644 index 0000000..d161685 --- /dev/null +++ b/tests/test_regexp.py @@ -0,0 +1,334 @@ +# -*- coding: utf-8 -*- +# -*- mode: Python -*- +# +# (C) Alain Lichnewsky, 2022, 2023 +# +# Test support of extended specs with re.regex in many dpath functions +# +import sys +from os import environ +import re + +from copy import copy + +import unittest +import dpath as DP +from dpath.exceptions import InvalidRegex + +# reusable classes to perform tests on lists of (case, expected result) +import tests.regexpTestLib as T + +# Allow for command line/environment setup of verbose output +# The default is not set. 
+_verbosity = "VERBOSE_TEST" in environ and environ["VERBOSE_TEST"] == "TRUE" + + +class SampleDicts: + + d1 = { + "a001": { + "b2": { + "c1.2": { + "d.dd": 0, + "e.ee": 1, + "f.f0": 2, + }, + }, + }, + } + + d2 = { + "Name": "bridge", + "Id": "333d22b3724", + "Created": "2022-12-08T09:02:33.360812052+01:00", + "Driver": "bridge", + "EnableIPv6": False, + "IPAM": { + "Driver": "default", + "Options": None, + "Config": + { + "Subnet": "172.17.0.0/16", + "Gateway": "172.17.0.1" + }}, + "ConfigFrom": { + "Network": "172.O.0.0/32"}, + "Containers": { + "199c590e8f13477": { + "Name": "al_dpath", + "MacAddress": "02:34:0a:11:10:22", + "IPv4Address": "172.17.0.2/16", + "IPv6Address": ""}}, + "Labels": {}} + + +specs1_A = (([re.compile(".*")], "a001"), + ([re.compile("[a-z]+$")], None), + (["*", re.compile(".*")], "a001/b2"), + (["*", "*", re.compile(".*")], "a001/b2/c1.2"), + (["*", re.compile("[a-z]+\\d+$")], "a001/b2"), + (["*", re.compile("[a-z]+[.][a-z]+$")], None), + (["**", re.compile(".*")], ("a001", "a001/b2", "a001/b2/c1.2", "a001/b2/c1.2/d.dd", + "a001/b2/c1.2/e.ee", "a001/b2/c1.2/f.f0")), + (["**", re.compile("[a-z]+\\d+$")], ("a001", "a001/b2")), + (["**", re.compile("[a-z]+[.][a-z]+$")], ('a001/b2/c1.2/d.dd', 'a001/b2/c1.2/e.ee'))) + +specs1_B = (([re.compile(".*")], True), + ([re.compile("[a-z]+$")], False), + (["*", re.compile(".*")], False), + (["*", "*", re.compile(".*")], False), + (["*", re.compile("[a-z]+\\d+$")], False), + (["*", re.compile("[a-z]+[.][a-z]+$")], False), + (["**", re.compile(".*")], True), + (["**", re.compile("[a-z]+\\d+$")], True), + (["**", re.compile("[a-z]+[.][a-z]+$")], False)) + +specs1_C = (([re.compile(".*")], set()), + ([re.compile("[a-z]+$")], set()), + (["*", re.compile(".*")], set()), + (["*", "*", re.compile(".*")], set()), + (["*", re.compile("[a-z]+\\d+$")], set()), + (["*", re.compile("[a-z]+[.][a-z]+$")], set()), + (["**", re.compile(".*")], set((0, 1, 2))), + (["**", re.compile("[a-z]+\\d+$")], set()), + (["**", re.compile("[a-z]+[.][a-z]+$")], set((0, 1)))) + +specs1_D = (([re.compile(".*")], None), + ([re.compile("[a-z]+$")], None), + (["*", re.compile(".*")], None), + (["*", "*", re.compile(".*")], None), + (["*", re.compile("[a-z]+\\d+$")], None), + (["*", re.compile("[a-z]+[.][a-z]+$")], None), + (["**", re.compile(".*")], ("a001/b2/c1.2/d.dd", + "a001/b2/c1.2/e.ee", "a001/b2/c1.2/f.f0")), + (["**", re.compile("[a-z]+\\d+$")], None), + (["**", re.compile("[a-z]+[.][a-z]+$")], ('a001/b2/c1.2/d.dd', 'a001/b2/c1.2/e.ee'))) + +specs1_View = (([re.compile(".*")], ({'a001': {'b2': {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}}},)), + ([re.compile("[a-z]+$")], ({},)), + (["*", re.compile(".*")], ({'a001': {'b2': {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}}},)), + (["*", "*", re.compile(".*")], ({'a001': {'b2': {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}}},)), + (["*", re.compile("[a-z]+\\d+$")], ({'a001': {'b2': {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}}},)), + (["*", re.compile("[a-z]+[.][a-z]+$")], ({},)), + (["**", re.compile(".*")], ({'a001': {'b2': {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}}},)), ) + +specs1_Get = (([re.compile(".*")], {'b2': {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}}), + ([re.compile("[a-z]+$")], (('*NONE*',),)), + (["*", re.compile(".*")], {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}), + (["*", "*", re.compile(".*")], {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}), + (["*", re.compile("[a-z]+\\d+$")], {'c1.2': {'d.dd': 0, 'e.ee': 1, 'f.f0': 2}}), + (["*", re.compile("[a-z]+[.][a-z]+$")], (('*NONE*',),)), + (["**", 
re.compile(".*")], ("Exception",)), + (["**", re.compile("[a-z]+\\d+$")], ("Exception",)), + (["**", re.compile("[a-z]+[.][a-z]+$")], ("Exception",)),) + +specs2_Search = ((["*", re.compile("[A-Z][a-z\\d]*$")], + ("IPAM/Driver", "IPAM/Options", "IPAM/Config", "ConfigFrom/Network")), + (["**", re.compile("[A-Z][a-z\\d]*$")], + ("Name", "Id", "Created", "Driver", + "Containers", "Labels", "IPAM/Driver", "IPAM/Options", + "IPAM/Config", "IPAM/Config/Subnet", "IPAM/Config/Gateway", + "ConfigFrom/Network", "Containers/199c590e8f13477/Name")), + (["**", re.compile("[A-Z][A-Za-z\\d]*Address$")], + ("Containers/199c590e8f13477/MacAddress", "Containers/199c590e8f13477/IPv4Address", + "Containers/199c590e8f13477/IPv6Address")), + (["**", re.compile("[A-Za-z]+\\d+$")], ("EnableIPv6",)), + (["**", re.compile("\\d+[.]\\d+")], None), + + # repeated intentionally using raw strings rather than '\\' escapes + + (["*", re.compile(r"[A-Z][a-z\d]*$")], + ("IPAM/Driver", "IPAM/Options", "IPAM/Config", "ConfigFrom/Network")), + (["**", re.compile(r"[A-Za-z]+\d+$")], ("EnableIPv6",)), + (["**", re.compile(r"\d+[.]\d+")], None)) + +specs2_SearchPar = (("**/{^[A-Za-z]{2}$}", ("Id",)), + ("{^[A-Za-z]{2}$}", ("Id",)), + (re.compile("^[A-Za-z]{2}$"), ("Id",)), + ("*/{[A-Z][A-Za-z\\d]*$}", ("IPAM/Driver", "IPAM/Options", "IPAM/Config", "ConfigFrom/Network")), + ("{.*}/{[A-Z][A-Za-z\\d]*$}", ("IPAM/Driver", "IPAM/Options", "IPAM/Config", "ConfigFrom/Network")), + ("**/{[A-Z][A-Za-z\\d]*\\d$}", ("EnableIPv6",)), + ("**/{[A-Z][A-Za-z\\d]*Address$}", ("Containers/199c590e8f13477/MacAddress", + "Containers/199c590e8f13477/IPv4Address", + "Containers/199c590e8f13477/IPv6Address")), + + # repeated intentionally using raw strings rather than '\\' escapes + + (r"**/{[A-Z][A-Za-z\d]*\d$}", ("EnableIPv6",)),) + + +# one class per function to be tested, postpone tests that need +# DP.options.ALLOW_REGEX == True + +class TestSearchAlways(): + def setUp(self): + if "ALLOW_REGEX" in environ: + DP.options.ALLOW_REGEX = True + + def test1(self): + T.show(f"In {self.test1}") + tests = T.Loop(SampleDicts.d1, specs1_A) + + def fn(_data, _spec): + return DP.search(_data, _spec, yielded=True) + + tests.setVerbose(_verbosity).run(fn) + + def test2(self): + T.show(f"In {self.test2}") + tests = T.Loop(SampleDicts.d1, specs1_D) + + def afilter(x): + if isinstance(x, int): + return True + return False + + def fn(_data, _spec): + return DP.search(_data, _spec, yielded=True, afilter=afilter) + + tests.setVerbose(_verbosity).run(fn) + + def test3(self): + T.show(f"In {self.test3}") + tests = T.Loop(SampleDicts.d2, specs2_Search) + + def fn(_data, _spec): + return DP.search(_data, _spec, yielded=True) + + tests.setVerbose(_verbosity).setPrettyPrint().run(fn) + + +class TestGet(): + def setUp(self): + if "ALLOW_REGEX" in environ: + DP.options.ALLOW_REGEX = True + + def test1(self): + T.show(f"In {self.test1}") + tests = T.Loop(SampleDicts.d1, specs1_Get) + + def fn(_data, _spec): + try: + return ((DP.get(_data, _spec, default=("*NONE*",)), None),) + except InvalidRegex as err: + T.show(f"Exception: {err}") + return (("InvalidRegex", None), ) + except Exception as err: + T.show(f"Exception: {err}") + return (("Exception", None), ) + + tests.setVerbose(_verbosity).run(fn) + + +class TestView(): + def setUp(self): + if "ALLOW_REGEX" in environ: + DP.options.ALLOW_REGEX = True + + def test1(self): + T.show(f"In {self.test1}") + tests = T.Loop(SampleDicts.d1, specs1_View) + + def fn(_data, _spec): + r = DP.segments.view(_data, _spec) + return ((r, None), 
) + + tests.setVerbose(_verbosity).run(fn) + + +class TestMatch(): + def setUp(self): + if "ALLOW_REGEX" in environ: + DP.options.ALLOW_REGEX = True + + def test1(self): + T.show(f"In {self.test1}") + tests = T.Loop(SampleDicts.d1, specs1_B) + + def fn(_data, _spec): + r = DP.segments.match(_data, _spec) + return ((r, None), ) + + tests.setVerbose(_verbosity).run(fn) + + +class TestSearch(): + def setUp(self): + # these tests involve regex in parenthesized strings + if "ALLOW_REGEX" in environ: + DP.options.ALLOW_REGEX = True + + if DP.options.ALLOW_REGEX is not True: + DP.options.ALLOW_REGEX = True + T.show("ALLOW_REGEX == True required for this test: forced") + + def test1(self): + T.show(f"In {self.test1}") + tests = T.Loop(SampleDicts.d2, specs2_SearchPar) + + def fn(_data, _spec): + return DP.search(_data, _spec, yielded=True) + + tests.setVerbose(_verbosity).setPrettyPrint().run(fn) + + def test2(self): + T.show(f"In {self.test1}") + specs = (("/**/{zz)bad}", ("InvalidRegex",)), + ("{zz)bad}/yyy", ("InvalidRegex",)), + ("**/{zz)bad}/yyy", ("InvalidRegex",)), + ("**/{zz)bad}/yyy/.*", ("InvalidRegex",)), + (123, ("Exception",))) + + tests = T.Loop(SampleDicts.d2, specs) + + def fn(_data, _spec): + try: + return DP.search(_data, _spec, yielded=True) + except InvalidRegex as err: + if tests.verbose: + T.show(f"\tErrMsg: {err}") + return (("InvalidRegex", None),) + except Exception as err: + if tests.verbose: + T.show(f"\tErrMsg: {err}") + return (("Exception", None),) + + tests.setVerbose(_verbosity).setPrettyPrint().run(fn) + + +class TestDelete(unittest.TestCase): + def setUp(self): + # these tests involve regex in parenthesized strings + if "ALLOW_REGEX" in environ: + DP.options.ALLOW_REGEX = True + + if DP.options.ALLOW_REGEX is not True: + DP.options.ALLOW_REGEX = True + T.show("ALLOW_REGEX == True required for this test: forced") + + def test1(self): + T.show(f"In {self.test1}") + dict1 = { + "a": { + "b": 0, + "12": 0, + }, + "a0": { + "b": 0, + }, + } + + specs = (re.compile("[a-z]+$"), re.compile("[a-z]+\\d+$"), + "{[a-z]+\\d+$}") + i = 0 + for spec in specs: + dict = copy(dict1) + print(f"spec={spec}") + print(f"Before deletion dict={dict}", file=sys.stderr) + DP.delete(dict, [spec]) + print(f"After deletion dict={dict}", file=sys.stderr) + if i == 0: + assert (dict == {"a0": {"b": 0, }, }) + else: + assert (dict == {"a": {"b": 0, "12": 0, }}) + i += 1 diff --git a/tox-set-rex.ini b/tox-set-rex.ini new file mode 100644 index 0000000..bee628e --- /dev/null +++ b/tox-set-rex.ini @@ -0,0 +1,32 @@ +# ******** THIS VERSION ENABLES ALLOW_REGEX +# ******** + +# Tox (http://tox.testrun.org/) is a tool for running tests +# in multiple virtualenvs. This configuration file will run the +# test suite on all supported python versions. To use it, "pip install tox" +# and then run "tox" from this directory. + + +[flake8] +ignore = E501,E722,W503 + +[tox] +envlist = pypy37, pypy39, py38, py311 + +[gh-actions] +python = + pypy-3.7: pypy37 + pypy-3.9: pypy39 + 3.8: py38 + 3.11: py311 + +[testenv] +deps = + hypothesis + mock + nose2 + rapidfuzz +commands = nose2 {posargs} +setenv = + ALLOW_REGEX = TRUE + diff --git a/tox.ini b/tox.ini index d613837..cef184e 100644 --- a/tox.ini +++ b/tox.ini @@ -4,21 +4,22 @@ # and then run "tox" from this directory. 
[flake8] -ignore = E501,E722 +ignore = E501,E722,W503 [tox] -envlist = pypy37, py38, py39, py310 +envlist = pypy37, pypy39, py38, py311 [gh-actions] python = pypy-3.7: pypy37 + pypy-3.9: pypy39 3.8: py38 - 3.9: py39 - 3.10: py310 + 3.11: py311 [testenv] deps = hypothesis mock nose2 + rapidfuzz commands = nose2 {posargs}