diff --git a/docs/docs/api/rankers/lexicon_entry.md b/docs/docs/api/rankers/lexicon_entry.md
index d42ff46..82479e5 100644
--- a/docs/docs/api/rankers/lexicon_entry.md
+++ b/docs/docs/api/rankers/lexicon_entry.md
@@ -496,3 +496,28 @@ assert ((expected_ranks, expected_lowest_ranked_matches)
== ranker(token_ranking_data))
```
+
+
+### \_\_eq\_\_
+
+```python
+class ContextualRuleBasedRanker(LexiconEntryRanker):
+ | ...
+ | def __eq__(other: object) -> bool
+```
+
+Given another object to compare to, it will return `True` if the other
+object is the same class and was initialised with the same
+`maximum_n_gram_length` and `maximum_number_wildcards` values.
+
+
Parameters¶
+
+
+- __other__ : `object`
+    The object to compare to.
+
+Returns¶
+
+
+- `True` if the other object is of the same class and was initialised with the same values, `False` otherwise.
+
diff --git a/pymusas/rankers/lexicon_entry.py b/pymusas/rankers/lexicon_entry.py
index 3676d1b..206b61e 100644
--- a/pymusas/rankers/lexicon_entry.py
+++ b/pymusas/rankers/lexicon_entry.py
@@ -96,6 +96,10 @@ def from_bytes(bytes_data: bytes) -> "LexiconEntryRanker":
'''
... # pragma: no cover
+ @abstractmethod
+ def __eq__(self, other: object) -> bool:
+ ... # pragma: no cover
+
class ContextualRuleBasedRanker(LexiconEntryRanker):
'''
@@ -529,3 +533,29 @@ def __call__(self, token_ranking_data: List[List[RankingMetaData]]
rankings, None)
return (rankings, global_lowest_rank_indexes)
+
+ def __eq__(self, other: object) -> bool:
+ '''
+        Given another object to compare to, it will return `True` if the
+        other object is the same class and was initialised with the same
+        `maximum_n_gram_length` and `maximum_number_wildcards` values.
+
+ # Parameters
+
+ other : `object`
+            The object to compare to.
+
+ # Returns
+
+ `True`
+ '''
+ if not isinstance(other, ContextualRuleBasedRanker):
+ return False
+
+ if self._maximum_n_gram_length != other._maximum_n_gram_length:
+ return False
+
+ if self._maximum_number_wildcards != other._maximum_number_wildcards:
+ return False
+
+ return True
diff --git a/tests/rankers/test_ranker_lexicon_entry.py b/tests/rankers/test_ranker_lexicon_entry.py
index faf9cdd..2a35e71 100644
--- a/tests/rankers/test_ranker_lexicon_entry.py
+++ b/tests/rankers/test_ranker_lexicon_entry.py
@@ -31,12 +31,16 @@ def to_bytes(self) -> bytes:
def from_bytes(bytes_data: bytes) -> 'TestRanker':
return TestRanker()
+ def __eq__(self, other: object) -> bool:
+ return True
+
concrete_ranker = TestRanker()
assert ([[0]], [None]) == concrete_ranker([[RANKING_META_DATA]])
assert isinstance(concrete_ranker, LexiconEntryRanker)
assert b'test' == concrete_ranker.to_bytes()
assert isinstance(concrete_ranker.from_bytes(b'test'), TestRanker)
+ assert concrete_ranker == TestRanker()
def test_contextual_rule_based_ranker__init__() -> None:
@@ -66,7 +70,7 @@ def test_contextual_rule_based_ranker__init__() -> None:
assert {1: 2, 2: 1} == ranker.n_gram_ranking_dictionary
-def test_to_from_bytes() -> None:
+def test_contextual_rule_based_ranker_to_from_bytes() -> None:
maximum_n_gram_length = 2
maximum_number_wildcards = 1
ranker = ContextualRuleBasedRanker(maximum_n_gram_length,
@@ -77,6 +81,15 @@ def test_to_from_bytes() -> None:
assert {2: 1, 1: 2} == ranker_from_bytes.n_gram_ranking_dictionary
+def test_contextual_rule_based_ranker__eq__() -> None:
+ ranker = ContextualRuleBasedRanker(1, 2)
+ assert 1 != ranker
+
+ assert ranker != ContextualRuleBasedRanker(1, 1)
+ assert ranker != ContextualRuleBasedRanker(2, 1)
+ assert ranker == ContextualRuleBasedRanker(1, 2)
+
+
def test_contextual_rule_based_ranker__call__() -> None:
ranker = ContextualRuleBasedRanker(2, 1)
assert ([], []) == ranker([])