From 995c7b2bc1b7343e2b81acb59adb65463a2c541b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Feb 2022 08:25:54 -0500 Subject: [PATCH 01/21] Requesting of relations of redacted events is allowed. But should not include edits of the event. --- synapse/rest/client/relations.py | 82 ++++++++++----------- synapse/storage/databases/main/relations.py | 18 +++-- tests/rest/client/test_relations.py | 9 ++- 3 files changed, 53 insertions(+), 56 deletions(-) diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 07fa1cdd4c67..2e6c91e203d3 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -27,7 +27,7 @@ from synapse.http.servlet import RestServlet, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.client._base import client_patterns -from synapse.storage.relations import AggregationPaginationToken, PaginationChunk +from synapse.storage.relations import AggregationPaginationToken from synapse.types import JsonDict, StreamToken if TYPE_CHECKING: @@ -82,28 +82,24 @@ async def on_GET( from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - if event.internal_metadata.is_redacted(): - # If the event is redacted, return an empty list of relations - pagination_chunk = PaginationChunk(chunk=[]) - else: - # Return the relations - from_token = None - if from_token_str: - from_token = await StreamToken.from_string(self.store, from_token_str) - to_token = None - if to_token_str: - to_token = await StreamToken.from_string(self.store, to_token_str) - - pagination_chunk = await self.store.get_relations_for_event( - event_id=parent_id, - room_id=room_id, - relation_type=relation_type, - event_type=event_type, - limit=limit, - direction=direction, - from_token=from_token, - to_token=to_token, - ) + # Return the relations + from_token = None + if from_token_str: + from_token = await StreamToken.from_string(self.store, from_token_str) + to_token = None + if to_token_str: + to_token = await StreamToken.from_string(self.store, to_token_str) + + pagination_chunk = await self.store.get_relations_for_event( + event=event, + room_id=room_id, + relation_type=relation_type, + event_type=event_type, + limit=limit, + direction=direction, + from_token=from_token, + to_token=to_token, + ) events = await self.store.get_events_as_list( [c["event_id"] for c in pagination_chunk.chunk] @@ -193,27 +189,23 @@ async def on_GET( from_token_str = parse_string(request, "from") to_token_str = parse_string(request, "to") - if event.internal_metadata.is_redacted(): - # If the event is redacted, return an empty list of relations - pagination_chunk = PaginationChunk(chunk=[]) - else: - # Return the relations - from_token = None - if from_token_str: - from_token = AggregationPaginationToken.from_string(from_token_str) - - to_token = None - if to_token_str: - to_token = AggregationPaginationToken.from_string(to_token_str) - - pagination_chunk = await self.store.get_aggregation_groups_for_event( - event_id=parent_id, - room_id=room_id, - event_type=event_type, - limit=limit, - from_token=from_token, - to_token=to_token, - ) + # Return the relations + from_token = None + if from_token_str: + from_token = AggregationPaginationToken.from_string(from_token_str) + + to_token = None + if to_token_str: + to_token = AggregationPaginationToken.from_string(to_token_str) + + pagination_chunk = await self.store.get_aggregation_groups_for_event( + event_id=parent_id, + room_id=room_id, + 
event_type=event_type, + limit=limit, + from_token=from_token, + to_token=to_token, + ) return 200, await pagination_chunk.to_dict(self.store) @@ -294,7 +286,7 @@ async def on_GET( to_token = await StreamToken.from_string(self.store, to_token_str) result = await self.store.get_relations_for_event( - event_id=parent_id, + event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 36aa1092f602..1705fee2e475 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -94,7 +94,7 @@ def __init__( @cached(tree=True) async def get_relations_for_event( self, - event_id: str, + event: EventBase, room_id: str, relation_type: Optional[str] = None, event_type: Optional[str] = None, @@ -124,7 +124,7 @@ async def get_relations_for_event( """ where_clause = ["relates_to_id = ?", "room_id = ?"] - where_args: List[Union[str, int]] = [event_id, room_id] + where_args: List[Union[str, int]] = [event.event_id, room_id] if relation_type is not None: where_clause.append("relation_type = ?") @@ -157,7 +157,7 @@ async def get_relations_for_event( order = "ASC" sql = """ - SELECT event_id, topological_ordering, stream_ordering + SELECT event_id, relation_type, topological_ordering, stream_ordering FROM event_relations INNER JOIN events USING (event_id) WHERE %s @@ -177,10 +177,14 @@ def _get_recent_references_for_event_txn( last_topo_id = None last_stream_id = None events = [] + is_redacted = event.internal_metadata.is_redacted() for row in txn: - events.append({"event_id": row[0]}) - last_topo_id = row[1] - last_stream_id = row[2] + # Do not include edits for redacted events as they leak event + # content. + if not is_redacted or row[1] != RelationTypes.REPLACE: + events.append({"event_id": row[0]}) + last_topo_id = row[2] + last_stream_id = row[3] # If there are more events, generate the next pagination key. next_token = None @@ -776,7 +780,7 @@ async def _get_bundled_aggregation_for_event( ) references = await self.get_relations_for_event( - event_id, room_id, RelationTypes.REFERENCE, direction="f" + event, room_id, RelationTypes.REFERENCE, direction="f" ) if references.chunk: aggregations.references = await references.to_dict(cast("DataStore", self)) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index a40a5de3991c..695e5dfc3c05 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1475,12 +1475,13 @@ def test_redact_parent_edit(self) -> None: self.assertEqual(relations, {}) def test_redact_parent_annotation(self) -> None: - """Test that annotations of an event are redacted when the original event + """Test that annotations of an event are viewable when the original event is redacted. """ # Add a relation channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") self.assertEqual(200, channel.code, channel.json_body) + related_event_id = channel.json_body["event_id"] # The relations should exist. event_ids, relations = self._make_relation_requests() @@ -1494,11 +1495,11 @@ def test_redact_parent_annotation(self) -> None: # Redact the original event. self._redact(self.parent_id) - # The relations are not returned. + # The relations are returned. 
event_ids, relations = self._make_relation_requests() - self.assertEqual(event_ids, []) + self.assertEquals(event_ids, [related_event_id]) self.assertEqual(relations, {}) # There's nothing to aggregate. chunk = self._get_aggregations() - self.assertEqual(chunk, []) + self.assertEqual(chunk, [{"count": 1, "key": "👍", "type": "m.reaction"}]) From 1b182a0e7e5a120c61b7d210a88ab441223415e5 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 1 Mar 2022 09:18:57 -0500 Subject: [PATCH 02/21] Refactor to keep a map of input event ID -> event. --- synapse/storage/databases/main/relations.py | 25 +++++++-------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 1705fee2e475..eec5877fedd1 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -801,41 +801,32 @@ async def get_bundled_aggregations( A map of event ID to the bundled aggregation for the event. Not all events may have bundled aggregations in the results. """ - # The already processed event IDs. Tracked separately from the result - # since the result omits events which do not have bundled aggregations. - seen_event_ids = set() - + # De-duplicate events by ID to handle the same event requested multiple times. + # # State events and redacted events do not get bundled aggregations. - events = [ - event + events_by_id = { + event.event_id: event for event in events if not event.is_state() and not event.internal_metadata.is_redacted() - ] + } # event ID -> bundled aggregation in non-serialized form. results: Dict[str, BundledAggregations] = {} # Fetch other relations per event. - for event in events: - # De-duplicate events by ID to handle the same event requested multiple - # times. The caches that _get_bundled_aggregation_for_event use should - # capture this, but best to reduce work. - if event.event_id in seen_event_ids: - continue - seen_event_ids.add(event.event_id) - + for event in events_by_id.values(): event_result = await self._get_bundled_aggregation_for_event(event, user_id) if event_result: results[event.event_id] = event_result # Fetch any edits. - edits = await self._get_applicable_edits(seen_event_ids) + edits = await self._get_applicable_edits(events_by_id.keys()) for event_id, edit in edits.items(): results.setdefault(event_id, BundledAggregations()).replace = edit # Fetch thread summaries. if self._msc3440_enabled: - summaries = await self._get_thread_summaries(seen_event_ids) + summaries = await self._get_thread_summaries(events_by_id.keys()) # Only fetch participated for a limited selection based on what had # summaries. participated = await self._get_threads_participated( From a1fe62a02fd966cc27bcc4e4c973be8dd3f7d0cf Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 1 Mar 2022 09:24:13 -0500 Subject: [PATCH 03/21] Include bundled aggregations for redacted events. But do NOT include edits of redacted events. --- synapse/storage/databases/main/relations.py | 16 ++++++++++------ tests/rest/client/test_relations.py | 5 ++++- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index eec5877fedd1..37fa369b5047 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -803,11 +803,9 @@ async def get_bundled_aggregations( """ # De-duplicate events by ID to handle the same event requested multiple times. 
# - # State events and redacted events do not get bundled aggregations. + # State events do not get bundled aggregations. events_by_id = { - event.event_id: event - for event in events - if not event.is_state() and not event.internal_metadata.is_redacted() + event.event_id: event for event in events if not event.is_state() } # event ID -> bundled aggregation in non-serialized form. @@ -819,8 +817,14 @@ async def get_bundled_aggregations( if event_result: results[event.event_id] = event_result - # Fetch any edits. - edits = await self._get_applicable_edits(events_by_id.keys()) + # Fetch any edits (but not for redacted events). + edits = await self._get_applicable_edits( + [ + event_id + for event_id, event in events_by_id.items() + if not event.internal_metadata.is_redacted() + ] + ) for event_id, edit in edits.items(): results.setdefault(event_id, BundledAggregations()).replace = edit diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 695e5dfc3c05..32684724db84 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1498,7 +1498,10 @@ def test_redact_parent_annotation(self) -> None: # The relations are returned. event_ids, relations = self._make_relation_requests() self.assertEquals(event_ids, [related_event_id]) - self.assertEqual(relations, {}) + self.assertEquals( + relations["m.annotation"], + {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]}, + ) # There's nothing to aggregate. chunk = self._get_aggregations() From 0ed4bac46a5756bceb0bd42e71819a3416e4ea5e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 1 Mar 2022 10:02:00 -0500 Subject: [PATCH 04/21] Add tests for thread relations. --- tests/rest/client/test_relations.py | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 32684724db84..f9ae6e663f95 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1506,3 +1506,34 @@ def test_redact_parent_annotation(self) -> None: # There's nothing to aggregate. chunk = self._get_aggregations() self.assertEqual(chunk, [{"count": 1, "key": "👍", "type": "m.reaction"}]) + + @unittest.override_config({"experimental_features": {"msc3440_enabled": True}}) + def test_redact_parent_thread(self) -> None: + """ + Test that thread replies are still available when the root event is redacted. + """ + channel = self._send_relation( + RelationTypes.THREAD, + EventTypes.Message, + content={"body": "reply 1", "msgtype": "m.text"}, + ) + self.assertEqual(200, channel.code, channel.json_body) + related_event_id = channel.json_body["event_id"] + + # Redact one of the reactions. + self._redact(self.parent_id) + + # The unredacted relation should still exist. 
+ event_ids, relations = self._make_relation_requests() + self.assertEquals(len(event_ids), 1) + self.assertDictContainsSubset( + { + "count": 1, + "current_user_participated": True, + }, + relations[RelationTypes.THREAD], + ) + self.assertEqual( + relations[RelationTypes.THREAD]["latest_event"]["event_id"], + related_event_id, + ) From 05ecc6151c51ae2369ce9b880708be1012286196 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Mar 2022 10:19:29 -0500 Subject: [PATCH 05/21] Newsfragment --- changelog.d/12130.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/12130.bugfix diff --git a/changelog.d/12130.bugfix b/changelog.d/12130.bugfix new file mode 100644 index 000000000000..df9b0dc413dd --- /dev/null +++ b/changelog.d/12130.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. From f9ed38c4ef02c1f1087223c999eb2278a7bdd315 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 14:17:33 -0500 Subject: [PATCH 06/21] Pass both event_id and event for caching. --- synapse/rest/client/relations.py | 6 ++++-- synapse/storage/databases/main/relations.py | 14 ++++++++++---- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 2e6c91e203d3..d3e84209753d 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -91,7 +91,7 @@ async def on_GET( to_token = await StreamToken.from_string(self.store, to_token_str) pagination_chunk = await self.store.get_relations_for_event( - event=event, + event_id=parent_id, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -99,6 +99,7 @@ async def on_GET( direction=direction, from_token=from_token, to_token=to_token, + event=event, ) events = await self.store.get_events_as_list( @@ -286,7 +287,7 @@ async def on_GET( to_token = await StreamToken.from_string(self.store, to_token_str) result = await self.store.get_relations_for_event( - event=event, + event_id=parent_id, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -294,6 +295,7 @@ async def on_GET( limit=limit, from_token=from_token, to_token=to_token, + event=event, ) events = await self.store.get_events_as_list( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 37fa369b5047..cd3a435ae739 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -91,10 +91,10 @@ def __init__( self._msc3440_enabled = hs.config.experimental.msc3440_enabled - @cached(tree=True) + @cached(num_args=9, tree=True) async def get_relations_for_event( self, - event: EventBase, + event_id: str, room_id: str, relation_type: Optional[str] = None, event_type: Optional[str] = None, @@ -103,6 +103,7 @@ async def get_relations_for_event( direction: str = "b", from_token: Optional[StreamToken] = None, to_token: Optional[StreamToken] = None, + event: Optional[EventBase] = None, ) -> PaginationChunk: """Get a list of relations for an event, ordered by topological ordering. @@ -117,14 +118,20 @@ async def get_relations_for_event( oldest first (`"f"`). from_token: Fetch rows from the given token, or from the start if None. to_token: Fetch rows up to the given token, or up to the end if None. + event: The matching EventBase to event_id. This *must* be provided. Returns: List of event IDs that match relations requested. The rows are of the form `{"event_id": "..."}`. 
""" + # We don't use `event_id`, its there so that we can cache based on + # it. The `event_id` must match the `event.event_id`. + assert event is not None + assert event.event_id == event_id where_clause = ["relates_to_id = ?", "room_id = ?"] where_args: List[Union[str, int]] = [event.event_id, room_id] + is_redacted = event.internal_metadata.is_redacted() if relation_type is not None: where_clause.append("relation_type = ?") @@ -177,7 +184,6 @@ def _get_recent_references_for_event_txn( last_topo_id = None last_stream_id = None events = [] - is_redacted = event.internal_metadata.is_redacted() for row in txn: # Do not include edits for redacted events as they leak event # content. @@ -780,7 +786,7 @@ async def _get_bundled_aggregation_for_event( ) references = await self.get_relations_for_event( - event, room_id, RelationTypes.REFERENCE, direction="f" + event_id, room_id, RelationTypes.REFERENCE, direction="f", event=event ) if references.chunk: aggregations.references = await references.to_dict(cast("DataStore", self)) From c97c4a8af54df9b20bd2559db42afc39b9c61eb9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 15:25:03 -0500 Subject: [PATCH 07/21] Fix incorrect redaction parameters. --- synapse/storage/databases/main/events.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1dc83aa5e3a6..19000a47dd58 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1812,9 +1812,7 @@ def _handle_event_relations( txn.call_after(self.store.get_applicable_edit.invalidate, (parent_id,)) if rel_type == RelationTypes.THREAD: - txn.call_after( - self.store.get_thread_summary.invalidate, (parent_id, event.room_id) - ) + txn.call_after(self.store.get_thread_summary.invalidate, (parent_id,)) # It should be safe to only invalidate the cache if the user has not # previously participated in the thread, but that's difficult (and # potentially error-prone) so it is always invalidated. From 16cf9c0ec39827d5c2b12ddc42f94cb133f11b05 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 8 Mar 2022 15:33:51 -0500 Subject: [PATCH 08/21] Properly invalidate edits when redacting a parent event. --- synapse/storage/databases/main/cache.py | 4 ++++ synapse/storage/databases/main/events.py | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index abd54c7dc703..d6a2df1afeb6 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -191,6 +191,10 @@ def _invalidate_caches_for_event( if redacts: self._invalidate_get_event_cache(redacts) + # Caches which might leak edits must be invalidated for the event being + # redacted. + self.get_relations_for_event.invalidate((redacts,)) + self.get_applicable_edit.invalidate((redacts,)) if etype == EventTypes.Member: self._membership_stream_cache.entity_has_changed(state_key, stream_ordering) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 19000a47dd58..662c6362792f 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1978,6 +1978,15 @@ def _handle_redact_relations( txn, self.store.get_thread_participated, (redacted_relates_to,) ) + # Caches which might leak edits must be invalidated for the event being + # redacted. 
+ self.store._invalidate_cache_and_stream( + txn, self.store.get_relations_for_event, (redacted_event_id,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_applicable_edit, (redacted_event_id,) + ) + self.db_pool.simple_delete_txn( txn, table="event_relations", keyvalues={"event_id": redacted_event_id} ) From 6c585094ea62c1e7a7a00d5652c882772f9e4cb9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 08:14:40 -0500 Subject: [PATCH 09/21] Allow for ignoring some arguments when caching. --- synapse/util/caches/descriptors.py | 29 +++++++++++++++++--- tests/util/caches/test_descriptors.py | 39 +++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1cdead02f14b..b4cc5067a862 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -20,6 +20,7 @@ Any, Awaitable, Callable, + Collection, Dict, Generic, Hashable, @@ -69,6 +70,7 @@ def __init__( self, orig: Callable[..., Any], num_args: Optional[int], + uncached_args: Optional[Collection[str]] = None, cache_context: bool = False, ): self.orig = orig @@ -88,6 +90,9 @@ def __init__( " named `cache_context`" ) + if num_args is not None and uncached_args is not None: + raise ValueError("Cannot provide both num_args and uncached_args") + if num_args is None: num_args = len(all_args) - 1 if cache_context: @@ -105,6 +110,11 @@ def __init__( # list of the names of the args used as the cache key self.arg_names = all_args[1 : num_args + 1] + # If there are args to not cache on, filter them out (and fix the size of num_args). + if uncached_args is not None: + self.num_args -= len(uncached_args) + self.arg_names = [n for n in self.arg_names if n not in uncached_args] + # self.arg_defaults is a map of arg name to its default value for each # argument that has a default value if arg_spec.defaults: @@ -186,7 +196,9 @@ def __init__( max_entries: int = 1000, cache_context: bool = False, ): - super().__init__(orig, num_args=None, cache_context=cache_context) + super().__init__( + orig, num_args=None, uncached_args=None, cache_context=cache_context + ) self.max_entries = max_entries def __get__(self, obj: Optional[Any], owner: Optional[Type]) -> Callable[..., Any]: @@ -260,6 +272,9 @@ def foo(self, key, cache_context): num_args: number of positional arguments (excluding ``self`` and ``cache_context``) to use as cache keys. Defaults to all named args of the function. + uncached_args: a list of argument names to not use as the cache key. + (``self`` and ``cache_context`` are always ignored.) Cannot be used + with num_args. tree: cache_context: iterable: @@ -273,12 +288,18 @@ def __init__( orig: Callable[..., Any], max_entries: int = 1000, num_args: Optional[int] = None, + uncached_args: Optional[Collection[str]] = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, prune_unread_entries: bool = True, ): - super().__init__(orig, num_args=num_args, cache_context=cache_context) + super().__init__( + orig, + num_args=num_args, + uncached_args=uncached_args, + cache_context=cache_context, + ) if tree and self.num_args < 2: raise RuntimeError( @@ -369,7 +390,7 @@ def __init__( but including list_name) to use as cache keys. Defaults to all named args of the function. 
""" - super().__init__(orig, num_args=num_args) + super().__init__(orig, num_args=num_args, uncached_args=None) self.list_name = list_name @@ -532,6 +553,7 @@ def get_instance( def cached( max_entries: int = 1000, num_args: Optional[int] = None, + uncached_args: Optional[Collection[str]] = None, tree: bool = False, cache_context: bool = False, iterable: bool = False, @@ -541,6 +563,7 @@ def cached( orig, max_entries=max_entries, num_args=num_args, + uncached_args=uncached_args, tree=tree, cache_context=cache_context, iterable=iterable, diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 19741ffcdaf1..b95742f0e432 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -141,6 +141,45 @@ def fn(self, arg1, arg2): self.assertEqual(r, "chips") obj.mock.assert_not_called() + @defer.inlineCallbacks + def test_cache_uncached_args(self): + """ + Only the arguments not named in uncached_args should matter to the cache + + Note that this is identical to test_cache_num_args, but provides the + arguments differently. + """ + + class Cls: + @descriptors.cached(uncached_args=("arg2",)) + def fn(self, arg1, arg2): + return self.mock(arg1, arg2) + + def __init__(self): + self.mock = mock.Mock() + + obj = Cls() + obj.mock.return_value = "fish" + r = yield obj.fn(1, 2) + self.assertEqual(r, "fish") + obj.mock.assert_called_once_with(1, 2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = "chips" + r = yield obj.fn(2, 3) + self.assertEqual(r, "chips") + obj.mock.assert_called_once_with(2, 3) + obj.mock.reset_mock() + + # the two values should now be cached; we should be able to vary + # the second argument and still get the cached result. + r = yield obj.fn(1, 4) + self.assertEqual(r, "fish") + r = yield obj.fn(2, 5) + self.assertEqual(r, "chips") + obj.mock.assert_not_called() + def test_cache_with_sync_exception(self): """If the wrapped function throws synchronously, things should continue to work""" From a4a1c315d90ee29f538389ec2ddfeeaf07e5cb6f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 08:22:18 -0500 Subject: [PATCH 10/21] Require caches to use kwargs. 
--- synapse/storage/databases/main/events_worker.py | 4 ++-- synapse/util/caches/descriptors.py | 6 +++--- tests/util/caches/test_descriptors.py | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 26784f755e40..59454a47dfdd 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1286,7 +1286,7 @@ async def have_seen_events( ) return {eid for ((_rid, eid), have_event) in res.items() if have_event} - @cachedList("have_seen_event", "keys") + @cachedList(cached_method_name="have_seen_event", list_name="keys") async def _have_seen_events_dict( self, keys: Iterable[Tuple[str, str]] ) -> Dict[Tuple[str, str], bool]: @@ -1954,7 +1954,7 @@ def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]: get_event_id_for_timestamp_txn, ) - @cachedList("is_partial_state_event", list_name="event_ids") + @cachedList(cached_method_name="is_partial_state_event", list_name="event_ids") async def get_partial_state_events( self, event_ids: Collection[str] ) -> Dict[str, bool]: diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index b4cc5067a862..8edde490a42f 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -140,8 +140,7 @@ class _LruCachedFunction(Generic[F]): def lru_cache( - max_entries: int = 1000, - cache_context: bool = False, + *, max_entries: int = 1000, cache_context: bool = False ) -> Callable[[F], _LruCachedFunction[F]]: """A method decorator that applies a memoizing cache around the function. @@ -551,6 +550,7 @@ def get_instance( def cached( + *, max_entries: int = 1000, num_args: Optional[int] = None, uncached_args: Optional[Collection[str]] = None, @@ -574,7 +574,7 @@ def cached( def cachedList( - cached_method_name: str, list_name: str, num_args: Optional[int] = None + *, cached_method_name: str, list_name: str, num_args: Optional[int] = None ) -> Callable[[F], _CachedFunction[F]]: """Creates a descriptor that wraps a function in a `CacheListDescriptor`. diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index b95742f0e432..a79c875ec53b 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -695,7 +695,7 @@ def __init__(self): def fn(self, arg1, arg2): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") async def list_fn(self, args1, arg2): assert current_context().name == "c1" # we want this to behave like an asynchronous function @@ -754,7 +754,7 @@ def __init__(self): def fn(self, arg1): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") def list_fn(self, args1) -> "Deferred[dict]": return self.mock(args1) @@ -797,7 +797,7 @@ def __init__(self): def fn(self, arg1, arg2): pass - @descriptors.cachedList("fn", "args1") + @descriptors.cachedList(cached_method_name="fn", list_name="args1") async def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function await run_on_reactor() From 18805d02ba09beec941f2df40ab8cea56b043d37 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 08:23:15 -0500 Subject: [PATCH 11/21] Make a function private. 
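
`get_cache_key_builder` is only used from within this module, so rename
it to `_get_cache_key_builder` to make clear that it is not part of the
module's public interface.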
--- synapse/util/caches/descriptors.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 8edde490a42f..ef7a36b21bcd 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -129,7 +129,7 @@ def __init__( self.add_cache_context = cache_context - self.cache_key_builder = get_cache_key_builder( + self.cache_key_builder = _get_cache_key_builder( self.arg_names, self.arg_defaults ) @@ -613,7 +613,7 @@ def batch_do_something(self, first_arg, second_args): return cast(Callable[[F], _CachedFunction[F]], func) -def get_cache_key_builder( +def _get_cache_key_builder( param_names: Sequence[str], param_defaults: Mapping[str, Any] ) -> Callable[[Sequence[Any], Mapping[str, Any]], CacheKey]: """Construct a function which will build cache keys suitable for a cached function From 020f0c60b6cca5b9c4a04bb80142cb8a8d1201a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 09:23:42 -0500 Subject: [PATCH 12/21] Add a test for keyword arguments. --- tests/util/caches/test_descriptors.py | 37 +++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index a79c875ec53b..141afe245ce0 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -180,6 +180,43 @@ def __init__(self): self.assertEqual(r, "chips") obj.mock.assert_not_called() + @defer.inlineCallbacks + def test_cache_kwargs(self): + """Test that keyword arguments are treated properly""" + + class Cls: + def __init__(self): + self.mock = mock.Mock() + + @descriptors.cached() + def fn(self, arg1, kwarg1=2): + return self.mock(arg1, kwarg1=kwarg1) + + obj = Cls() + obj.mock.return_value = "fish" + r = yield obj.fn(1, kwarg1=2) + self.assertEqual(r, "fish") + obj.mock.assert_called_once_with(1, kwarg1=2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = "chips" + r = yield obj.fn(1, kwarg1=3) + self.assertEqual(r, "chips") + obj.mock.assert_called_once_with(1, kwarg1=3) + obj.mock.reset_mock() + + # the values should now be cached. + r = yield obj.fn(1, kwarg1=2) + self.assertEqual(r, "fish") + # We should be able to not provide kwarg1 and get the cached value back. + r = yield obj.fn(1) + self.assertEqual(r, "fish") + # Keyword arguments can be in any order. + r = yield obj.fn(kwarg1=2, arg1=1) + self.assertEqual(r, "fish") + obj.mock.assert_not_called() + def test_cache_with_sync_exception(self): """If the wrapped function throws synchronously, things should continue to work""" From f69b643d903d8e3226070065af1f43bf250fadc1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 09:23:54 -0500 Subject: [PATCH 13/21] Keyword-only arguments are not supported. --- synapse/util/caches/descriptors.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index ef7a36b21bcd..db377a629538 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -78,6 +78,11 @@ def __init__( arg_spec = inspect.getfullargspec(orig) all_args = arg_spec.args + if arg_spec.kwonlyargs: + raise ValueError( + "_CacheDescriptorBase does not support keyword-only arguments." 
+ ) + if "cache_context" in all_args: if not cache_context: raise ValueError( From 8fa311e1f23d0e9770b820c738abdfb9b38383bc Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 09:26:25 -0500 Subject: [PATCH 14/21] Newsfragment --- changelog.d/12189.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/12189.misc diff --git a/changelog.d/12189.misc b/changelog.d/12189.misc new file mode 100644 index 000000000000..015e808e63c7 --- /dev/null +++ b/changelog.d/12189.misc @@ -0,0 +1 @@ +Support skipping some arguments when generating cache keys. From 2d41a6cc9fed68dc914108b601192961095b0a43 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 09:45:20 -0500 Subject: [PATCH 15/21] Add a comment. --- synapse/util/caches/descriptors.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index db377a629538..4ff424481d81 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -78,6 +78,8 @@ def __init__( arg_spec = inspect.getfullargspec(orig) all_args = arg_spec.args + # There's no reason that keyword-only arguments couldn't be supported, + # but right now they're buggy so do not allow them. if arg_spec.kwonlyargs: raise ValueError( "_CacheDescriptorBase does not support keyword-only arguments." From 4460e921397c48e6d1dfd2fdc6517343d7d2980b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 10:35:21 -0500 Subject: [PATCH 16/21] Properly handle passing a uncached parameter as an arg. --- synapse/util/caches/descriptors.py | 33 +++++++++++++++++++-------- tests/util/caches/test_descriptors.py | 18 ++++++++------- 2 files changed, 34 insertions(+), 17 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 4ff424481d81..f6cf896d1d1f 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -119,8 +119,9 @@ def __init__( # If there are args to not cache on, filter them out (and fix the size of num_args). 
if uncached_args is not None: - self.num_args -= len(uncached_args) - self.arg_names = [n for n in self.arg_names if n not in uncached_args] + include_arg_in_cache_key = [n not in uncached_args for n in self.arg_names] + else: + include_arg_in_cache_key = [True] * len(self.arg_names) # self.arg_defaults is a map of arg name to its default value for each # argument that has a default value @@ -137,7 +138,7 @@ def __init__( self.add_cache_context = cache_context self.cache_key_builder = _get_cache_key_builder( - self.arg_names, self.arg_defaults + self.arg_names, include_arg_in_cache_key, self.arg_defaults ) @@ -621,12 +622,15 @@ def batch_do_something(self, first_arg, second_args): def _get_cache_key_builder( - param_names: Sequence[str], param_defaults: Mapping[str, Any] + param_names: Sequence[str], + include_params: Sequence[bool], + param_defaults: Mapping[str, Any], ) -> Callable[[Sequence[Any], Mapping[str, Any]], CacheKey]: """Construct a function which will build cache keys suitable for a cached function Args: param_names: list of formal parameter names for the cached function + include_params: list of bools of whether to include the parameter name in the cache key param_defaults: a mapping from parameter name to default value for that param Returns: @@ -638,6 +642,7 @@ def _get_cache_key_builder( if len(param_names) == 1: nm = param_names[0] + assert include_params[0] is True def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: if nm in kwargs: @@ -650,13 +655,18 @@ def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: else: def get_cache_key(args: Sequence[Any], kwargs: Mapping[str, Any]) -> CacheKey: - return tuple(_get_cache_key_gen(param_names, param_defaults, args, kwargs)) + return tuple( + _get_cache_key_gen( + param_names, include_params, param_defaults, args, kwargs + ) + ) return get_cache_key def _get_cache_key_gen( param_names: Iterable[str], + include_params: Iterable[bool], param_defaults: Mapping[str, Any], args: Sequence[Any], kwargs: Mapping[str, Any], @@ -667,16 +677,21 @@ def _get_cache_key_gen( This is essentially the same operation as `inspect.getcallargs`, but optimised so that we don't need to inspect the target function for each call. """ + if param_names == (): + pass # We loop through each arg name, looking up if its in the `kwargs`, # otherwise using the next argument in `args`. If there are no more # args then we try looking the arg name up in the defaults. pos = 0 - for nm in param_names: + for nm, inc in zip(param_names, include_params): if nm in kwargs: - yield kwargs[nm] + if inc: + yield kwargs[nm] elif pos < len(args): - yield args[pos] + if inc: + yield args[pos] pos += 1 else: - yield param_defaults[nm] + if inc: + yield param_defaults[nm] diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 141afe245ce0..6a4b17527a7f 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -151,32 +151,34 @@ def test_cache_uncached_args(self): """ class Cls: + # Note that it is important that this is not the last argument to + # test behaviour of skipping arguments properly. 
@descriptors.cached(uncached_args=("arg2",)) - def fn(self, arg1, arg2): - return self.mock(arg1, arg2) + def fn(self, arg1, arg2, arg3): + return self.mock(arg1, arg2, arg3) def __init__(self): self.mock = mock.Mock() obj = Cls() obj.mock.return_value = "fish" - r = yield obj.fn(1, 2) + r = yield obj.fn(1, 2, 3) self.assertEqual(r, "fish") - obj.mock.assert_called_once_with(1, 2) + obj.mock.assert_called_once_with(1, 2, 3) obj.mock.reset_mock() # a call with different params should call the mock again obj.mock.return_value = "chips" - r = yield obj.fn(2, 3) + r = yield obj.fn(2, 3, 4) self.assertEqual(r, "chips") - obj.mock.assert_called_once_with(2, 3) + obj.mock.assert_called_once_with(2, 3, 4) obj.mock.reset_mock() # the two values should now be cached; we should be able to vary # the second argument and still get the cached result. - r = yield obj.fn(1, 4) + r = yield obj.fn(1, 4, 3) self.assertEqual(r, "fish") - r = yield obj.fn(2, 5) + r = yield obj.fn(2, 5, 4) self.assertEqual(r, "chips") obj.mock.assert_not_called() From 02fc051fac90bba8453a8de7dbfa850f430f980c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 11:08:25 -0500 Subject: [PATCH 17/21] Remove debugging code. --- synapse/util/caches/descriptors.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index f6cf896d1d1f..c3c5c16db96e 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -677,9 +677,6 @@ def _get_cache_key_gen( This is essentially the same operation as `inspect.getcallargs`, but optimised so that we don't need to inspect the target function for each call. """ - if param_names == (): - pass - # We loop through each arg name, looking up if its in the `kwargs`, # otherwise using the next argument in `args`. If there are no more # args then we try looking the arg name up in the defaults. From ecbc2d9dab180ddaa73ef6f2579684c42df596b6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 11:11:47 -0500 Subject: [PATCH 18/21] Use uncached_args parameter instead of num_args. 
--- synapse/rest/client/relations.py | 4 ++-- synapse/storage/databases/main/relations.py | 11 +++++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index d3e84209753d..d9a6be43f793 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -92,6 +92,7 @@ async def on_GET( pagination_chunk = await self.store.get_relations_for_event( event_id=parent_id, + event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -99,7 +100,6 @@ async def on_GET( direction=direction, from_token=from_token, to_token=to_token, - event=event, ) events = await self.store.get_events_as_list( @@ -288,6 +288,7 @@ async def on_GET( result = await self.store.get_relations_for_event( event_id=parent_id, + event=event, room_id=room_id, relation_type=relation_type, event_type=event_type, @@ -295,7 +296,6 @@ async def on_GET( limit=limit, from_token=from_token, to_token=to_token, - event=event, ) events = await self.store.get_events_as_list( diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index cd3a435ae739..be1500092b5b 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -91,10 +91,11 @@ def __init__( self._msc3440_enabled = hs.config.experimental.msc3440_enabled - @cached(num_args=9, tree=True) + @cached(uncached_args=("event",), tree=True) async def get_relations_for_event( self, event_id: str, + event: EventBase, room_id: str, relation_type: Optional[str] = None, event_type: Optional[str] = None, @@ -103,12 +104,12 @@ async def get_relations_for_event( direction: str = "b", from_token: Optional[StreamToken] = None, to_token: Optional[StreamToken] = None, - event: Optional[EventBase] = None, ) -> PaginationChunk: """Get a list of relations for an event, ordered by topological ordering. Args: event_id: Fetch events that relate to this event ID. + event: The matching EventBase to event_id. room_id: The room the event belongs to. relation_type: Only fetch events with this relation type, if given. event_type: Only fetch events with this event type, if given. @@ -118,15 +119,13 @@ async def get_relations_for_event( oldest first (`"f"`). from_token: Fetch rows from the given token, or from the start if None. to_token: Fetch rows up to the given token, or up to the end if None. - event: The matching EventBase to event_id. This *must* be provided. Returns: List of event IDs that match relations requested. The rows are of the form `{"event_id": "..."}`. """ - # We don't use `event_id`, its there so that we can cache based on + # We don't use `event_id`, it's there so that we can cache based on # it. The `event_id` must match the `event.event_id`. - assert event is not None assert event.event_id == event_id where_clause = ["relates_to_id = ?", "room_id = ?"] @@ -786,7 +785,7 @@ async def _get_bundled_aggregation_for_event( ) references = await self.get_relations_for_event( - event_id, room_id, RelationTypes.REFERENCE, direction="f", event=event + event_id, event, room_id, RelationTypes.REFERENCE, direction="f" ) if references.chunk: aggregations.references = await references.to_dict(cast("DataStore", self)) From f15a0310d8f0483ef2789e198ab972763b968c40 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 11:27:01 -0500 Subject: [PATCH 19/21] Move cache invalidation to a single place. 
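
Invalidate `get_relations_for_event` and `get_applicable_edit` from
`_store_redaction` instead of `_handle_redact_relations`, so that the
invalidation happens alongside the existing `_invalidate_get_event_cache`
call whenever a redaction is persisted.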
--- synapse/storage/databases/main/events.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 662c6362792f..632643db1a8c 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1619,9 +1619,15 @@ def prefill(): txn.call_after(prefill) - def _store_redaction(self, txn, event): - # invalidate the cache for the redacted event + def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: + # invalidate the caches for the redacted event txn.call_after(self.store._invalidate_get_event_cache, event.redacts) + self.store._invalidate_cache_and_stream( + txn, self.store.get_relations_for_event, (event.redacts,) + ) + self.store._invalidate_cache_and_stream( + txn, self.store.get_applicable_edit, (event.redacts,) + ) self.db_pool.simple_upsert_txn( txn, @@ -1978,15 +1984,6 @@ def _handle_redact_relations( txn, self.store.get_thread_participated, (redacted_relates_to,) ) - # Caches which might leak edits must be invalidated for the event being - # redacted. - self.store._invalidate_cache_and_stream( - txn, self.store.get_relations_for_event, (redacted_event_id,) - ) - self.store._invalidate_cache_and_stream( - txn, self.store.get_applicable_edit, (redacted_event_id,) - ) - self.db_pool.simple_delete_txn( txn, table="event_relations", keyvalues={"event_id": redacted_event_id} ) From f885036621ef541646009c7e1af310ff371b58b2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 9 Mar 2022 13:26:44 -0500 Subject: [PATCH 20/21] Newsfragment --- changelog.d/12189.bugfix | 1 + changelog.d/12189.misc | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/12189.bugfix delete mode 100644 changelog.d/12189.misc diff --git a/changelog.d/12189.bugfix b/changelog.d/12189.bugfix new file mode 100644 index 000000000000..df9b0dc413dd --- /dev/null +++ b/changelog.d/12189.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug when redacting events with relations. diff --git a/changelog.d/12189.misc b/changelog.d/12189.misc deleted file mode 100644 index 015e808e63c7..000000000000 --- a/changelog.d/12189.misc +++ /dev/null @@ -1 +0,0 @@ -Support skipping some arguments when generating cache keys. From d2321b91d271b83b6fca48a74b07c80dc487aed3 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 10 Mar 2022 07:58:33 -0500 Subject: [PATCH 21/21] Avoid streaming caches which will be invalidated anyway. --- synapse/storage/databases/main/events.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 632643db1a8c..1a322882bf3f 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1620,14 +1620,11 @@ def prefill(): txn.call_after(prefill) def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None: - # invalidate the caches for the redacted event + # Invalidate the caches for the redacted event, note that these caches + # are also cleared as part of event replication in _invalidate_caches_for_event. 
         txn.call_after(self.store._invalidate_get_event_cache, event.redacts)
-        self.store._invalidate_cache_and_stream(
-            txn, self.store.get_relations_for_event, (event.redacts,)
-        )
-        self.store._invalidate_cache_and_stream(
-            txn, self.store.get_applicable_edit, (event.redacts,)
-        )
+        txn.call_after(self.store.get_relations_for_event.invalidate, (event.redacts,))
+        txn.call_after(self.store.get_applicable_edit.invalidate, (event.redacts,))
 
         self.db_pool.simple_upsert_txn(
             txn,