Support any process writing to cache invalidation stream. (#7436)
erikjohnston committed May 7, 2020
1 parent 2929ce2 commit d7983b6
Showing 26 changed files with 226 additions and 231 deletions.
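In brief, the commit makes two coordinated changes, visible across the diffs below: every process_replication_rows override gains an instance_name argument identifying which process wrote a batch of rows, and the cache invalidation stream moves from a master-only ID tracker to a MultiWriterIdGenerator backed by a per-instance table, so any process can append to it. A minimal sketch of the signature change (bodies elided):

# Before: rows were implicitly attributed to the master process.
def process_replication_rows(self, stream_name, token, rows):
    ...

# After: the writing process is named explicitly, so any worker
# can be the source of a stream's rows.
def process_replication_rows(self, stream_name, instance_name, token, rows):
    ...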
changelog.d/7436.misc (1 change: 1 addition & 0 deletions)
@@ -0,0 +1 @@
+Support any process writing to cache invalidation stream.
docs/tcp_replication.md (4 changes: 0 additions & 4 deletions)
@@ -219,10 +219,6 @@ Asks the server for the current position of all streams.

 Inform the server a pusher should be removed

-#### INVALIDATE_CACHE (C)
-
-Inform the server a cache should be invalidated
-
 ### REMOTE_SERVER_UP (S, C)

 Inform other processes that a remote server may have come back online.
scripts/synapse_port_db (4 changes: 2 additions & 2 deletions)
@@ -122,7 +122,7 @@ APPEND_ONLY_TABLES = [
     "presence_stream",
     "push_rules_stream",
     "ex_outlier_stream",
-    "cache_invalidation_stream",
+    "cache_invalidation_stream_by_instance",
     "public_room_list_stream",
     "state_group_edges",
     "stream_ordering_to_exterm",
@@ -188,7 +188,7 @@ class MockHomeserver:
         self.clock = Clock(reactor)
         self.config = config
         self.hostname = config.server_name
-        self.version_string = "Synapse/"+get_version_string(synapse)
+        self.version_string = "Synapse/" + get_version_string(synapse)

     def get_clock(self):
         return self.clock
synapse/replication/slave/storage/_base.py (50 changes: 11 additions & 39 deletions)
@@ -18,14 +18,10 @@

 import six

-from synapse.storage.data_stores.main.cache import (
-    CURRENT_STATE_CACHE_NAME,
-    CacheInvalidationWorkerStore,
-)
+from synapse.storage.data_stores.main.cache import CacheInvalidationWorkerStore
 from synapse.storage.database import Database
 from synapse.storage.engines import PostgresEngine

-from ._slaved_id_tracker import SlavedIdTracker
+from synapse.storage.util.id_generators import MultiWriterIdGenerator

 logger = logging.getLogger(__name__)

@@ -41,40 +37,16 @@ class BaseSlavedStore(CacheInvalidationWorkerStore):
     def __init__(self, database: Database, db_conn, hs):
         super(BaseSlavedStore, self).__init__(database, db_conn, hs)
         if isinstance(self.database_engine, PostgresEngine):
-            self._cache_id_gen = SlavedIdTracker(
-                db_conn, "cache_invalidation_stream", "stream_id"
-            )  # type: Optional[SlavedIdTracker]
+            self._cache_id_gen = MultiWriterIdGenerator(
+                db_conn,
+                database,
+                instance_name=hs.get_instance_name(),
+                table="cache_invalidation_stream_by_instance",
+                instance_column="instance_name",
+                id_column="stream_id",
+                sequence_name="cache_invalidation_stream_seq",
+            )  # type: Optional[MultiWriterIdGenerator]
         else:
             self._cache_id_gen = None

         self.hs = hs

-    def get_cache_stream_token(self):
-        if self._cache_id_gen:
-            return self._cache_id_gen.get_current_token()
-        else:
-            return 0
-
-    def process_replication_rows(self, stream_name, token, rows):
-        if stream_name == "caches":
-            if self._cache_id_gen:
-                self._cache_id_gen.advance(token)
-            for row in rows:
-                if row.cache_func == CURRENT_STATE_CACHE_NAME:
-                    if row.keys is None:
-                        raise Exception(
-                            "Can't send an 'invalidate all' for current state cache"
-                        )
-
-                    room_id = row.keys[0]
-                    members_changed = set(row.keys[1:])
-                    self._invalidate_state_caches(room_id, members_changed)
-                else:
-                    self._attempt_to_invalidate_cache(row.cache_func, row.keys)
-
-    def _invalidate_cache_and_stream(self, txn, cache_func, keys):
-        txn.call_after(cache_func.invalidate, keys)
-        txn.call_after(self._send_invalidation_poke, cache_func, keys)
-
-    def _send_invalidation_poke(self, cache_func, keys):
-        self.hs.get_tcp_replication().send_invalidate_cache(cache_func, keys)
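The keyword arguments above are essentially the whole contract: a shared Postgres sequence (cache_invalidation_stream_seq) hands out globally ordered stream IDs, and each row of cache_invalidation_stream_by_instance records which instance_name wrote it. Below is a conceptual toy, not the real Synapse class: only the get_current_token and advance names mirror calls visible in this diff, and everything else is illustrative.

import itertools
from typing import Dict

class ToyMultiWriterIdGen:
    """Illustrates the bookkeeping a multi-writer ID generator needs."""

    def __init__(self) -> None:
        self._seq = itertools.count(1)        # stands in for cache_invalidation_stream_seq
        self._positions: Dict[str, int] = {}  # instance_name -> last stream_id seen

    def get_next(self, instance_name: str) -> int:
        # Writers allocate the next globally ordered ID and record it
        # against their own instance name.
        stream_id = next(self._seq)
        self._positions[instance_name] = stream_id
        return stream_id

    def advance(self, instance_name: str, stream_id: int) -> None:
        # Readers bump a writer's position as its rows are replicated.
        current = self._positions.get(instance_name, 0)
        self._positions[instance_name] = max(current, stream_id)

    def get_current_token(self) -> int:
        # A safe global position: every writer has reached at least this ID.
        return min(self._positions.values(), default=0)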
synapse/replication/slave/storage/account_data.py (6 changes: 2 additions & 4 deletions)
@@ -32,7 +32,7 @@ def __init__(self, database: Database, db_conn, hs):
     def get_max_account_data_stream_id(self):
         return self._account_data_id_gen.get_current_token()

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "tag_account_data":
             self._account_data_id_gen.advance(token)
             for row in rows:
@@ -51,6 +51,4 @@ def process_replication_rows(self, stream_name, token, rows):
                     (row.user_id, row.room_id, row.data_type)
                 )
             self._account_data_stream_cache.entity_has_changed(row.user_id, token)
-        return super(SlavedAccountDataStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
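The same mechanical edit repeats in each slaved store that follows: accept instance_name, do the store-specific position bump and cache pokes, then chain to super() with all four arguments. A generic sketch of the pattern, with illustrative store, stream, and attribute names (only the signature and the super() call are taken from the diff):

from synapse.replication.slave.storage._base import BaseSlavedStore

class ExampleSlavedStore(BaseSlavedStore):
    def process_replication_rows(self, stream_name, instance_name, token, rows):
        if stream_name == "example_stream":
            # Advance this store's local view of the stream...
            self._example_id_gen.advance(token)
            for row in rows:
                # ...and invalidate whatever it caches about each row.
                self._example_stream_cache.entity_has_changed(row.user_id, token)
        # Always chain through, passing instance_name along unchanged.
        return super().process_replication_rows(stream_name, instance_name, token, rows)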
synapse/replication/slave/storage/deviceinbox.py (6 changes: 2 additions & 4 deletions)
@@ -43,7 +43,7 @@ def __init__(self, database: Database, db_conn, hs):
             expiry_ms=30 * 60 * 1000,
         )

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "to_device":
             self._device_inbox_id_gen.advance(token)
             for row in rows:
@@ -55,6 +55,4 @@ def process_replication_rows(self, stream_name, token, rows):
                 self._device_federation_outbox_stream_cache.entity_has_changed(
                     row.entity, token
                 )
-        return super(SlavedDeviceInboxStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
synapse/replication/slave/storage/devices.py (6 changes: 2 additions & 4 deletions)
@@ -48,17 +48,15 @@ def __init__(self, database: Database, db_conn, hs):
             "DeviceListFederationStreamChangeCache", device_list_max
         )

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == DeviceListsStream.NAME:
             self._device_list_id_gen.advance(token)
             self._invalidate_caches_for_devices(token, rows)
         elif stream_name == UserSignatureStream.NAME:
             self._device_list_id_gen.advance(token)
             for row in rows:
                 self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
-        return super(SlavedDeviceStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)

     def _invalidate_caches_for_devices(self, token, rows):
         for row in rows:
synapse/replication/slave/storage/events.py (6 changes: 2 additions & 4 deletions)
@@ -93,7 +93,7 @@ def get_room_max_stream_ordering(self):
     def get_room_min_stream_ordering(self):
         return self._backfill_id_gen.get_current_token()

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "events":
             self._stream_id_gen.advance(token)
             for row in rows:
@@ -111,9 +111,7 @@ def process_replication_rows(self, stream_name, token, rows):
                     row.relates_to,
                     backfilled=True,
                 )
-        return super(SlavedEventStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)

     def _process_event_stream_row(self, token, row):
         data = row.data
synapse/replication/slave/storage/groups.py (6 changes: 2 additions & 4 deletions)
@@ -37,12 +37,10 @@ def __init__(self, database: Database, db_conn, hs):
     def get_group_stream_token(self):
         return self._group_updates_id_gen.get_current_token()

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "groups":
             self._group_updates_id_gen.advance(token)
             for row in rows:
                 self._group_updates_stream_cache.entity_has_changed(row.user_id, token)

-        return super(SlavedGroupServerStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
synapse/replication/slave/storage/presence.py (6 changes: 2 additions & 4 deletions)
@@ -41,12 +41,10 @@ def __init__(self, database: Database, db_conn, hs):
     def get_current_presence_token(self):
         return self._presence_id_gen.get_current_token()

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "presence":
             self._presence_id_gen.advance(token)
             for row in rows:
                 self.presence_stream_cache.entity_has_changed(row.user_id, token)
                 self._get_presence_for_user.invalidate((row.user_id,))
-        return super(SlavedPresenceStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
synapse/replication/slave/storage/push_rule.py (6 changes: 2 additions & 4 deletions)
@@ -37,13 +37,11 @@ def get_push_rules_stream_token(self):
     def get_max_push_rules_stream_id(self):
         return self._push_rules_stream_id_gen.get_current_token()

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "push_rules":
             self._push_rules_stream_id_gen.advance(token)
             for row in rows:
                 self.get_push_rules_for_user.invalidate((row.user_id,))
                 self.get_push_rules_enabled_for_user.invalidate((row.user_id,))
                 self.push_rules_stream_cache.entity_has_changed(row.user_id, token)
-        return super(SlavedPushRuleStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
synapse/replication/slave/storage/pushers.py (6 changes: 2 additions & 4 deletions)
@@ -31,9 +31,7 @@ def __init__(self, database: Database, db_conn, hs):
     def get_pushers_stream_token(self):
         return self._pushers_id_gen.get_current_token()

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "pushers":
             self._pushers_id_gen.advance(token)
-        return super(SlavedPusherStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
synapse/replication/slave/storage/receipts.py (6 changes: 2 additions & 4 deletions)
@@ -51,7 +51,7 @@ def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id):
         self._invalidate_get_users_with_receipts_in_room(room_id, receipt_type, user_id)
         self.get_receipts_for_room.invalidate((room_id, receipt_type))

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "receipts":
             self._receipts_id_gen.advance(token)
             for row in rows:
@@ -60,6 +60,4 @@ def process_replication_rows(self, stream_name, token, rows):
                 )
                 self._receipts_stream_cache.entity_has_changed(row.room_id, token)

-        return super(SlavedReceiptsStore, self).process_replication_rows(
-            stream_name, token, rows
-        )
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
synapse/replication/slave/storage/room.py (4 changes: 2 additions & 2 deletions)
@@ -30,8 +30,8 @@ def __init__(self, database: Database, db_conn, hs):
     def get_current_public_room_stream_id(self):
         return self._public_room_id_gen.get_current_token()

-    def process_replication_rows(self, stream_name, token, rows):
+    def process_replication_rows(self, stream_name, instance_name, token, rows):
         if stream_name == "public_rooms":
             self._public_room_id_gen.advance(token)

-        return super(RoomStore, self).process_replication_rows(stream_name, token, rows)
+        return super().process_replication_rows(stream_name, instance_name, token, rows)
synapse/replication/tcp/client.py (6 changes: 3 additions & 3 deletions)
@@ -100,10 +100,10 @@ async def on_rdata(
             token: stream token for this batch of rows
             rows: a list of Stream.ROW_TYPE objects as returned by Stream.parse_row.
         """
-        self.store.process_replication_rows(stream_name, token, rows)
+        self.store.process_replication_rows(stream_name, instance_name, token, rows)

-    async def on_position(self, stream_name: str, token: int):
-        self.store.process_replication_rows(stream_name, token, [])
+    async def on_position(self, stream_name: str, instance_name: str, token: int):
+        self.store.process_replication_rows(stream_name, instance_name, token, [])

     def on_remote_server_up(self, server: str):
         """Called when get a new REMOTE_SERVER_UP command."""
synapse/replication/tcp/commands.py (33 changes: 0 additions & 33 deletions)
@@ -341,37 +341,6 @@ def to_line(self):
         return " ".join((self.app_id, self.push_key, self.user_id))


-class InvalidateCacheCommand(Command):
-    """Sent by the client to invalidate an upstream cache.
-
-    THIS IS NOT RELIABLE, AND SHOULD *NOT* BE USED ACCEPT FOR THINGS THAT ARE
-    NOT DISASTROUS IF WE DROP ON THE FLOOR.
-
-    Mainly used to invalidate destination retry timing caches.
-
-    Format::
-
-        INVALIDATE_CACHE <cache_func> <keys_json>
-
-    Where <keys_json> is a json list.
-    """
-
-    NAME = "INVALIDATE_CACHE"
-
-    def __init__(self, cache_func, keys):
-        self.cache_func = cache_func
-        self.keys = keys
-
-    @classmethod
-    def from_line(cls, line):
-        cache_func, keys_json = line.split(" ", 1)
-
-        return cls(cache_func, json.loads(keys_json))
-
-    def to_line(self):
-        return " ".join((self.cache_func, _json_encoder.encode(self.keys)))
-
-
 class UserIpCommand(Command):
     """Sent periodically when a worker sees activity from a client.
@@ -439,7 +408,6 @@ class RemoteServerUpCommand(_SimpleCommand):
     UserSyncCommand,
     FederationAckCommand,
     RemovePusherCommand,
-    InvalidateCacheCommand,
     UserIpCommand,
     RemoteServerUpCommand,
     ClearUserSyncsCommand,
@@ -467,7 +435,6 @@ class RemoteServerUpCommand(_SimpleCommand):
     ClearUserSyncsCommand.NAME,
     FederationAckCommand.NAME,
     RemovePusherCommand.NAME,
-    InvalidateCacheCommand.NAME,
     UserIpCommand.NAME,
     ErrorCommand.NAME,
     RemoteServerUpCommand.NAME,
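Removing InvalidateCacheCommand from the command classes and from both valid-command tables closes off the old, unreliable side channel: instead of a worker asking the master to drop a cache entry over TCP, invalidations are now written as rows to the caches stream (backed by cache_invalidation_stream_by_instance), which any process may write to and every process replays in order. For reference, the removed wire format looked like the line below; the cache name is an illustrative guess based on the docstring's mention of destination retry timings.

    INVALIDATE_CACHE get_destination_retry_timings ["example.com"]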