Skip to content
This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Fix additional type hints #9543

Merged
merged 9 commits into from
Mar 9, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelog.d/9543.misc
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fix incorrect type hints.
5 changes: 4 additions & 1 deletion synapse/config/logger.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,10 @@
from string import Template

import yaml
from zope.interface import implementer

from twisted.logger import (
ILogObserver,
LogBeginner,
STDLibLogObserver,
eventAsText,
Expand Down Expand Up @@ -227,7 +229,8 @@ def factory(*args, **kwargs):

threadlocal = threading.local()

def _log(event):
@implementer(ILogObserver)
def _log(event: dict) -> None:
if "log_text" in event:
if event["log_text"].startswith("DNSDatagramProtocol starting on "):
return
Expand Down
2 changes: 1 addition & 1 deletion synapse/federation/federation_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -360,7 +360,7 @@ async def process_pdus_for_room(room_id: str):
logger.error(
"Failed to handle PDU %s",
event_id,
exc_info=(f.type, f.value, f.getTracebackObject()),
exc_info=(f.type, f.value, f.getTracebackObject()), # type: ignore
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

mypy thinks that one of these doesn't match the type signature for logger.error, but empirically it works...

synapse/federation/federation_server.py:364: error: Argument "exc_info" to "error" of "Logger" has incompatible type "Tuple[Optional[Type[BaseException]], Optional[BaseException], Any]"; expected "Union[None, bool, Union[Tuple[type, BaseException, Optional[TracebackType]], Tuple[None, None, None]], BaseException]" [arg-type]

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@richvdh do you remember why we can't just use logger.exception here?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

er we probably can just s/error/exception/, if that helps, though I don't think it will.

or is your question why we pass in an explicit exc_info rather than letting logging pull it from sys.exc_info()? in which case the answer is that f.getTracebackObject has magic to build a more useful stacktrace in the face of async execution. (It's possible that it's better nowadays with proper async/await, but I suspect it's not.)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the background! I think leaving the ignore here is the best way forward at the moment without getting into a rabbit hole of logging.

)

await concurrently_execute(
Expand Down
2 changes: 1 addition & 1 deletion synapse/handlers/pagination.py
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ async def _purge_history(
except Exception:
f = Failure()
logger.error(
"[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject())
"[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject()) # type: ignore
)
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
finally:
Expand Down
3 changes: 2 additions & 1 deletion synapse/http/federation/well_known_resolver.py
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,8 @@ def _cache_period_from_headers(

def _parse_cache_control(headers: Headers) -> Dict[bytes, Optional[bytes]]:
cache_controls = {}
for hdr in headers.getRawHeaders(b"cache-control", []):
cache_control_headers = headers.getRawHeaders(b"cache-control") or []
for hdr in cache_control_headers:
for directive in hdr.split(b","):
splits = [x.strip() for x in directive.split(b"=", 1)]
k = splits[0].lower()
Expand Down
6 changes: 4 additions & 2 deletions synapse/logging/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -669,7 +669,7 @@ def g(*args, **kwargs):
return g


def run_in_background(f, *args, **kwargs):
def run_in_background(f, *args, **kwargs) -> defer.Deferred:
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this isn't 100% true actually since if the result is not a Deferred then we return it raw, which seems confusing. I think we should likely wrap that in defer.succeed?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, probably. I don't think we should lie about our return value here.

Copy link
Member Author

@clokep clokep Mar 8, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wrapped the raw return value in defer.succeed, I suspect it doesn't really make sense to call run_in_background with a sync function, but 🤷 This would have also broken when using await, so maybe we should make this an error instead?

"""Calls a function, ensuring that the current context is restored after
return from the function, and that the sentinel context is set once the
deferred returned by the function completes.
Expand Down Expand Up @@ -697,8 +697,10 @@ def run_in_background(f, *args, **kwargs):
if isinstance(res, types.CoroutineType):
res = defer.ensureDeferred(res)

# At this point we should have a Deferred, if not then f was a synchronous
# function, wrap it in a Deferred for consistency.
if not isinstance(res, defer.Deferred):
return res
return defer.succeed(res)

if res.called and not res.paused:
# The function should have maintained the logcontext, so we can
Expand Down
27 changes: 17 additions & 10 deletions tests/replication/_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from twisted.internet.task import LoopingCall
from twisted.web.http import HTTPChannel
from twisted.web.resource import Resource
from twisted.web.server import Request, Site

from synapse.app.generic_worker import (
GenericWorkerReplicationHandler,
Expand All @@ -32,7 +33,10 @@
from synapse.replication.http import ReplicationRestResource
from synapse.replication.tcp.handler import ReplicationCommandHandler
from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.replication.tcp.resource import (
ReplicationStreamProtocolFactory,
ServerReplicationStreamProtocol,
)
from synapse.server import HomeServer
from synapse.util import Clock

Expand All @@ -59,7 +63,9 @@ def prepare(self, reactor, clock, hs):
# build a replication server
server_factory = ReplicationStreamProtocolFactory(hs)
self.streamer = hs.get_replication_streamer()
self.server = server_factory.buildProtocol(None)
self.server = server_factory.buildProtocol(
None
) # type: ServerReplicationStreamProtocol

# Make a new HomeServer object for the worker
self.reactor.lookups["testserv"] = "1.2.3.4"
Expand Down Expand Up @@ -155,9 +161,7 @@ def handle_http_replication_attempt(self) -> SynapseRequest:
request_factory = OneShotRequestFactory()

# Set up the server side protocol
channel = _PushHTTPChannel(self.reactor)
channel.requestFactory = request_factory
channel.site = self.site
channel = _PushHTTPChannel(self.reactor, request_factory, self.site)

# Connect client to server and vice versa.
client_to_server_transport = FakeTransport(
Expand Down Expand Up @@ -188,8 +192,9 @@ def assert_request_is_get_repl_stream_updates(
fetching updates for given stream.
"""

path = request.path # type: bytes # type: ignore
self.assertRegex(
request.path,
path,
br"^/_synapse/replication/get_repl_stream_updates/%s/[^/]+$"
% (stream_name.encode("ascii"),),
)
Expand Down Expand Up @@ -390,9 +395,7 @@ def _handle_http_replication_attempt(self, hs, repl_port):
request_factory = OneShotRequestFactory()

# Set up the server side protocol
channel = _PushHTTPChannel(self.reactor)
channel.requestFactory = request_factory
channel.site = self._hs_to_site[hs]
channel = _PushHTTPChannel(self.reactor, request_factory, self._hs_to_site[hs])

# Connect client to server and vice versa.
client_to_server_transport = FakeTransport(
Expand Down Expand Up @@ -475,9 +478,13 @@ class _PushHTTPChannel(HTTPChannel):
makes it very hard to test.
"""

def __init__(self, reactor: IReactorTime):
def __init__(
self, reactor: IReactorTime, request_factory: Callable[..., Request], site: Site
):
super().__init__()
self.reactor = reactor
self.requestFactory = request_factory
self.site = site

self._pull_to_push_producer = None # type: Optional[_PullToPushProducer]

Expand Down
2 changes: 1 addition & 1 deletion tests/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ def getResourceFor(self, request):

def make_request(
reactor,
site: Site,
site: Union[Site, FakeSite],
method,
path,
content=b"",
Expand Down
2 changes: 1 addition & 1 deletion tests/test_utils/logging_setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ class ToTwistedHandler(logging.Handler):
def emit(self, record):
log_entry = self.format(record)
log_level = record.levelname.lower().replace("warning", "warn")
self.tx_log.emit(
self.tx_log.emit( # type: ignore
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For some reason mypy thinks that tx_log here is object, I'm not really sure why though... Anyway, it complains that object has no emit method.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I suspect this is fixed by twisted/twisted#1541?

twisted.logger.LogLevel.levelWithName(log_level), "{entry}", entry=log_entry
)

Expand Down