Add StrictFIFOLock as an alias for Lock
njsmith committed Apr 28, 2017
1 parent: 13b2a8b · commit: f08220b
Showing 4 changed files with 88 additions and 6 deletions.
docs/source/reference-core.rst (3 additions, 0 deletions)
@@ -1289,6 +1289,9 @@ don't have any special access to trio's internals.)
 .. autoclass:: Lock
    :members:
 
+.. autoclass:: StrictFIFOLock
+   :members:
+
 .. autoclass:: Condition
    :members:
 
docs/source/reference-io.rst (6 additions, 0 deletions)
@@ -232,8 +232,14 @@ The abstract Stream API
 TLS support
 -----------
 
+.. module:: trio.ssl
+
 `Not implemented yet! <https://github.com/python-trio/trio/issues/9>`__
 
+.. autoclass:: SSLStream
+   :members:
+   :undoc-members:
+
 
 Async disk I/O
 --------------
trio/_sync.py (68 additions, 3 deletions)
@@ -6,7 +6,8 @@
 from . import _core
 from ._util import aiter_compat
 
-__all__ = ["Event", "Semaphore", "Lock", "Condition", "Queue"]
+__all__ = [
+    "Event", "Semaphore", "Lock", "StrictFIFOLock", "Condition", "Queue"]
 
 @attr.s(slots=True, repr=False, cmp=False, hash=False)
 class Event:
@@ -247,7 +248,8 @@ def __repr__(self):
         else:
             s1 = "unlocked"
             s2 = ""
-        return "<{} trio.Lock object at {:#x}{}>".format(s1, id(self), s2)
+        return ("<{} {} object at {:#x}{}>"
+                .format(s1, self.__class__.__name__, id(self), s2))
 
     def locked(self):
         """Check whether the lock is currently held.
@@ -327,6 +329,69 @@ def statistics(self):
         )
 
 
+class StrictFIFOLock(Lock):
+    """A variant of :class:`Lock` where tasks are guaranteed to acquire the
+    lock in strict first-come-first-served order.
+
+    An example of when this is useful is if you're implementing something like
+    :class:`trio.ssl.SSLStream` or an HTTP/2 server using `h2
+    <https://hyper-h2.readthedocs.io/>`__, where you have multiple concurrent
+    tasks that are interacting with a shared state machine, and at
+    unpredictable moments the state machine requests that a chunk of data be
+    sent over the network. (For example, when using h2 simply reading incoming
+    data can occasionally `create outgoing data to send
+    <https://http2.github.io/http2-spec/#PING>`__.) The challenge is to make
+    sure that these chunks are sent in the correct order, without being
+    garbled.
+
+    One option would be to use a regular :class:`Lock`, and wrap it around
+    every interaction with the state machine::
+
+        # This approach is sometimes workable but often sub-optimal; see below
+        async with lock:
+            state_machine.do_something()
+            if state_machine.has_data_to_send():
+                await conn.sendall(state_machine.get_data_to_send())
+
+    But this can be problematic. If you're using h2 then *usually* reading
+    incoming data doesn't create the need to send any data, so we don't want
+    to force every task that tries to read from the network to sit and wait
+    a potentially long time for ``sendall`` to finish. And in some situations
+    this could even potentially cause a deadlock, if the remote peer is
+    waiting for you to read some data before it accepts the data you're
+    sending.
+
+    :class:`StrictFIFOLock` provides an alternative. We can rewrite our
+    example like::
+
+        # Note: no awaits between when we start using the state machine and
+        # when we block to take the lock!
+        state_machine.do_something()
+        if state_machine.has_data_to_send():
+            # Notice that we fetch the data to send out of the state machine
+            # *before* sleeping, so that other tasks won't see it.
+            chunk = state_machine.get_data_to_send()
+            async with strict_fifo_lock:
+                await conn.sendall(chunk)
+
+    First we do all our interaction with the state machine in a single
+    scheduling quantum (notice there are no ``await``\s in there), so it's
+    automatically atomic with respect to other tasks. And then if and only if
+    we have data to send, we get in line to send it – and
+    :class:`StrictFIFOLock` guarantees that each task will send its data in
+    the same order that the state machine generated it.
+
+    Currently, :class:`StrictFIFOLock` is simply an alias for :class:`Lock`,
+    but (a) this may not always be true in the future, especially if trio ever
+    implements `more sophisticated scheduling policies
+    <https://github.com/python-trio/trio/issues/32>`__, and (b) the above code
+    is relying on a pretty subtle property of its lock. Using a
+    :class:`StrictFIFOLock` acts as an executable reminder that you're relying
+    on this property.
+
+    """
+
+
 @attr.s(frozen=True)
 class _ConditionStatistics:
     tasks_waiting = attr.ib()
@@ -350,7 +415,7 @@ class Condition:
     def __init__(self, lock=None):
         if lock is None:
             lock = Lock()
-        if not isinstance(lock, Lock):
+        if type(lock) is not Lock:
             raise TypeError("lock must be a trio.Lock")
         self._lock = lock
         self._lot = _core.ParkingLot()
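
The guarantee this new class documents can be seen directly. Below is a
minimal sketch (not part of this commit) that parks several tasks on a
StrictFIFOLock and checks that they acquire it in the order they arrived. It
assumes a current trio API (trio.run, nursery.start_soon); the task-spawning
API at the time of this commit was different.

    import trio

    async def demo():
        lock = trio.StrictFIFOLock()
        arrived = []   # order in which workers blocked on the lock
        acquired = []  # order in which workers actually got the lock

        async def worker(i):
            # No checkpoint between recording our arrival and blocking on
            # the lock, so 'arrived' faithfully records the queueing order.
            arrived.append(i)
            async with lock:
                acquired.append(i)

        await lock.acquire()  # hold the lock so every worker has to queue
        async with trio.open_nursery() as nursery:
            for i in range(5):
                nursery.start_soon(worker, i)
            await trio.sleep(0)  # checkpoint: let the workers line up
            lock.release()       # the queue drains first-come-first-served
        assert acquired == arrived

    trio.run(demo)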
trio/tests/test_sync.py (11 additions, 3 deletions)
@@ -94,10 +94,14 @@ async def test_Semaphore_bounded():
     assert bs.value == 1
 
 
-async def test_Lock():
-    l = Lock()
+@pytest.mark.parametrize(
+    "lockcls", [Lock, StrictFIFOLock], ids=lambda fn: fn.__name__)
+async def test_Lock_and_StrictFIFOLock(lockcls):
+    l = lockcls()
     assert not l.locked()
     repr(l)  # smoke test
+    # make sure repr uses the right name for subclasses
+    assert lockcls.__name__ in repr(l)
     with assert_yields():
         async with l:
             assert l.locked()
@@ -161,6 +165,8 @@ async def holder():
 async def test_Condition():
     with pytest.raises(TypeError):
         Condition(Semaphore(1))
+    with pytest.raises(TypeError):
+        Condition(StrictFIFOLock())
     l = Lock()
     c = Condition(l)
     assert not l.locked()
@@ -436,13 +442,15 @@ def release(self):
 lock_factories = [
     lambda: Semaphore(1),
     Lock,
+    StrictFIFOLock,
     lambda: QueueLock1(10),
     lambda: QueueLock1(1),
     QueueLock2,
 ]
 lock_factory_names = [
     "Semaphore(1)",
     "Lock",
+    "StrictFIFOLock",
     "QueueLock1(10)",
     "QueueLock1(1)",
     "QueueLock2",
@@ -483,7 +491,7 @@ async def worker(lock_like):
 # Several workers queue on the same lock; make sure they each get it, in
 # order.
 @generic_lock_test
-async def test_generic_lock_fairness(lock_factory):
+async def test_generic_lock_fifo_fairness(lock_factory):
     initial_order = []
     record = []
     LOOPS = 5
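
As a quick companion to the tests above, here is a hedged snippet (not part
of the commit) exercising the two behavioral changes from trio/_sync.py: the
repr now reports the subclass name, and Condition rejects Lock subclasses
such as StrictFIFOLock via the type(lock) is not Lock check.

    import trio

    lock = trio.StrictFIFOLock()
    assert not lock.locked()
    # e.g. "<unlocked StrictFIFOLock object at 0x7f...>"
    assert "StrictFIFOLock" in repr(lock)

    # Condition insists on exactly trio.Lock; subclass instances are
    # rejected with a TypeError.
    try:
        trio.Condition(lock)
    except TypeError:
        pass
    else:
        raise AssertionError("expected TypeError")

    trio.Condition(trio.Lock())  # a plain Lock is accepted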
