Do not filter tasks before gathering data #6371
Changes from all commits: fa157c9, 913e5e9, 3193a8a, 7021526, 6f9caed, 027cdca, 216cf2c
```diff
@@ -3150,61 +3150,32 @@ def total_comm_bytes(self):
             )
         return self.comm_threshold_bytes
 
-    def _filter_deps_for_fetch(
-        self, to_gather_keys: Iterable[str]
-    ) -> tuple[set[str], set[str], TaskState | None]:
-        """Filter a list of keys before scheduling coroutines to fetch data from workers.
+    def _get_cause(self, keys: Iterable[str]) -> TaskState:
+        """For diagnostics, we want to attach a transfer to a single task. This task is
+        typically the next to be executed but since we're fetching tasks for potentially
+        many dependents, an exact match is not possible. Additionally, if a key was
+        fetched through acquire-replicas, dependents may not be known at all.
 
         Returns
         -------
-        in_flight_keys:
-            The subset of keys in to_gather_keys in state `flight` or `resumed`
-        cancelled_keys:
-            The subset of tasks in to_gather_keys in state `cancelled` or `memory`
         cause:
             The task to attach startstops of this transfer to
         """
-        in_flight_tasks: set[TaskState] = set()
-        cancelled_keys: set[str] = set()
-        for key in to_gather_keys:
-            ts = self.tasks.get(key)
-            if ts is None:
-                continue
```
Review comment (on the removed `ts = self.tasks.get(key)` lookup): This should never happen. The …

Reply: Yes, we worked very hard to ensure tasks are not accidentally forgotten. I encourage being as strict as possible with this. A KeyError is often a sign of a messed-up transition somewhere else.
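As a toy illustration of that review point (plain dicts standing in for the worker's task table; not distributed code): strict indexing turns a forgotten task into a loud failure at the broken transition, while a lenient lookup silently skips it.

```python
# Toy sketch, not distributed's API: tasks maps key -> state.
tasks = {"x": "flight"}

def strict_state(key: str) -> str:
    # If some transition wrongly forgot the task, this raises KeyError
    # right here, pointing at the inconsistency.
    return tasks[key]

def lenient_state(key: str):
    # .get() hides the same bug: the caller just skips the key.
    return tasks.get(key)

print(strict_state("x"))   # "flight"
print(lenient_state("y"))  # None -- bug swallowed
# strict_state("y") would raise KeyError -- bug surfaced
```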
```diff
-            # At this point, a task has been transitioned fetch->flight
-            # flight is only allowed to be transitioned into
-            # {memory, resumed, cancelled}
-            # resumed and cancelled will block any further transition until this
-            # coro has been finished
-
-            if ts.state in ("flight", "resumed"):
-                in_flight_tasks.add(ts)
-            # If the key is already in memory, the fetch should not happen which
-            # is signalled by the cancelled_keys
-            elif ts.state in {"cancelled", "memory"}:
-                cancelled_keys.add(key)
-            else:
-                raise RuntimeError(
-                    f"Task {ts.key} found in illegal state {ts.state}. "
-                    "Only states `flight`, `resumed` and `cancelled` possible."
-                )
-
-        # For diagnostics we want to attach the transfer to a single task. this
-        # task is typically the next to be executed but since we're fetching
-        # tasks for potentially many dependents, an exact match is not possible.
-        # If there are no dependents, this is a pure replica fetch
         cause = None
-        for ts in in_flight_tasks:
+        for key in keys:
+            ts = self.tasks[key]
             if ts.dependents:
-                cause = next(iter(ts.dependents))
-                break
-            else:
-                cause = ts
-        in_flight_keys = {ts.key for ts in in_flight_tasks}
-        return in_flight_keys, cancelled_keys, cause
+                return next(iter(ts.dependents))
+            cause = ts
+        assert cause  # Always at least one key
+        return cause
```
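The cause-selection rule reads more clearly in isolation. A runnable toy version (a simplified stand-in, not the real TaskState machinery): prefer the first dependent encountered; if no gathered key has dependents (a pure acquire-replicas fetch), fall back to the last gathered task itself.

```python
class ToyTask:
    # Minimal stand-in for TaskState: just a key and a set of dependents.
    def __init__(self, key, dependents=()):
        self.key = key
        self.dependents = set(dependents)

def get_cause(tasks, keys):
    cause = None
    for key in keys:
        ts = tasks[key]
        if ts.dependents:
            # Attach the transfer to whichever task will consume this data.
            return next(iter(ts.dependents))
        cause = ts
    assert cause  # always at least one key
    return cause

y = ToyTask("y")
tasks = {"x": ToyTask("x", dependents=[y]), "z": ToyTask("z")}
print(get_cause(tasks, ["z", "x"]).key)  # "y": a dependent wins
print(get_cause(tasks, ["z"]).key)       # "z": replica fetch, no dependents
```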
```diff
     def _update_metrics_received_data(
-        self, start: float, stop: float, data: dict, cause: TaskState, worker: str
+        self,
+        start: float,
+        stop: float,
+        data: dict[str, Any],
+        cause: TaskState,
+        worker: str,
     ) -> None:
 
         total_bytes = sum(self.tasks[key].get_nbytes() for key in data)
```

Review comment: Off topic: post refactor, should this method move to the state machine class, or stay in Worker proper?

Reply: It's network and diagnostics related, so I'm inclined to say this does not belong to the state machine class.
```diff
@@ -3253,7 +3224,7 @@ def _update_metrics_received_data(
     async def gather_dep(
         self,
         worker: str,
-        to_gather: Iterable[str],
+        to_gather: Collection[str],
         total_nbytes: int,
         *,
         stimulus_id: str,
```
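The Iterable -> Collection change matters because the rewritten body calls `len(to_gather)` and iterates the keys more than once, which a bare `Iterable` does not guarantee. A minimal illustration (a toy function, not the real signature):

```python
from typing import Collection

def request_keys(keys: Collection[str]) -> str:
    # Collection is Sized, so len() is guaranteed; it is also safely
    # re-iterable, unlike a one-shot generator.
    return f"Requesting {len(keys)} keys"

print(request_keys({"x", "y"}))  # Requesting 2 keys
# A generator satisfies Iterable[str] but not Collection[str]:
# request_keys(k for k in "xy") would be rejected by a type checker,
# and len() would fail on it at runtime.
```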
```diff
@@ -3277,46 +3248,23 @@ async def gather_dep(
         recommendations: Recs = {}
         instructions: Instructions = []
         response = {}
-        to_gather_keys: set[str] = set()
-        cancelled_keys: set[str] = set()
 
         def done_event():
             return GatherDepDoneEvent(stimulus_id=f"gather-dep-done-{time()}")
 
         try:
-            to_gather_keys, cancelled_keys, cause = self._filter_deps_for_fetch(
-                to_gather
-            )
-
-            if not to_gather_keys:
-                self.log.append(
-                    ("nothing-to-gather", worker, to_gather, stimulus_id, time())
-                )
-                return done_event()
-
-            assert cause
-            # Keep namespace clean since this func is long and has many
-            # dep*, *ts* variables
-            del to_gather
-
-            self.log.append(
-                ("request-dep", worker, to_gather_keys, stimulus_id, time())
-            )
-            logger.debug(
-                "Request %d keys for task %s from %s",
-                len(to_gather_keys),
-                cause,
-                worker,
-            )
+            self.log.append(("request-dep", worker, to_gather, stimulus_id, time()))
+            logger.debug("Request %d keys from %s", len(to_gather), worker)
 
             start = time()
             response = await get_data_from_worker(
-                self.rpc, to_gather_keys, worker, who=self.address
+                self.rpc, to_gather, worker, who=self.address
             )
             stop = time()
             if response["status"] == "busy":
                 return done_event()
 
+            cause = self._get_cause(to_gather)
             self._update_metrics_received_data(
                 start=start,
                 stop=stop,
```
```diff
@@ -3369,9 +3317,7 @@ def done_event():
         data = response.get("data", {})
 
         if busy:
-            self.log.append(
-                ("busy-gather", worker, to_gather_keys, stimulus_id, time())
-            )
+            self.log.append(("busy-gather", worker, to_gather, stimulus_id, time()))
             # Avoid hammering the worker. If there are multiple replicas
             # available, immediately try fetching from a different worker.
             self.busy_workers.add(worker)
```
```diff
@@ -3382,12 +3328,7 @@ def done_event():
         for d in self.in_flight_workers.pop(worker):
             ts = self.tasks[d]
             ts.done = True
-            if d in cancelled_keys:
-                if ts.state == "cancelled":
-                    recommendations[ts] = "released"
-                else:
-                    recommendations[ts] = "fetch"
-            elif d in data:
+            if d in data:
                 recommendations[ts] = ("memory", data[d])
             elif busy:
                 recommendations[ts] = "fetch"
```

Review comment (on the removed inner `else` branch): `ts.state == "memory"` — i.e., that branch was only reached when the task was already in memory.
Review comment: If x is already in flight, how could the data end up in memory without us doing it explicitly, like in this unit test? acquire_replica and "fetch_dependency" should not fetch this key a second time.

From my reading of the code, the only way this could happen is via Client.scatter. I would argue a user should not be allowed to scatter a key that is already known to the cluster to be computed. I don't want to block this PR for this test, but if the above outline is how we end up in this situation, I think we should prohibit scattering such keys and shrink the space of possible/allowed transitions.

Specifically, I'm inclined to say `a.update({"x": 3})` should raise an exception if x is in flight. Thoughts?
Reply: This might translate to something like the sketch below. (Where the exception is supposed to be raised is not the point of my argument; it may not be feasible to raise in the transition itself, idk.)
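The code block that originally followed this comment did not survive extraction. Below is a hypothetical reconstruction of the kind of guard being proposed; all names here (`WorkerStub`, `update_data`) are assumptions for illustration, not the PR's code.

```python
from typing import Any

class WorkerStub:
    """Toy stand-in: tasks maps key -> state string."""

    def __init__(self) -> None:
        self.tasks = {"x": "flight"}
        self.data: dict[str, Any] = {}

    def update_data(self, new_data: dict[str, Any]) -> None:
        for key in new_data:
            if self.tasks.get(key) == "flight":
                # Scattering a key the cluster is already fetching/computing
                # would race with the in-flight transfer; refuse it outright.
                raise RuntimeError(
                    f"Refusing to scatter {key!r}: task is already in flight"
                )
        self.data.update(new_data)

WorkerStub().update_data({"x": 3})  # raises RuntimeError, as proposed
```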
Reply: The more I think about this, the stronger I feel about it, because these kinds of race conditions are part of why I introduced cancelled/resumed: to avoid us needing to deal with these transitions.

If the fetch task were to finish successfully, this would cause a memory->memory transition. Since that is not allowed/possible, it would instead cause:

- memory->released (possibly the released transition would cancel some follow-up tasks)
- released->memory

or, as a concrete story (see the sketch below). Writing down the expected story made me realize that our transition flow should heal us here, but we'd be performing a lot of unnecessary transitions that could expose us to problems.
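The concrete story itself was also lost in extraction; the following is an assumed illustration in `(key, start_state, end_state, stimulus)` shorthand of the churn described above, not the reviewer's actual example.

```python
# Assumed illustration only -- not the reviewer's actual story.
story = [
    ("x", "fetch", "flight", "gather-dep"),          # worker starts fetching x
    ("x", "flight", "memory", "scatter"),            # scatter lands x early
    ("x", "memory", "released", "gather-dep-done"),  # finished fetch cannot do memory->memory...
    ("x", "released", "memory", "gather-dep-done"),  # ...so it bounces through released and back
]
for transition in story:
    print(transition)
```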
Reply: I think that 80% of the problem is caused by the non-sequentiality of RPC calls vs. bulk comms (see distributed/scheduler.py, lines 5018 to 5022 at fb3589c). The only way to avoid this would be to fundamentally rewrite the scatter implementation, which, for the record, I think is long overdue.
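A toy illustration of that non-sequentiality (plain asyncio, no dask involved): two independent channels writing the same key are ordered by timing, not by program order.

```python
import asyncio

async def rpc_scatter(state: dict) -> None:
    await asyncio.sleep(0.01)  # control-channel round trip
    state["x"] = "scattered-value"

async def bulk_comms_gather(state: dict) -> None:
    await asyncio.sleep(0.001)  # separate data channel; may win or lose the race
    state["x"] = "fetched-value"

async def main() -> None:
    state: dict = {}
    await asyncio.gather(rpc_scatter(state), bulk_comms_gather(state))
    print(state)  # whichever write lands last wins; nothing sequences them

asyncio.run(main())
```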
Reply: I'll explain the above in a comment in the test.
Reply: My point is only partially about the technical correctness of race conditions; it's also about whether this is even a sane operation. How can a user know the value of x if x is supposed to be computed on the cluster?