fix: respect service's suggested retryAfter when throttled (#39)
When calling a Deadline Cloud service API and receiving a throttle/retry
response, the exception object may contain a "retryAfterSeconds" field
alongside the error. When that field is present, the calling client
should treat it as a request to retry no sooner than the given number
of seconds; it is a load-shedding mechanism for the service. We should
respect the service's request.
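
For illustration, a throttled botocore ClientError carrying this field might look like the following (a minimal sketch modeled on the unit test added in this change; the real service response may contain additional members):

from botocore.exceptions import ClientError

# Hypothetical throttle response that carries the service's load-shedding hint.
error_response = {
    "Error": {"Code": "ThrottlingException", "Message": "Rate exceeded"},
    "retryAfterSeconds": 30,  # service asks us to wait at least 30 seconds
}
exc = ClientError(error_response, "UpdateWorker")

# The caller reads the hint back off the exception's response dictionary:
retry_after = exc.response.get("retryAfterSeconds")  # -> 30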

Solution:
 Updated all of the deadline-cloud API wrappers to extract the value of
the "retryAfterSeconds" field when it is present and pass it to our
backoff-delay calculator, which uses the value as a lower bound on the
returned delay.
 I also made the scheduler use the update_worker API wrapper; the
scheduler still had its own implementation that didn't properly handle
exceptions. This necessitated adding the ability to interrupt
update_worker's throttled retries to preserve the functionality at that
call site.
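
As a rough sketch of the intended behavior (the helper names here are hypothetical; the 20% jitter factor and the Event-based wait mirror the diff below):

import random
from threading import Event
from time import sleep
from typing import Optional

def bounded_delay(backoff_delay: float, retry_after: Optional[int] = None) -> float:
    # The service's retryAfterSeconds acts as a floor on the computed backoff delay;
    # up to 20% jitter keeps a fleet of workers from retrying in lock-step.
    if retry_after is not None and backoff_delay < retry_after:
        return retry_after + random.uniform(0, 0.2 * retry_after)
    return backoff_delay

def interruptible_sleep(delay: float, interrupt_event: Optional[Event] = None) -> None:
    # Waiting on an Event (rather than sleeping unconditionally) lets the caller
    # cut a throttled retry short, e.g. when a drain timeout expires.
    if interrupt_event:
        interrupt_event.wait(delay)
    else:
        sleep(delay)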

Signed-off-by: Daniel Neilson <53624638+ddneilson@users.noreply.github.com>
Signed-off-by: Graeme McHale <gmchale@amazon.com>
ddneilson authored and gmchale79 committed Mar 11, 2024
1 parent 48ed733 commit 1294cb4
Showing 13 changed files with 387 additions and 162 deletions.
72 changes: 59 additions & 13 deletions src/deadline_worker_agent/aws/deadline/__init__.py
@@ -5,14 +5,16 @@
from typing import Any, Optional
from threading import Event
from dataclasses import dataclass
import random

from botocore.retries.standard import RetryContext
from botocore.exceptions import ClientError

from deadline.client.api import get_telemetry_client, TelemetryClient

from ..._version import __version__ as version # noqa
from ...startup.config import Configuration, Capabilities
from ...startup.config import Configuration
from ...startup.capabilities import Capabilities
from ...boto import DeadlineClient, NoOverflowExponentialBackoff as Backoff
from ...api_models import (
AssumeFleetRoleForWorkerResponse,
@@ -120,6 +122,18 @@ def _get_error_reason_from_header(response: dict[str, Any]) -> Optional[str]:
return response.get("reason", None)


def _get_retry_after_seconds_from_header(response: dict[str, Any]) -> Optional[int]:
return response.get("retryAfterSeconds", None)


def _apply_lower_bound_to_delay(delay: float, lower_bound: Optional[float] = None) -> float:
if lower_bound is not None and delay < lower_bound:
# We add just a tiny bit of jitter (20%) to the lower bound to reduce the probability
# of a group of workers all retry-storming in lock-step.
delay = lower_bound + random.uniform(0, 0.2 * lower_bound)
return delay


def _get_resource_id_and_status_from_conflictexception_header(
response: dict[str, Any]
) -> tuple[Optional[str], Optional[str]]:
@@ -155,7 +169,10 @@ def assume_fleet_role_for_worker(
# Retry:
# ThrottlingException, InternalServerException
delay = backoff.delay_amount(RetryContext(retry))
code = e.response.get("Error", {}).get("Code", None)
delay = _apply_lower_bound_to_delay(
delay, _get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(
f"Throttled while attempting to refresh Worker AWS Credentials. Retrying in {delay} seconds..."
@@ -216,12 +233,11 @@ def assume_queue_role_for_worker(
retry = 0
query_start_time = monotonic()

_logger.info("")
# Note: Frozen credentials could expire while doing a retry loop; that's
# probably going to manifest as AccessDenied, but I'm not 100% certain.
while True:
if interrupt_event and interrupt_event.is_set():
raise DeadlineRequestInterrupted("GetQueueIamCredentials interrupted")
raise DeadlineRequestInterrupted("AssumeQueueRoleForWorker interrupted")
try:
response = deadline_client.assume_queue_role_for_worker(
farmId=farm_id, fleetId=fleet_id, workerId=worker_id, queueId=queue_id
@@ -235,7 +251,10 @@
# Retry:
# ThrottlingException, InternalServerException
delay = backoff.delay_amount(RetryContext(retry))
code = e.response.get("Error", {}).get("Code", None)
delay = _apply_lower_bound_to_delay(
delay, _get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(
f"Throttled while attempting to refresh Worker AWS Credentials. Retrying in {delay} seconds..."
@@ -333,7 +352,10 @@ def batch_get_job_entity(
# Retry:
# ThrottlingException, InternalServerException
delay = backoff.delay_amount(RetryContext(retry))
code = e.response.get("Error", {}).get("Code", None)
delay = _apply_lower_bound_to_delay(
delay, _get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(f"Throttled calling BatchGetJobEntity. Retrying in {delay} seconds...")
elif code == "InternalServerException":
@@ -377,6 +399,9 @@ def create_worker(
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = _apply_lower_bound_to_delay(
delay, _get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(f"CreateWorker throttled. Retrying in {delay} seconds...")
@@ -444,6 +469,9 @@ def delete_worker(
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = _apply_lower_bound_to_delay(
delay, _get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(f"DeleteWorker throttled. Retrying in {delay} seconds...")
@@ -487,16 +515,20 @@ def delete_worker(
def update_worker(
*,
deadline_client: DeadlineClient,
config: Configuration,
farm_id: str,
fleet_id: str,
worker_id: str,
status: WorkerStatus,
capabilities: Optional[Capabilities] = None,
host_properties: Optional[HostProperties] = None,
interrupt_event: Optional[Event] = None,
) -> UpdateWorkerResponse:
"""Calls the UpdateWorker API to update this Worker's status, capabilities, and/or host properties with the service.
Raises:
DeadlineRequestConditionallyRecoverableError
DeadlineRequestUnrecoverableError
DeadlineRequestInterrupted
"""

# Retry API call when being throttled
@@ -506,26 +538,32 @@ def update_worker(
_logger.info(f"Invoking UpdateWorker to set {worker_id} to status={status.value}.")

request: dict[str, Any] = dict(
farmId=config.farm_id,
fleetId=config.fleet_id,
farmId=farm_id,
fleetId=fleet_id,
workerId=worker_id,
capabilities=config.capabilities.for_update_worker(),
status=status.value,
)
if capabilities:
request["capabilities"] = capabilities.for_update_worker()
if host_properties:
request["hostProperties"] = host_properties

_logger.debug("UpdateWorker request: %s", request)
while True:
# If true, then we're trying to go to STARTED but have determined that we must first
# go to STOPPED
must_stop_first = False

_logger.debug("UpdateWorker request: %s", request)
if interrupt_event and interrupt_event.is_set():
raise DeadlineRequestInterrupted("UpdateWorker interrupted")
try:
response = deadline_client.update_worker(**request)
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = _apply_lower_bound_to_delay(
delay, _get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)

skip_sleep = False
@@ -578,7 +616,10 @@ def update_worker(
raise DeadlineRequestUnrecoverableError(e)

if not skip_sleep:
sleep(delay)
if interrupt_event:
interrupt_event.wait(delay)
else:
sleep(delay)
retry += 1
except Exception as e:
_logger.error("Failed to start worker %s", worker_id)
@@ -589,9 +630,11 @@
try:
update_worker(
deadline_client=deadline_client,
config=config,
farm_id=farm_id,
fleet_id=fleet_id,
worker_id=worker_id,
status=WorkerStatus.STOPPED,
capabilities=capabilities,
host_properties=host_properties,
)
except Exception:
@@ -695,6 +738,9 @@ def update_worker_schedule(
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = _apply_lower_bound_to_delay(
delay, _get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)

if code == "ThrottlingException":
68 changes: 30 additions & 38 deletions src/deadline_worker_agent/scheduler/scheduler.py
@@ -12,8 +12,7 @@
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from threading import Event, RLock, Lock
from time import sleep, monotonic
from threading import Event, RLock, Lock, Timer
from typing import Callable, Tuple, Union, cast, Optional, Any
import logging
import os
@@ -23,9 +22,9 @@
from openjd.sessions import LOG as OPENJD_SESSION_LOG
from openjd.sessions import ActionState, ActionStatus
from deadline.job_attachments.asset_sync import AssetSync
from botocore.exceptions import ClientError


from ..aws.deadline import update_worker
from ..aws_credentials import QueueBoto3Session, AwsCredentialsRefresher
from ..boto import DeadlineClient, Session as BotoSession
from ..errors import ServiceShutdown
@@ -40,8 +39,10 @@
AssignedSession,
UpdateWorkerScheduleResponse,
UpdatedSessionActionInfo,
WorkerStatus,
)
from ..aws.deadline import (
DeadlineRequestConditionallyRecoverableError,
DeadlineRequestError,
DeadlineRequestInterrupted,
DeadlineRequestWorkerOfflineError,
@@ -415,45 +416,36 @@ def _transition_to_stopping(self, timeout: timedelta) -> None:
initiated a worker-initiated drain operation, and that it must not be
given additional new tasks to work on.
"""
request = dict[str, Any](
farmId=self._farm_id,
fleetId=self._fleet_id,
workerId=self._worker_id,
status="STOPPING",
)

start_time = monotonic()
curr_time = start_time
next_backoff = timedelta(microseconds=200 * 1000)

# We're only being given timeout seconds to successfully make this request.
# That is because the drain operation may be expedited, and we need to move
# fast to get to transitioning to STOPPED state after this.
while (curr_time - start_time) < timeout.total_seconds():
try:
self._deadline.update_worker(**request)
logger.info("Successfully set Worker state to STOPPING.")
break
except ClientError as e:
code = e.response.get("Error", {}).get("Code", None)
if code == "ThrottlingException" or code == "InternalServerException":
# backoff
curr_time = monotonic()
elapsed_time = curr_time - start_time
max_backoff = max(
timedelta(seconds=0),
timedelta(seconds=(timeout.total_seconds() - elapsed_time)),
)
backoff = min(max_backoff, next_backoff)
next_backoff = next_backoff * 2
if backoff <= timedelta(seconds=0):
logger.info("Failed to set Worker state to STOPPING: timeout")
break
sleep(backoff.total_seconds())
else:
logger.info("Failed to set Worker state to STOPPING.")
logger.exception(e)
break
timeout_event = Event()
timer = Timer(interval=timeout.total_seconds(), function=timeout_event.set)

try:
update_worker(
deadline_client=self._deadline,
farm_id=self._farm_id,
fleet_id=self._fleet_id,
worker_id=self._worker_id,
status=WorkerStatus.STOPPING,
interrupt_event=timeout_event,
)
logger.info("Successfully set Worker state to STOPPING.")
except DeadlineRequestInterrupted:
logger.info(
"Timeout reached trying to update Worker to STOPPING status. Proceeding without changing status..."
)
except (
DeadlineRequestUnrecoverableError,
DeadlineRequestConditionallyRecoverableError,
) as exc:
logger.warning(
f"Exception updating Worker to STOPPING status. Continuing with drain operation regardless. Exception: {str(exc)}"
)
finally:
timer.cancel()

def _updated_session_actions(
self,
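
The rewritten _transition_to_stopping above converts the drain timeout into an interrupt event using a threading.Timer. A standalone sketch of that pattern (the wrapper function and names here are hypothetical, not part of the agent):

from threading import Event, Timer

def call_with_timeout(update_fn, timeout_seconds: float) -> None:
    # Arm a timer that sets the interrupt event once the timeout elapses; the
    # wrapped call (update_worker in this commit) waits on that event between
    # throttled retries and raises DeadlineRequestInterrupted when it is set.
    interrupt = Event()
    timer = Timer(interval=timeout_seconds, function=interrupt.set)
    timer.start()
    try:
        update_fn(interrupt_event=interrupt)
    finally:
        timer.cancel()  # no-op if the timer has already fired
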
7 changes: 5 additions & 2 deletions src/deadline_worker_agent/startup/bootstrap.py
@@ -327,9 +327,11 @@ def _start_worker(
try:
response = update_worker(
deadline_client=deadline_client,
config=config,
farm_id=config.farm_id,
fleet_id=config.fleet_id,
worker_id=worker_id,
status=WorkerStatus.STARTED,
capabilities=config.capabilities,
host_properties=host_properties,
)
except DeadlineRequestUnrecoverableError:
@@ -371,7 +373,8 @@ def _enforce_no_instance_profile_or_stop_worker(
try:
update_worker(
deadline_client=deadline_client,
config=config,
farm_id=config.farm_id,
fleet_id=config.fleet_id,
worker_id=worker_id,
status=WorkerStatus.STOPPED,
)
3 changes: 2 additions & 1 deletion src/deadline_worker_agent/startup/entrypoint.py
@@ -173,7 +173,8 @@ def filter(self, record: logging.LogRecord) -> bool:
try:
update_worker(
deadline_client=deadline_client,
config=config,
farm_id=config.farm_id,
fleet_id=config.fleet_id,
worker_id=worker_id,
status=WorkerStatus.STOPPED,
)
36 changes: 36 additions & 0 deletions test/unit/aws/deadline/test_assume_fleet_role_for_worker.py
@@ -98,6 +98,42 @@ def test_retries_when_throttled(
sleep_mock.assert_called_once()


@pytest.mark.parametrize("exception_code", ["ThrottlingException", "InternalServerException"])
def test_respects_retryafter_when_throttled(
client: MagicMock,
farm_id: str,
fleet_id: str,
worker_id: str,
mock_assume_fleet_role_for_worker_response: AssumeFleetRoleForWorkerResponse,
exception_code: str,
sleep_mock: MagicMock,
):
# A test that the time delay for a throttled retry respects the value in the 'retryAfterSeconds'
# property of an exception if one is present.

# GIVEN
min_retry = 30
exc = ClientError(
{"Error": {"Code": exception_code, "Message": "A message"}, "retryAfterSeconds": min_retry},
"AssumeFleetRoleForWorker",
)
client.assume_fleet_role_for_worker.side_effect = [
exc,
mock_assume_fleet_role_for_worker_response,
]

# WHEN
response = assume_fleet_role_for_worker(
deadline_client=client, farm_id=farm_id, fleet_id=fleet_id, worker_id=worker_id
)

# THEN
assert response == mock_assume_fleet_role_for_worker_response
assert client.assume_fleet_role_for_worker.call_count == 2
sleep_mock.assert_called_once()
assert min_retry <= sleep_mock.call_args.args[0] <= (min_retry + 0.2 * min_retry)


@pytest.mark.parametrize(
"exception_code",
[