fix: respect service's suggested retryAfter when throttled
When calling a Deadline Cloud service API and getting a throttle/retry
response, the exception object may contain a "retryAfterSeconds" field
alongside the error. When that field is present, the calling client
should treat it as a request to retry no sooner than the given number
of seconds; it is a load-shedding mechanism for the service. We should
respect the service's request.

Solution:
 Updated all of the deadline-cloud API wrappers to extract the value of
the "retryAfterSeconds" field when it is present and pass it to our
backoff-delay calculator, which uses the value as a lower limit on the
returned delay.
 I also made the scheduler use the API wrapper for update_worker; it
still had its own implementation that didn't properly handle exceptions.
This necessitated adding the ability to interrupt update_worker's
throttled retries so as to preserve the functionality at that call site.

Signed-off-by: Daniel Neilson <53624638+ddneilson@users.noreply.github.com>
ddneilson committed Sep 27, 2023
1 parent 2b89ad7 commit e4a8739
Showing 14 changed files with 396 additions and 181 deletions.
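As background for the diff below, here is a minimal sketch of the mechanism this commit describes: read the "retryAfterSeconds" field from a throttled botocore ClientError response (when the service provides one) and use it as a floor on the computed backoff delay. The helper names, the backoff formula, and the retry loop are illustrative only, not the repository's actual implementations.

from __future__ import annotations

import random
import time
from typing import Any, Optional

from botocore.exceptions import ClientError


def _retry_after_seconds(response: dict[str, Any]) -> Optional[int]:
    # Deadline Cloud throttle responses may carry this field at the top
    # level of the parsed error response.
    return response.get("retryAfterSeconds")


def _backoff_delay(
    attempt: int, *, max_backoff: float = 64.0, min_backoff: Optional[float] = None
) -> float:
    # Full-jitter exponential backoff, clamped below by the service's
    # suggested retry-after value when one was provided.
    delay = random.uniform(0.0, min(max_backoff, 2.0**attempt))
    if min_backoff is not None:
        delay = max(delay, float(min_backoff))
    return delay


def _call_respecting_retry_after(client, **request) -> dict[str, Any]:
    # Illustrative retry loop; the real wrappers also distinguish
    # recoverable from unrecoverable error codes and support interruption.
    attempt = 0
    while True:
        try:
            return client.update_worker(**request)
        except ClientError as exc:
            code = exc.response.get("Error", {}).get("Code")
            if code not in ("ThrottlingException", "InternalServerException"):
                raise
            time.sleep(_backoff_delay(attempt, min_backoff=_retry_after_seconds(exc.response)))
            attempt += 1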
68 changes: 49 additions & 19 deletions src/deadline_worker_agent/aws/deadline/__init__.py
@@ -10,6 +10,7 @@
from botocore.exceptions import ClientError

from ...startup.config import Configuration
from ...startup.capabilities import Capabilities
from ...boto import DeadlineClient, NoOverflowExponentialBackoff as Backoff
from ...api_models import (
AssumeFleetRoleForWorkerResponse,
@@ -117,6 +118,10 @@ def _get_error_reason_from_header(response: dict[str, Any]) -> Optional[str]:
return response.get("reason", None)


def _get_retry_after_seconds_from_header(response: dict[str, Any]) -> Optional[int]:
return response.get("retryAfterSeconds", None)


def _get_resource_id_and_status_from_conflictexception_header(
response: dict[str, Any]
) -> tuple[Optional[str], Optional[str]]:
@@ -151,8 +156,10 @@ def assume_fleet_role_for_worker(
# * ResourceNotFoundException -> Go back to startup, if able.
# Retry:
# ThrottlingException, InternalServerException
delay = backoff.delay_amount(RetryContext(retry))
code = e.response.get("Error", {}).get("Code", None)
delay = backoff.delay_amount(
RetryContext(retry), min_backoff=_get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(
f"Throttled while attempting to refresh Worker AWS Credentials. Retrying in {delay} seconds..."
@@ -213,12 +220,11 @@ def assume_queue_role_for_worker(
retry = 0
query_start_time = monotonic()

_logger.info("")
# Note: Frozen credentials could expire while doing a retry loop; that's
# probably going to manifest as AccessDenied, but I'm not 100% certain.
while True:
if interrupt_event and interrupt_event.is_set():
raise DeadlineRequestInterrupted("GetQueueIamCredentials interrupted")
raise DeadlineRequestInterrupted("AssumeQueueRoleForWorker interrupted")
try:
response = deadline_client.assume_queue_role_for_worker(
farmId=farm_id, fleetId=fleet_id, workerId=worker_id, queueId=queue_id
@@ -231,8 +237,10 @@
# * ResourceNotFoundException -> Go back to startup, if able.
# Retry:
# ThrottlingException, InternalServerException
delay = backoff.delay_amount(RetryContext(retry))
code = e.response.get("Error", {}).get("Code", None)
delay = backoff.delay_amount(
RetryContext(retry), min_backoff=_get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(
f"Throttled while attempting to refresh Worker AWS Credentials. Retrying in {delay} seconds..."
@@ -329,8 +337,10 @@ def batch_get_job_entity(
# AccessDeniedException, ValidationException, ResourceNotFoundException
# Retry:
# ThrottlingException, InternalServerException
delay = backoff.delay_amount(RetryContext(retry))
code = e.response.get("Error", {}).get("Code", None)
delay = backoff.delay_amount(
RetryContext(retry), min_backoff=_get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(f"Throttled calling BatchGetJobEntity. Retrying in {delay} seconds...")
elif code == "InternalServerException":
@@ -373,7 +383,9 @@ def create_worker(
)
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = backoff.delay_amount(
RetryContext(retry), min_backoff=_get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(f"CreateWorker throttled. Retrying in {delay} seconds...")
@@ -440,7 +452,9 @@ def delete_worker(
)
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = backoff.delay_amount(
RetryContext(retry), min_backoff=_get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)
if code == "ThrottlingException":
_logger.info(f"DeleteWorker throttled. Retrying in {delay} seconds...")
@@ -484,16 +498,20 @@ def delete_worker(
def update_worker(
*,
deadline_client: DeadlineClient,
config: Configuration,
farm_id: str,
fleet_id: str,
worker_id: str,
status: WorkerStatus,
capabilities: Optional[Capabilities] = None,
host_properties: Optional[HostProperties] = None,
interrupt_event: Optional[Event] = None,
) -> UpdateWorkerResponse:
"""Calls the UpdateWorker API to update this Worker's status, capabilities, and/or host properties with the service.
Raises:
DeadlineRequestConditionallyRecoverableError
DeadlineRequestUnrecoverableError
DeadlineRequestInterrupted
"""

# Retry API call when being throttled
@@ -503,26 +521,31 @@ def update_worker(
_logger.info(f"Invoking UpdateWorker to set {worker_id} to status={status.value}.")

request: dict[str, Any] = dict(
farmId=config.farm_id,
fleetId=config.fleet_id,
farmId=farm_id,
fleetId=fleet_id,
workerId=worker_id,
capabilities=config.capabilities.for_update_worker(),
status=status.value,
)
if capabilities:
request["capabilities"] = capabilities.for_update_worker()
if host_properties:
request["hostProperties"] = host_properties

_logger.debug("UpdateWorker request: %s", request)
while True:
# If true, then we're trying to go to STARTED but have determined that we must first
# go to STOPPED
must_stop_first = False

_logger.debug("UpdateWorker request: %s", request)
if interrupt_event and interrupt_event.is_set():
raise DeadlineRequestInterrupted("UpdateWorker interrupted")
try:
response = deadline_client.update_worker(**request)
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = backoff.delay_amount(
RetryContext(retry), min_backoff=_get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)

skip_sleep = False
@@ -575,7 +598,10 @@
raise DeadlineRequestUnrecoverableError(e)

if not skip_sleep:
sleep(delay)
if interrupt_event:
interrupt_event.wait(delay)
else:
sleep(delay)
retry += 1
except Exception as e:
_logger.error("Failed to start worker %s", worker_id)
@@ -586,9 +612,11 @@
try:
update_worker(
deadline_client=deadline_client,
config=config,
farm_id=farm_id,
fleet_id=fleet_id,
worker_id=worker_id,
status=WorkerStatus.STOPPED,
capabilities=capabilities,
host_properties=host_properties,
)
except Exception:
@@ -691,7 +719,9 @@ def update_worker_schedule(
_logger.debug("UpdateWorkerSchedule response: %s", response)
break
except ClientError as e:
delay = backoff.delay_amount(RetryContext(retry))
delay = backoff.delay_amount(
RetryContext(retry), min_backoff=_get_retry_after_seconds_from_header(e.response)
)
code = _get_error_code_from_header(e.response)

if code == "ThrottlingException":
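One detail from the hunks above that is easy to miss: when a caller supplies an interrupt_event, the wrapper now waits on that event between attempts instead of calling time.sleep, and it checks the event before each attempt, so a throttled retry loop can be abandoned promptly. A rough standalone sketch of that shape (the names and the catch-all exception handling are placeholders, not the repository's code):

import threading
import time
from typing import Callable, Optional


class RequestInterrupted(Exception):
    """Signals that the caller cancelled the retry loop."""


def retry_interruptibly(
    do_call: Callable[[], dict],
    compute_delay: Callable[[int], float],
    interrupt_event: Optional[threading.Event] = None,
) -> dict:
    attempt = 0
    while True:
        # Bail out before starting another attempt if asked to stop.
        if interrupt_event is not None and interrupt_event.is_set():
            raise RequestInterrupted("interrupted while retrying")
        try:
            return do_call()
        except Exception:
            # The real wrappers inspect the error code here and re-raise
            # anything that is not retryable.
            delay = compute_delay(attempt)
            if interrupt_event is not None:
                # Returns as soon as the event is set, or after `delay` seconds.
                interrupt_event.wait(delay)
            else:
                time.sleep(delay)
            attempt += 1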
19 changes: 11 additions & 8 deletions src/deadline_worker_agent/boto/retries.py
@@ -3,7 +3,7 @@
from __future__ import annotations
import math
import random
from typing import Callable
from typing import Callable, Optional, Union

from botocore.retries.standard import ExponentialBackoff

@@ -34,14 +34,17 @@ def __init__(
self._log_val = math.log(max_backoff, self._base)
self._random_between = random_between

def delay_amount(self, context):
def delay_amount(self, context, *, min_backoff: Optional[Union[int, float]] = None) -> float:
if (context.attempt_number - 1) <= (self._log_val * 2):
return super().delay_amount(context)

return self._random_between(
self._max_backoff * 0.8,
self._max_backoff,
)
delay = super().delay_amount(context)
else:
delay = self._random_between(
self._max_backoff * 0.8,
self._max_backoff,
)
if min_backoff is not None and delay < min_backoff:
return float(min_backoff)
return delay

def _unit_random(self) -> float:
"""Returns a random number between 0 and 1 using the random_between function provided
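For reference, the behavioral contract of the new min_backoff parameter boils down to a clamp on whatever the exponential/jittered calculation produced. A tiny standalone illustration (not the class above; the numbers are made up):

from typing import Optional


def clamp_delay(computed_delay: float, retry_after_seconds: Optional[int]) -> float:
    # Never wait less than the service's suggested retryAfterSeconds.
    if retry_after_seconds is not None and computed_delay < retry_after_seconds:
        return float(retry_after_seconds)
    return computed_delay


assert clamp_delay(3.2, 30) == 30.0   # service asked for 30s; honor it
assert clamp_delay(45.0, 30) == 45.0  # computed delay already long enough
assert clamp_delay(3.2, None) == 3.2  # no hint from the service; keep the jittered delay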
68 changes: 30 additions & 38 deletions src/deadline_worker_agent/scheduler/scheduler.py
@@ -12,8 +12,7 @@
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from pathlib import Path
from threading import Event, RLock, Lock
from time import sleep, monotonic
from threading import Event, RLock, Lock, Timer
from typing import Callable, Tuple, Union, cast, Optional, Any
import logging
import os
@@ -23,9 +22,9 @@
from openjd.sessions import LOG as OPENJD_SESSION_LOG
from openjd.sessions import ActionState, ActionStatus
from deadline.job_attachments.asset_sync import AssetSync
from botocore.exceptions import ClientError


from ..aws.deadline import update_worker
from ..aws_credentials import QueueBoto3Session, AwsCredentialsRefresher
from ..boto import DeadlineClient, Session as BotoSession
from ..errors import ServiceShutdown
@@ -40,8 +39,10 @@
AssignedSession,
UpdateWorkerScheduleResponse,
UpdatedSessionActionInfo,
WorkerStatus,
)
from ..aws.deadline import (
DeadlineRequestConditionallyRecoverableError,
DeadlineRequestError,
DeadlineRequestInterrupted,
DeadlineRequestWorkerOfflineError,
@@ -415,45 +416,36 @@ def _transition_to_stopping(self, timeout: timedelta) -> None:
initiated a worker-initiated drain operation, and that it must not be
given additional new tasks to work on.
"""
request = dict[str, Any](
farmId=self._farm_id,
fleetId=self._fleet_id,
workerId=self._worker_id,
status="STOPPING",
)

start_time = monotonic()
curr_time = start_time
next_backoff = timedelta(microseconds=200 * 1000)

# We're only being given timeout seconds to successfully make this request.
# That is because the drain operation may be expedited, and we need to move
# fast to get to transitioning to STOPPED state after this.
while (curr_time - start_time) < timeout.total_seconds():
try:
self._deadline.update_worker(**request)
logger.info("Successfully set Worker state to STOPPING.")
break
except ClientError as e:
code = e.response.get("Error", {}).get("Code", None)
if code == "ThrottlingException" or code == "InternalServerException":
# backoff
curr_time = monotonic()
elapsed_time = curr_time - start_time
max_backoff = max(
timedelta(seconds=0),
timedelta(seconds=(timeout.total_seconds() - elapsed_time)),
)
backoff = min(max_backoff, next_backoff)
next_backoff = next_backoff * 2
if backoff <= timedelta(seconds=0):
logger.info("Failed to set Worker state to STOPPING: timeout")
break
sleep(backoff.total_seconds())
else:
logger.info("Failed to set Worker state to STOPPING.")
logger.exception(e)
break
timeout_event = Event()
timer = Timer(interval=timeout.total_seconds(), function=timeout_event.set)

try:
update_worker(
deadline_client=self._deadline,
farm_id=self._farm_id,
fleet_id=self._fleet_id,
worker_id=self._worker_id,
status=WorkerStatus.STOPPING,
interrupt_event=timeout_event,
)
logger.info("Successfully set Worker state to STOPPING.")
except DeadlineRequestInterrupted:
logger.info(
"Timeout reached trying to update Worker to STOPPING status. Proceeding without changing status..."
)
except (
DeadlineRequestUnrecoverableError,
DeadlineRequestConditionallyRecoverableError,
) as exc:
logger.warning(
f"Exception updating Worker to STOPPING status. Continuing with drain operation regardless. Exception: {str(exc)}"
)
finally:
timer.cancel()

def _updated_session_actions(
self,
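The scheduler hunk above bounds the UpdateWorker call with a timer instead of hand-rolling its own backoff: a threading.Timer sets an Event when the drain timeout elapses, and the shared wrapper treats that event as an interrupt. A rough sketch of the pattern with placeholder names follows; the wrapped call is assumed to honor interrupt_event the way this commit's update_worker wrapper does, and timer.start() is included here for completeness.

import threading
from datetime import timedelta
from typing import Any, Callable


def call_with_deadline(blocking_call: Callable[..., Any], timeout: timedelta) -> Any:
    # Give the call at most `timeout` to finish; the call is expected to
    # check/wait on interrupt_event between its own retries.
    timeout_event = threading.Event()
    timer = threading.Timer(interval=timeout.total_seconds(), function=timeout_event.set)
    timer.start()
    try:
        return blocking_call(interrupt_event=timeout_event)
    finally:
        # Cancel so a fast success does not leave the timer pending.
        timer.cancel()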
7 changes: 5 additions & 2 deletions src/deadline_worker_agent/startup/bootstrap.py
@@ -327,9 +327,11 @@ def _start_worker(
try:
response = update_worker(
deadline_client=deadline_client,
config=config,
farm_id=config.farm_id,
fleet_id=config.fleet_id,
worker_id=worker_id,
status=WorkerStatus.STARTED,
capabilities=config.capabilities,
host_properties=host_properties,
)
except DeadlineRequestUnrecoverableError:
@@ -371,7 +373,8 @@ def _enforce_no_instance_profile_or_stop_worker(
try:
update_worker(
deadline_client=deadline_client,
config=config,
farm_id=config.farm_id,
fleet_id=config.fleet_id,
worker_id=worker_id,
status=WorkerStatus.STOPPED,
)
3 changes: 2 additions & 1 deletion src/deadline_worker_agent/startup/entrypoint.py
@@ -166,7 +166,8 @@ def filter(self, record: logging.LogRecord) -> bool:
try:
update_worker(
deadline_client=deadline_client,
config=config,
farm_id=config.farm_id,
fleet_id=config.fleet_id,
worker_id=worker_id,
status=WorkerStatus.STOPPED,
)