diff --git a/aws_advanced_python_wrapper/aurora_initial_connection_strategy_plugin.py b/aws_advanced_python_wrapper/aurora_initial_connection_strategy_plugin.py index 1cedf303..7e56daaf 100644 --- a/aws_advanced_python_wrapper/aurora_initial_connection_strategy_plugin.py +++ b/aws_advanced_python_wrapper/aurora_initial_connection_strategy_plugin.py @@ -86,7 +86,7 @@ def _get_verified_writer_connection(self, props: Properties, is_initial_connecti self._plugin_service.force_refresh_host_list(writer_candidate_conn) writer_candidate = self._plugin_service.identify_connection(writer_candidate_conn) - if writer_candidate is not None and writer_candidate.role != HostRole.WRITER: + if writer_candidate is None or writer_candidate.role != HostRole.WRITER: self._close_connection(writer_candidate_conn) self._delay(retry_delay_ms) continue @@ -133,6 +133,11 @@ def _get_verified_reader_connection(self, props: Properties, is_initial_connecti self._plugin_service.force_refresh_host_list(reader_candidate_conn) reader_candidate = self._plugin_service.identify_connection(reader_candidate_conn) + if reader_candidate is None: + self._close_connection(reader_candidate_conn) + self._delay(retry_delay_ms) + continue + if reader_candidate is not None and reader_candidate.role != HostRole.READER: if self._has_no_readers(): # Cluster has no readers. Simulate Aurora reader cluster endpoint logic and return the current writer connection. diff --git a/aws_advanced_python_wrapper/blue_green_plugin.py b/aws_advanced_python_wrapper/blue_green_plugin.py new file mode 100644 index 00000000..aeed76e5 --- /dev/null +++ b/aws_advanced_python_wrapper/blue_green_plugin.py @@ -0,0 +1,1926 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
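+#
+# Overview (summary comment): this module implements Blue/Green Deployment (BGD)
+# support. A status monitor tracks the blue (source) and green (target) clusters
+# through the switchover phases reported by the BGD status table, and the plugin
+# installs connect/execute "routings" that substitute, suspend, or reject traffic
+# depending on the current phase.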
+ +from __future__ import annotations + +import socket +from datetime import datetime +from time import perf_counter_ns +from typing import TYPE_CHECKING, FrozenSet, List, cast + +from aws_advanced_python_wrapper.database_dialect import BlueGreenDialect +from aws_advanced_python_wrapper.host_list_provider import HostListProvider +from aws_advanced_python_wrapper.utils.value_container import ValueContainer + +if TYPE_CHECKING: + from aws_advanced_python_wrapper.pep249 import Connection + from aws_advanced_python_wrapper.driver_dialect import DriverDialect + from aws_advanced_python_wrapper.host_list_provider import HostListProviderService + from aws_advanced_python_wrapper.plugin_service import PluginService + +import time +from abc import ABC, abstractmethod +from copy import copy +from dataclasses import dataclass +from enum import Enum, auto +from threading import Condition, Event, RLock, Thread +from typing import Any, Callable, ClassVar, Dict, Optional, Set, Tuple + +from aws_advanced_python_wrapper.errors import (AwsWrapperError, + UnsupportedOperationError) +from aws_advanced_python_wrapper.host_availability import HostAvailability +from aws_advanced_python_wrapper.hostinfo import HostInfo, HostRole +from aws_advanced_python_wrapper.iam_plugin import IamAuthPlugin +from aws_advanced_python_wrapper.plugin import Plugin, PluginFactory +from aws_advanced_python_wrapper.utils.atomic import AtomicInt +from aws_advanced_python_wrapper.utils.concurrent import (ConcurrentDict, + ConcurrentSet) +from aws_advanced_python_wrapper.utils.log import Logger +from aws_advanced_python_wrapper.utils.messages import Messages +from aws_advanced_python_wrapper.utils.properties import (Properties, + WrapperProperties) +from aws_advanced_python_wrapper.utils.rdsutils import RdsUtils +from aws_advanced_python_wrapper.utils.telemetry.telemetry import \ + TelemetryTraceLevel + +logger = Logger(__name__) + + +class BlueGreenIntervalRate(Enum): + BASELINE = auto() + INCREASED = auto() + HIGH = auto() + + +class BlueGreenPhase(Enum): + NOT_CREATED = (0, False) + CREATED = (1, False) + PREPARATION = (2, True) # hosts are accessible + IN_PROGRESS = (3, True) # active phase; hosts are not accessible + POST = (4, True) # hosts are accessible; some changes are still in progress + COMPLETED = (5, True) # all changes are completed + + def __new__(cls, value: int, is_switchover_active_or_completed: bool): + obj = object.__new__(cls) + obj._value_ = (value, is_switchover_active_or_completed) + return obj + + @property + def phase_value(self) -> int: + return self.value[0] + + @property + def is_switchover_active_or_completed(self) -> bool: + return self.value[1] + + @staticmethod + def parse_phase(phase_str: Optional[str]) -> BlueGreenPhase: + if not phase_str: + return BlueGreenPhase.NOT_CREATED + + phase_upper = phase_str.upper() + if phase_upper == "AVAILABLE": + return BlueGreenPhase.CREATED + elif phase_upper == "SWITCHOVER_INITIATED": + return BlueGreenPhase.PREPARATION + elif phase_upper == "SWITCHOVER_IN_PROGRESS": + return BlueGreenPhase.IN_PROGRESS + elif phase_upper == "SWITCHOVER_IN_POST_PROCESSING": + return BlueGreenPhase.POST + elif phase_upper == "SWITCHOVER_COMPLETED": + return BlueGreenPhase.COMPLETED + else: + raise ValueError(Messages.get_formatted("BlueGreenPhase.UnknownStatus", phase_str)) + + +class BlueGreenRole(Enum): + SOURCE = 0 + TARGET = 1 + + @staticmethod + def parse_role(role_str: str, version: str) -> BlueGreenRole: + if "1.0" != version: + raise 
ValueError(Messages.get_formatted("BlueGreenRole.UnknownVersion", version)) + + if role_str == "BLUE_GREEN_DEPLOYMENT_SOURCE": + return BlueGreenRole.SOURCE + elif role_str == "BLUE_GREEN_DEPLOYMENT_TARGET": + return BlueGreenRole.TARGET + else: + raise ValueError(Messages.get_formatted("BlueGreenRole.UnknownRole", role_str)) + + +class BlueGreenStatus: + def __init__( + self, + bg_id: str, + phase: BlueGreenPhase, + connect_routings: Optional[List[ConnectRouting]] = None, + execute_routings: Optional[List[ExecuteRouting]] = None, + role_by_host: Optional[ConcurrentDict[str, BlueGreenRole]] = None, + corresponding_hosts: Optional[ConcurrentDict[str, Tuple[HostInfo, Optional[HostInfo]]]] = None): + self.bg_id = bg_id + self.phase = phase + self.connect_routings = [] if connect_routings is None else list(connect_routings) + self.execute_routings = [] if execute_routings is None else list(execute_routings) + self.roles_by_endpoint: ConcurrentDict[str, BlueGreenRole] = ConcurrentDict() + if role_by_host is not None: + self.roles_by_endpoint.put_all(role_by_host) + + self.corresponding_hosts: ConcurrentDict[str, Tuple[HostInfo, Optional[HostInfo]]] = ConcurrentDict() + if corresponding_hosts is not None: + self.corresponding_hosts.put_all(corresponding_hosts) + + self.cv = Condition() + + def get_role(self, host_info: HostInfo) -> Optional[BlueGreenRole]: + return self.roles_by_endpoint.get(host_info.host.lower()) + + def __str__(self) -> str: + connect_routings_str = ',\n '.join(str(cr) for cr in self.connect_routings) + execute_routings_str = ',\n '.join(str(er) for er in self.execute_routings) + role_mappings = ',\n '.join(f"{endpoint}: {role}" for endpoint, role in self.roles_by_endpoint.items()) + + return (f"{self.__class__.__name__}(\n" + f" id='{self.bg_id}',\n" + f" phase={self.phase},\n" + f" connect_routings=[\n" + f" {connect_routings_str}\n" + f" ],\n" + f" execute_routings=[\n" + f" {execute_routings_str}\n" + f" ],\n" + f" role_by_endpoint={{\n" + f" {role_mappings}\n" + f" }}\n" + f")") + + +@dataclass +class BlueGreenInterimStatus: + phase: BlueGreenPhase + version: str + port: int + start_topology: Tuple[HostInfo, ...] + start_ip_addresses_by_host_map: ConcurrentDict[str, ValueContainer[str]] + current_topology: Tuple[HostInfo, ...] 
+ current_ip_addresses_by_host_map: ConcurrentDict[str, ValueContainer[str]] + host_names: Set[str] + all_start_topology_ip_changed: bool + all_start_topology_endpoints_removed: bool + all_topology_changed: bool + + def get_custom_hashcode(self): + result: int = self.get_value_hash(1, "" if self.phase is None else str(self.phase)) + result = self.get_value_hash(result, str(self.version)) + result = self.get_value_hash(result, str(self.port)) + result = self.get_value_hash(result, str(self.all_start_topology_ip_changed)) + result = self.get_value_hash(result, str(self.all_start_topology_endpoints_removed)) + result = self.get_value_hash(result, str(self.all_topology_changed)) + result = self.get_value_hash(result, "" if self.host_names is None else ",".join(sorted(self.host_names))) + result = self.get_host_tuple_hash(result, self.start_topology) + result = self.get_host_tuple_hash(result, self.current_topology) + result = self.get_ip_dict_hash(result, self.start_ip_addresses_by_host_map) + result = self.get_ip_dict_hash(result, self.current_ip_addresses_by_host_map) + return result + + def get_host_tuple_hash(self, current_hash: int, host_tuple: Optional[Tuple[HostInfo, ...]]) -> int: + if host_tuple is None or len(host_tuple) == 0: + tuple_str = "" + else: + tuple_str = ",".join(sorted(f"{x.url}{x.role}" for x in host_tuple)) + + return self.get_value_hash(current_hash, tuple_str) + + def get_ip_dict_hash(self, current_hash: int, ip_dict: Optional[ConcurrentDict[str, ValueContainer[str]]]) -> int: + if ip_dict is None or len(ip_dict) == 0: + dict_str = "" + else: + dict_str = ",".join(sorted(f"{key}{str(value)}" for key, value in ip_dict.items())) + + return self.get_value_hash(current_hash, dict_str) + + def get_value_hash(self, current_hash: int, val: Optional[str]) -> int: + return current_hash * 31 + hash("" if val is None else val) + + def __str__(self): + host_names_str = ',\n '.join(self.host_names) + start_topology_str = ',\n '.join(str(h) for h in self.start_topology) + start_addresses_by_host_str = ',\n '.join( + f"{k}: {v}" for k, v in self.start_ip_addresses_by_host_map.items() + ) + current_topology_str = ',\n '.join(str(h) for h in self.current_topology) + current_addresses_by_host_str = ',\n '.join( + f"{k}: {v}" for k, v in self.current_ip_addresses_by_host_map.items() + ) + + return (f"{self.__class__.__name__}(\n" + f" phase={self.phase},\n" + f" version={self.version},\n" + f" port={self.port},\n" + f" host_names=[\n" + f" {host_names_str}\n" + f" ],\n" + f" start_topology=[\n" + f" {start_topology_str}\n" + f" ],\n" + f" start_ip_addresses_by_host_map={{\n" + f" {start_addresses_by_host_str}\n" + f" }}\n" + f" current_topology=[\n" + f" {current_topology_str}\n" + f" ],\n" + f" current_ip_addresses_by_host_map={{\n" + f" {current_addresses_by_host_str}\n" + f" }}\n" + f" all_start_topology_ip_changed={self.all_start_topology_ip_changed}\n" + f" all_start_topology_endpoints_removed={self.all_start_topology_endpoints_removed}\n" + f" all_topology_changed={self.all_topology_changed}\n" + f")") + + +class ConnectRouting(ABC): + @abstractmethod + def is_match(self, host_info: Optional[HostInfo], role: BlueGreenRole) -> bool: + ... + + @abstractmethod + def apply( + self, + plugin: Plugin, + host_info: HostInfo, + props: Properties, + is_initial_connection: bool, + connect_func: Callable, + plugin_service: PluginService) -> Optional[Connection]: + ... 
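+
+# Routing contract (as used by BlueGreenPlugin below): the plugin walks the active
+# status' routing list and applies the first routing whose is_match() accepts the
+# host/role pair. A routing may yield no result (None / ValueContainer.empty()),
+# which means "re-check the latest status and let the next matching routing try".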
+ + +class ExecuteRouting(ABC): + @abstractmethod + def is_match(self, host_info: Optional[HostInfo], role: BlueGreenRole) -> bool: + ... + + @abstractmethod + def apply( + self, + plugin: Plugin, + plugin_service: PluginService, + props: Properties, + target: type, + method_name: str, + execute_func: Callable, + *args: Any, + **kwargs: Any) -> ValueContainer[Any]: + ... + + +class BaseRouting: + _MIN_SLEEP_MS = 50 + + def __init__(self, endpoint: Optional[str], bg_role: Optional[BlueGreenRole]): + self._endpoint = endpoint # host and optionally port as well + self._bg_role = bg_role + + def delay(self, delay_ms: int, bg_status: Optional[BlueGreenStatus], plugin_service: PluginService, bg_id: str): + end_time_sec = time.time() + (delay_ms / 1_000) + min_delay_ms = min(delay_ms, BaseRouting._MIN_SLEEP_MS) + + if bg_status is None: + time.sleep(delay_ms / 1_000) + return + + while bg_status is plugin_service.get_status(BlueGreenStatus, bg_id) and time.time() <= end_time_sec: + with bg_status.cv: + bg_status.cv.wait(min_delay_ms / 1_000) + + def is_match(self, host_info: Optional[HostInfo], bg_role: BlueGreenRole) -> bool: + if self._endpoint is None: + return self._bg_role is None or self._bg_role == bg_role + + if host_info is None: + return False + + return self._endpoint == host_info.url.lower() and (self._bg_role is None or self._bg_role == bg_role) + + def __str__(self): + endpoint_str = "None" if self._endpoint is None else f"'{self._endpoint}'" + return f"{self.__class__.__name__}(endpoint={endpoint_str}, bg_role={self._bg_role})" + + +class PassThroughConnectRouting(BaseRouting, ConnectRouting): + def __init__(self, endpoint: Optional[str], bg_role: Optional[BlueGreenRole]): + super().__init__(endpoint, bg_role) + + def apply( + self, + plugin: Plugin, + host_info: HostInfo, + props: Properties, + is_initial_connection: bool, + connect_func: Callable, + plugin_service: PluginService) -> Optional[Connection]: + return connect_func() + + +class RejectConnectRouting(BaseRouting, ConnectRouting): + def __init__(self, endpoint: Optional[str], bg_role: Optional[BlueGreenRole]): + super().__init__(endpoint, bg_role) + + def apply( + self, + plugin: Plugin, + host_info: HostInfo, + props: Properties, + is_initial_connection: bool, + connect_func: Callable, + plugin_service: PluginService) -> Optional[Connection]: + raise AwsWrapperError(Messages.get("RejectConnectRouting.InProgressCantConnect")) + + +class SubstituteConnectRouting(BaseRouting, ConnectRouting): + _rds_utils: ClassVar[RdsUtils] = RdsUtils() + + def __init__( + self, + substitute_host_info: HostInfo, + endpoint: Optional[str] = None, + bg_role: Optional[BlueGreenRole] = None, + iam_hosts: Optional[Tuple[HostInfo, ...]] = None, + iam_auth_success_handler: Optional[Callable[[str], None]] = None): + super().__init__(endpoint, bg_role) + self._substitute_host_info = substitute_host_info + self._iam_hosts = iam_hosts + self._iam_auth_success_handler = iam_auth_success_handler + + def __str__(self): + iam_hosts_str = ',\n '.join(str(iam_host) for iam_host in self._iam_hosts) + return (f"{self.__class__.__name__}(\n" + f" substitute_host_info={self._substitute_host_info},\n" + f" endpoint={self._endpoint},\n" + f" bg_role={self._bg_role},\n" + f" iam_hosts=[\n" + f" {iam_hosts_str}\n" + f" ],\n" + f" hash={hex(hash(self))}\n" + f")") + + def apply( + self, + plugin: Plugin, + host_info: HostInfo, + props: Properties, + is_initial_connection: bool, + connect_func: Callable, + plugin_service: PluginService) -> Optional[Connection]: + if 
not SubstituteConnectRouting._rds_utils.is_ip(self._substitute_host_info.host): + return plugin_service.connect(self._substitute_host_info, props, plugin) + + is_iam_in_use = plugin_service.is_plugin_in_use(IamAuthPlugin) + if not is_iam_in_use: + return plugin_service.connect(self._substitute_host_info, props, plugin) + + if not self._iam_hosts: + raise AwsWrapperError(Messages.get("SubstituteConnectRouting.RequireIamHost")) + + for iam_host in self._iam_hosts: + rerouted_host_info = copy(self._substitute_host_info) + rerouted_host_info.host_id = iam_host.host_id + rerouted_host_info.availability = HostAvailability.AVAILABLE + rerouted_host_info.add_alias(iam_host.host) + + rerouted_props = copy(props) + WrapperProperties.IAM_HOST.set(rerouted_props, iam_host.host) + if iam_host.is_port_specified(): + WrapperProperties.IAM_DEFAULT_PORT.set(rerouted_props, iam_host.port) + + try: + conn = plugin_service.connect(rerouted_host_info, rerouted_props) + if self._iam_auth_success_handler is not None: + try: + self._iam_auth_success_handler(iam_host.host) + except Exception: + pass # do nothing + + return conn + except AwsWrapperError as e: + if not plugin_service.is_login_exception(e): + raise e + # do nothing - try with another iam host + + raise AwsWrapperError( + Messages.get_formatted( + "SubstituteConnectRouting.InProgressCantOpenConnection", self._substitute_host_info.url)) + + +class SuspendConnectRouting(BaseRouting, ConnectRouting): + _TELEMETRY_SWITCHOVER: ClassVar[str] = "Blue/Green switchover" + _SLEEP_TIME_MS = 100 + + def __init__( + self, + endpoint: Optional[str], + bg_role: Optional[BlueGreenRole], + bg_id: str): + super().__init__(endpoint, bg_role) + self._bg_id = bg_id + + def apply( + self, + plugin: Plugin, + host_info: HostInfo, + props: Properties, + is_initial_connection: bool, + connect_func: Callable, + plugin_service: PluginService) -> Optional[Connection]: + logger.debug("SuspendConnectRouting.InProgressSuspendConnect") + + telemetry_factory = plugin_service.get_telemetry_factory() + telemetry_context = telemetry_factory.open_telemetry_context( + SuspendConnectRouting._TELEMETRY_SWITCHOVER, TelemetryTraceLevel.NESTED) + + bg_status = plugin_service.get_status(BlueGreenStatus, self._bg_id) + timeout_ms = WrapperProperties.BG_CONNECT_TIMEOUT_MS.get_int(props) + start_time_sec = time.time() + end_time_sec = start_time_sec + timeout_ms / 1_000 + + try: + while time.time() <= end_time_sec and \ + bg_status is not None and \ + bg_status.phase == BlueGreenPhase.IN_PROGRESS: + self.delay(SuspendConnectRouting._SLEEP_TIME_MS, bg_status, plugin_service, self._bg_id) + bg_status = plugin_service.get_status(BlueGreenStatus, self._bg_id) + + if bg_status is not None and bg_status.phase == BlueGreenPhase.IN_PROGRESS: + raise TimeoutError( + Messages.get_formatted("SuspendConnectRouting.InProgressTryConnectLater", timeout_ms)) + + logger.debug( + Messages.get_formatted( + "SuspendConnectRouting.SwitchoverCompleteContinueWithConnect", + (time.time() - start_time_sec) * 1000)) + finally: + telemetry_context.close_context() + + # return None so that the next routing can attempt a connection + return None + + +class SuspendUntilCorrespondingHostFoundConnectRouting(BaseRouting, ConnectRouting): + _TELEMETRY_SWITCHOVER: ClassVar[str] = "Blue/Green switchover" + _SLEEP_TIME_MS = 100 + + def __init__( + self, + endpoint: Optional[str], + bg_role: Optional[BlueGreenRole], + bg_id: str): + super().__init__(endpoint, bg_role) + self._bg_id = bg_id + + def apply( + self, + plugin: Plugin, + 
host_info: HostInfo, + props: Properties, + is_initial_connection: bool, + connect_func: Callable, + plugin_service: PluginService) -> Optional[Connection]: + logger.debug("SuspendConnectRouting.WaitConnectUntilCorrespondingHostFound", host_info.host) + + telemetry_factory = plugin_service.get_telemetry_factory() + telemetry_context = telemetry_factory.open_telemetry_context( + SuspendUntilCorrespondingHostFoundConnectRouting._TELEMETRY_SWITCHOVER, TelemetryTraceLevel.NESTED) + + bg_status = plugin_service.get_status(BlueGreenStatus, self._bg_id) + corresponding_pair = None if bg_status is None else bg_status.corresponding_hosts.get(host_info.host) + + timeout_ms = WrapperProperties.BG_CONNECT_TIMEOUT_MS.get_int(props) + start_time_sec = time.time() + end_time_sec = start_time_sec + timeout_ms / 1_000 + + try: + while time.time() <= end_time_sec and \ + bg_status is not None and \ + bg_status.phase != BlueGreenPhase.COMPLETED and \ + (corresponding_pair is None or corresponding_pair[1] is None): + # wait until the corresponding host is found, or until switchover is completed + self.delay( + SuspendUntilCorrespondingHostFoundConnectRouting._SLEEP_TIME_MS, bg_status, plugin_service, self._bg_id) + bg_status = plugin_service.get_status(BlueGreenStatus, self._bg_id) + corresponding_pair = None if bg_status is None else bg_status.corresponding_hosts.get(host_info.host) + + if bg_status is None or bg_status.phase == BlueGreenPhase.COMPLETED: + logger.debug( + "SuspendUntilCorrespondingHostFoundConnectRouting.CompletedContinueWithConnect", + (time.time() - start_time_sec) * 1000) + return None + + if time.time() > end_time_sec: + raise TimeoutError( + Messages.get_formatted( + "SuspendUntilCorrespondingHostFoundConnectRouting.CorrespondingHostNotFoundTryConnectLater", + host_info.host, + (time.time() - start_time_sec) * 1000)) + + logger.debug( + Messages.get_formatted( + "SuspendUntilCorrespondingHostFoundConnectRouting.CorrespondingHostFoundContinueWithConnect", + host_info.host, + (time.time() - start_time_sec) * 1000)) + finally: + telemetry_context.close_context() + + # return None so that the next routing can attempt a connection + return None + + +class PassThroughExecuteRouting(BaseRouting, ExecuteRouting): + def __init__(self, endpoint: Optional[str], bg_role: Optional[BlueGreenRole]): + super().__init__(endpoint, bg_role) + + def apply( + self, + plugin: Plugin, + plugin_service: PluginService, + props: Properties, + target: type, + method_name: str, + execute_func: Callable, + *args: Any, + **kwargs: Any) -> ValueContainer[Any]: + return ValueContainer.of(execute_func()) + + +class SuspendExecuteRouting(BaseRouting, ExecuteRouting): + _TELEMETRY_SWITCHOVER: ClassVar[str] = "Blue/Green switchover" + _SLEEP_TIME_MS = 100 + + def __init__( + self, + endpoint: Optional[str], + bg_role: Optional[BlueGreenRole], + bg_id: str): + super().__init__(endpoint, bg_role) + self._bg_id = bg_id + + def apply( + self, + plugin: Plugin, + plugin_service: PluginService, + props: Properties, + target: type, + method_name: str, + execute_func: Callable, + *args: Any, + **kwargs: Any) -> ValueContainer[Any]: + logger.debug("SuspendExecuteRouting.InProgressSuspendMethod", method_name) + + telemetry_factory = plugin_service.get_telemetry_factory() + telemetry_context = telemetry_factory.open_telemetry_context( + SuspendExecuteRouting._TELEMETRY_SWITCHOVER, TelemetryTraceLevel.NESTED) + + bg_status = plugin_service.get_status(BlueGreenStatus, self._bg_id) + timeout_ms = 
WrapperProperties.BG_CONNECT_TIMEOUT_MS.get_int(props)
+        start_time_sec = time.time()
+        end_time_sec = start_time_sec + timeout_ms / 1_000
+
+        try:
+            while time.time() <= end_time_sec and \
+                    bg_status is not None and \
+                    bg_status.phase == BlueGreenPhase.IN_PROGRESS:
+                self.delay(SuspendExecuteRouting._SLEEP_TIME_MS, bg_status, plugin_service, self._bg_id)
+                bg_status = plugin_service.get_status(BlueGreenStatus, self._bg_id)
+
+            if bg_status is not None and bg_status.phase == BlueGreenPhase.IN_PROGRESS:
+                raise TimeoutError(
+                    Messages.get_formatted(
+                        "SuspendExecuteRouting.InProgressTryMethodLater",
+                        timeout_ms, method_name))
+
+            logger.debug(
+                Messages.get_formatted(
+                    "SuspendExecuteRouting.SwitchoverCompleteContinueWithMethod",
+                    method_name,
+                    (time.time() - start_time_sec) * 1000))
+        finally:
+            telemetry_context.close_context()
+
+        # return empty so that the next routing can attempt a connection
+        return ValueContainer.empty()
+
+
+class BlueGreenPlugin(Plugin):
+    _SUBSCRIBED_METHODS: Set[str] = {"connect"}
+    _CLOSE_METHODS: ClassVar[Set[str]] = {"Connection.close", "Cursor.close"}
+    _status_providers: ClassVar[ConcurrentDict[str, BlueGreenStatusProvider]] = ConcurrentDict()
+
+    def __init__(self, plugin_service: PluginService, props: Properties):
+        self._plugin_service = plugin_service
+        self._props = props
+        self._telemetry_factory = plugin_service.get_telemetry_factory()
+        self._provider_supplier: Callable[[PluginService, Properties, str], BlueGreenStatusProvider] = \
+            lambda _plugin_service, _props, bg_id: BlueGreenStatusProvider(_plugin_service, _props, bg_id)
+        self._bg_id = WrapperProperties.BG_ID.get_or_default(props).strip().lower()
+        self._rds_utils = RdsUtils()
+        self._bg_status: Optional[BlueGreenStatus] = None
+        self._is_iam_in_use = False
+        self._start_time_ns = AtomicInt(0)
+        self._end_time_ns = AtomicInt(0)
+
+        self._SUBSCRIBED_METHODS.update(self._plugin_service.network_bound_methods)
+
+    @property
+    def subscribed_methods(self) -> Set[str]:
+        return self._SUBSCRIBED_METHODS
+
+    def connect(
+            self,
+            target_driver_func: Callable,
+            driver_dialect: DriverDialect,
+            host_info: HostInfo,
+            props: Properties,
+            is_initial_connection: bool,
+            connect_func: Callable) -> Connection:
+        self._reset_routing_time()
+        try:
+            self._bg_status = self._plugin_service.get_status(BlueGreenStatus, self._bg_id)
+            if self._bg_status is None:
+                return self._open_direct_connection(connect_func, is_initial_connection)
+
+            if is_initial_connection:
+                self._is_iam_in_use = self._plugin_service.is_plugin_in_use(IamAuthPlugin)
+
+            bg_role = self._bg_status.get_role(host_info)
+            if bg_role is None:
+                # The host is not participating in BG switchover - connect directly
+                return self._open_direct_connection(connect_func, is_initial_connection)
+
+            routing = next((r for r in self._bg_status.connect_routings if r.is_match(host_info, bg_role)), None)
+            if not routing:
+                return self._open_direct_connection(connect_func, is_initial_connection)
+
+            self._start_time_ns.set(perf_counter_ns())
+            conn: Optional[Connection] = None
+            while routing is not None and conn is None:
+                conn = routing.apply(self, host_info, props, is_initial_connection, connect_func, self._plugin_service)
+                if conn is not None:
+                    break
+
+                latest_status = self._plugin_service.get_status(BlueGreenStatus, self._bg_id)
+                if latest_status is None:
+                    self._end_time_ns.set(perf_counter_ns())
+                    return self._open_direct_connection(connect_func, is_initial_connection)
+
+                # Re-select the routing against the latest status. Re-using the stale
+                # status object would keep matching the same routing and spin forever.
+                self._bg_status = latest_status
+                routing = \
+                    next((r for r in self._bg_status.connect_routings if r.is_match(host_info, bg_role)), None)
+
+            self._end_time_ns.set(perf_counter_ns())
+            if conn is None:
+                conn = connect_func()
+
+            if is_initial_connection:
+                self._init_status_provider()
+
+            return conn
+        finally:
+            if self._start_time_ns.get() > 0:
+                self._end_time_ns.compare_and_set(0, perf_counter_ns())
+
+    def _reset_routing_time(self):
+        self._start_time_ns.set(0)
+        self._end_time_ns.set(0)
+
+    def _open_direct_connection(self, connect_func: Callable, is_initial_connection: bool) -> Connection:
+        conn = connect_func()
+        if is_initial_connection:
+            self._init_status_provider()
+
+        return conn
+
+    def _init_status_provider(self):
+        self._status_providers.compute_if_absent(
+            self._bg_id,
+            lambda key: self._provider_supplier(self._plugin_service, self._props, self._bg_id))
+
+    def execute(self, target: type, method_name: str, execute_func: Callable, *args: Any, **kwargs: Any) -> Any:
+        self._reset_routing_time()
+        try:
+            self._init_status_provider()
+            if method_name in BlueGreenPlugin._CLOSE_METHODS:
+                return execute_func()
+
+            self._bg_status = self._plugin_service.get_status(BlueGreenStatus, self._bg_id)
+            if self._bg_status is None:
+                return execute_func()
+
+            host_info = self._plugin_service.current_host_info
+            bg_role = None if host_info is None else self._bg_status.get_role(host_info)
+            if bg_role is None:
+                # The host is not participating in BG switchover - execute directly
+                return execute_func()
+
+            routing = next((r for r in self._bg_status.execute_routings if r.is_match(host_info, bg_role)), None)
+            if routing is None:
+                return execute_func()
+
+            result_container: ValueContainer[Any] = ValueContainer.empty()
+            self._start_time_ns.set(perf_counter_ns())
+            while routing is not None and not result_container.is_present():
+                result_container = routing.apply(
+                    self,
+                    self._plugin_service,
+                    self._props,
+                    target,
+                    method_name,
+                    execute_func,
+                    *args,
+                    **kwargs)
+                if result_container.is_present():
+                    break
+
+                latest_status = self._plugin_service.get_status(BlueGreenStatus, self._bg_id)
+                if latest_status is None:
+                    self._end_time_ns.set(perf_counter_ns())
+                    return execute_func()
+
+                # Re-select the routing against the latest status. Re-using the stale
+                # status object would keep matching the same routing and spin forever.
+                self._bg_status = latest_status
+                routing = \
+                    next((r for r in self._bg_status.execute_routings if r.is_match(host_info, bg_role)), None)
+
+            self._end_time_ns.set(perf_counter_ns())
+            if result_container.is_present():
+                return result_container.get()
+
+            return execute_func()
+        finally:
+            if self._start_time_ns.get() > 0:
+                self._end_time_ns.compare_and_set(0, perf_counter_ns())
+
+    # For testing purposes only.
+    def get_hold_time_ns(self) -> int:
+        if self._start_time_ns.get() == 0:
+            return 0
+
+        if self._end_time_ns.get() == 0:
+            return perf_counter_ns() - self._start_time_ns.get()
+        else:
+            return self._end_time_ns.get() - self._start_time_ns.get()
+
+
+class BlueGreenPluginFactory(PluginFactory):
+    def get_instance(self, plugin_service: PluginService, props: Properties) -> Plugin:
+        return BlueGreenPlugin(plugin_service, props)
+
+
+BlueGreenInterimStatusProcessor = Callable[[BlueGreenRole, BlueGreenInterimStatus], None]
+
+
+class BlueGreenStatusMonitor:
+    _DEFAULT_STATUS_CHECK_INTERVAL_MS: ClassVar[int] = 5 * 60_000  # 5 minutes
+    _BG_CLUSTER_ID: ClassVar[str] = "941d00a8-8238-4f7d-bf59-771bff783a8e"
+    _LATEST_KNOWN_VERSION: ClassVar[str] = "1.0"
+    # Add more versions here if needed.
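+    # _collect_status() tolerates unknown versions: anything not in this set is
+    # treated as _LATEST_KNOWN_VERSION and a warning is logged.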
+ _KNOWN_VERSIONS: ClassVar[FrozenSet[str]] = frozenset({_LATEST_KNOWN_VERSION}) + + def __init__( + self, + bg_role: BlueGreenRole, + bg_id: str, + initial_host_info: HostInfo, + plugin_service: PluginService, + props: Properties, + status_check_intervals_ms: Dict[BlueGreenIntervalRate, int], + interim_status_processor: Optional[BlueGreenInterimStatusProcessor] = None): + self._bg_role = bg_role + self._bg_id = bg_id + self._initial_host_info = initial_host_info + self._plugin_service = plugin_service + + # autocommit is False by default. When False, the BG status query may return stale data, so we set it to True. + props["autocommit"] = True + self._props = props + self._status_check_intervals_ms = status_check_intervals_ms + self._interim_status_processor = interim_status_processor + + self._rds_utils = RdsUtils() + self._cv = Condition() + self.should_collect_ip_addresses = Event() + self.should_collect_ip_addresses.set() + self.should_collect_topology = Event() + self.should_collect_topology.set() + self.use_ip_address = Event() + self._panic_mode = Event() + self._panic_mode.set() + self.stop = Event() + self.interval_rate = BlueGreenIntervalRate.BASELINE + self._host_list_provider: Optional[HostListProvider] = None + self._start_topology: Tuple[HostInfo, ...] = () + self._current_topology: Tuple[HostInfo, ...] = () + self._start_ip_addresses_by_host: ConcurrentDict[str, ValueContainer[str]] = ConcurrentDict() + self._current_ip_addresses_by_host: ConcurrentDict[str, ValueContainer[str]] = ConcurrentDict() + self._all_start_topology_ip_changed = False + self._all_start_topology_endpoints_removed = False + self._all_topology_changed = False + self._current_phase: Optional[BlueGreenPhase] = BlueGreenPhase.NOT_CREATED + self._host_names: Set[str] = set() + self._version = "1.0" + self._port = -1 + self._connection: Optional[Connection] = None + self._connection_host_info: Optional[HostInfo] = None + self._connected_ip_address: Optional[str] = None + self._is_host_info_correct = Event() + self._has_started = Event() + + db_dialect = self._plugin_service.database_dialect + if not isinstance(db_dialect, BlueGreenDialect): + raise AwsWrapperError(Messages.get_formatted("BlueGreenStatusMonitor.UnexpectedDialect", db_dialect)) + + self._bg_dialect: BlueGreenDialect = cast('BlueGreenDialect', self._plugin_service.database_dialect) + + self._open_connection_thread: Optional[Thread] = None + self._monitor_thread = Thread(daemon=True, name="BlueGreenMonitorThread", target=self._run) + + def start(self): + if not self._has_started.is_set(): + self._has_started.set() + self._monitor_thread.start() + + def _run(self): + try: + while not self.stop.is_set(): + try: + old_phase = self._current_phase + self._open_connection() + self._collect_status() + self.collect_topology() + self._collect_ip_addresses() + self._update_ip_address_flags() + + if self._current_phase is not None and (old_phase is None or old_phase != self._current_phase): + logger.debug("BlueGreenStatusMonitor.StatusChanged", self._bg_role, self._current_phase) + + if self._interim_status_processor is not None: + self._interim_status_processor( + self._bg_role, + BlueGreenInterimStatus( + self._current_phase, + self._version, + self._port, + self._start_topology, + self._start_ip_addresses_by_host, + self._current_topology, + self._current_ip_addresses_by_host, + self._host_names, + self._all_start_topology_ip_changed, + self._all_start_topology_endpoints_removed, + self._all_topology_changed) + ) + + interval_rate = 
BlueGreenIntervalRate.HIGH if self._panic_mode.is_set() else self.interval_rate + delay_ms = self._status_check_intervals_ms.get( + interval_rate, BlueGreenStatusMonitor._DEFAULT_STATUS_CHECK_INTERVAL_MS) + self._delay(delay_ms) + except Exception as e: + logger.warning("BlueGreenStatusMonitor.MonitoringUnhandledException", self._bg_role, e) + + finally: + self._close_connection() + logger.debug("BlueGreenStatusMonitor.ThreadCompleted", self._bg_role) + + def _open_connection(self): + conn = self._connection + if not self._is_connection_closed(conn): + return + + if self._open_connection_thread is not None: + if self._open_connection_thread.is_alive(): + return # The task to open the connection is in progress, let's wait. + elif not self._panic_mode.is_set(): + return # The connection should be open by now since the open connection task is not running. + + self._connection = None + self._panic_mode.set() + self._open_connection_thread = \ + Thread(daemon=True, name="BlueGreenMonitorConnectionOpener", target=self._open_connection_task) + self._open_connection_thread.start() + + def _open_connection_task(self): + host_info = self._connection_host_info + ip_address = self._connected_ip_address + if host_info is None: + self._connection_host_info = self._initial_host_info + host_info = self._initial_host_info + self._connected_ip_address = None + ip_address = None + self._is_host_info_correct.clear() + + try: + if self.use_ip_address.is_set() and ip_address is not None: + ip_host_info = copy(host_info) + ip_host_info.host = ip_address + props_copy = copy(self._props) + WrapperProperties.IAM_HOST.set(props_copy, ip_host_info.host) + + logger.debug( + "BlueGreenStatusMonitor.OpeningConnectionWithIp", self._bg_role, ip_host_info.host) + self._connection = self._plugin_service.force_connect(ip_host_info, props_copy) + logger.debug( + "BlueGreenStatusMonitor.OpenedConnectionWithIp", self._bg_role, ip_host_info.host) + else: + logger.debug("BlueGreenStatusMonitor.OpeningConnection", self._bg_role, host_info.host) + self._connection = self._plugin_service.force_connect(host_info, self._props) + self._connected_ip_address = self._get_ip_address(host_info.host).or_else(None) + logger.debug("BlueGreenStatusMonitor.OpenedConnection", self._bg_role, host_info.host) + + self._panic_mode.clear() + self._notify_changes() + except Exception: + # Attempt to open connection failed. 
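+            # Entering panic mode makes the monitor loop (_run) poll at the HIGH
+            # interval rate until a connection is re-established.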
+ self._connection = None + self._panic_mode.set() + self._notify_changes() + + def _get_ip_address(self, host: str) -> ValueContainer[str]: + try: + return ValueContainer.of(socket.gethostbyname(host)) + except socket.gaierror: + return ValueContainer.empty() + + def _notify_changes(self): + with self._cv: + self._cv.notify_all() + + def _collect_status(self): + conn = self._connection + try: + if self._is_connection_closed(conn): + return + + if not self._bg_dialect.is_blue_green_status_available(conn): + if self._plugin_service.driver_dialect.is_closed(conn): + self._connection = None + self._current_phase = None + self._panic_mode.set() + else: + self._current_phase = BlueGreenPhase.NOT_CREATED + logger.debug( + "BlueGreenStatusMonitor.StatusNotAvailable", self._bg_role, BlueGreenPhase.NOT_CREATED) + return + + status_entries = [] + with conn.cursor() as cursor: + cursor.execute(self._bg_dialect.blue_green_status_query) + for record in cursor: + # columns: version, endpoint, port, role, status + version = record[0] + if version not in BlueGreenStatusMonitor._KNOWN_VERSIONS: + self._version = BlueGreenStatusMonitor._LATEST_KNOWN_VERSION + logger.warning( + "BlueGreenStatusMonitor.UsesVersion", self._bg_role, version, self._version) + + endpoint = record[1] + port = record[2] + bg_role = BlueGreenRole.parse_role(record[3], self._version) + phase = BlueGreenPhase.parse_phase(record[4]) + + if self._bg_role != bg_role: + continue + + status_entries.append(BlueGreenDbStatusInfo(version, endpoint, port, phase, bg_role)) + + # Attempt to find the writer cluster status info + status_info = next((status for status in status_entries + if self._rds_utils.is_writer_cluster_dns(status.endpoint) and + self._rds_utils.is_not_old_instance(status.endpoint)), + None) + if status_info is None: + # Grab an instance endpoint instead + status_info = next((status for status in status_entries + if self._rds_utils.is_rds_instance(status.endpoint) and + self._rds_utils.is_not_old_instance(status.endpoint)), + None) + else: + # Writer cluster endpoint has been found, add the reader cluster endpoint as well. + self._host_names.add(status_info.endpoint.replace(".cluster-", ".cluster-ro-")) + + if status_info is None: + if len(status_entries) == 0: + # The status table may have no entries after BGD is completed. The old1 cluster/instance has + # been separated and no longer receives updates from the related green cluster/instance. + if self._bg_role != BlueGreenRole.SOURCE: + logger.warning("BlueGreenStatusMonitor.NoEntriesInStatusTable", self._bg_role) + + self._current_phase = None + else: + self._current_phase = status_info.phase + self._version = status_info.version + self._port = status_info.port + + if self.should_collect_topology.is_set(): + current_host_names = {status.endpoint.lower() for status in status_entries + if status.endpoint is not None and + self._rds_utils.is_not_old_instance(status.endpoint)} + self._host_names.update(current_host_names) + + if not self._is_host_info_correct.is_set() and status_info is not None: + # We connected to an initial host info that might not be the desired blue or green cluster. Let's check + # if we need to reconnect to the correct one. + status_info_ip_address = self._get_ip_address(status_info.endpoint).or_else(None) + connected_ip_address = self._connected_ip_address + if connected_ip_address is not None and connected_ip_address != status_info_ip_address: + # We are not connected to the desired blue or green cluster, we need to reconnect. 
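+                    # Reconnection is delegated to the monitor loop: closing the
+                    # connection and setting panic mode makes _open_connection spawn
+                    # a new opener task on the next iteration.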
+ self._connection_host_info = HostInfo(host=status_info.endpoint, port=status_info.port) + self._props["host"] = status_info.endpoint + self._is_host_info_correct.set() + self._close_connection() + self._panic_mode.set() + else: + # We are already connected to the right host. + self._is_host_info_correct.set() + self._panic_mode.clear() + + if self._is_host_info_correct.is_set() and self._host_list_provider is None: + # A connection to the correct cluster (blue or green) has been established. Let's initialize the host + # list provider. + self._init_host_list_provider() + except Exception as e: + if not self._is_connection_closed(self._connection): + # It's normal to get a connection closed error during BGD switchover, but the connection isn't closed so + # let's log the error. + logger.debug("BlueGreenStatusMonitor.UnhandledException", self._bg_role, e) + self._close_connection() + self._panic_mode.set() + + def _close_connection(self): + conn = self._connection + self._connection = None + if conn is not None and not self._plugin_service.driver_dialect.is_closed(conn): + try: + conn.close() + except Exception: + pass + + def _init_host_list_provider(self): + if self._host_list_provider is not None or not self._is_host_info_correct.is_set(): + return + + # We need to instantiate a separate HostListProvider with a special unique cluster ID to avoid interference with + # other HostListProviders opened for this cluster. Blue and Green clusters should have different cluster IDs. + + props_copy = copy(self._props) + cluster_id = f"{self._bg_id}::{self._bg_role}::{BlueGreenStatusMonitor._BG_CLUSTER_ID}" + WrapperProperties.CLUSTER_ID.set(props_copy, cluster_id) + logger.debug("BlueGreenStatusMonitor.CreateHostListProvider", self._bg_role, cluster_id) + + host_info = self._connection_host_info + if host_info is None: + logger.warning("BlueGreenStatusMonitor.HostInfoNone") + return + + host_list_provider_supplier = self._plugin_service.database_dialect.get_host_list_provider_supplier() + host_list_provider_service: HostListProviderService = cast('HostListProviderService', self._plugin_service) + self._host_list_provider = host_list_provider_supplier(host_list_provider_service, props_copy) + + def _is_connection_closed(self, conn: Optional[Connection]) -> bool: + return conn is None or self._plugin_service.driver_dialect.is_closed(conn) + + def _delay(self, delay_ms: int): + start_ns = perf_counter_ns() + end_ns = start_ns + delay_ms * 1_000_000 + initial_interval_rate = self.interval_rate + initial_panic_mode_val = self._panic_mode.is_set() + min_delay_sec = min(delay_ms, 50) / 1_000 + + while self.interval_rate == initial_interval_rate and \ + perf_counter_ns() < end_ns and \ + not self.stop.is_set() and \ + initial_panic_mode_val == self._panic_mode.is_set(): + with self._cv: + self._cv.wait(min_delay_sec) + + def collect_topology(self): + if self._host_list_provider is None: + return + + conn = self._connection + if self._is_connection_closed(conn): + return + + self._current_topology = self._host_list_provider.force_refresh(conn) + if self.should_collect_topology.is_set(): + self._start_topology = self._current_topology + + current_topology_copy = self._current_topology + if current_topology_copy is not None and self.should_collect_topology.is_set(): + self._host_names.update({host_info.host for host_info in current_topology_copy}) + + def _collect_ip_addresses(self): + self._current_ip_addresses_by_host.clear() + if self._host_names is not None: + for host in self._host_names: + 
self._current_ip_addresses_by_host.put_if_absent(host, self._get_ip_address(host)) + + if self.should_collect_ip_addresses.is_set(): + self._start_ip_addresses_by_host.clear() + self._start_ip_addresses_by_host.put_all(self._current_ip_addresses_by_host) + + def _update_ip_address_flags(self): + if self.should_collect_topology.is_set(): + self._all_start_topology_ip_changed = False + self._all_start_topology_endpoints_removed = False + self._all_topology_changed = False + return + + if not self.should_collect_ip_addresses.is_set(): + # Check whether all hosts in start_topology resolve to new IP addresses + self._all_start_topology_ip_changed = self._has_all_start_topology_ip_changed() + + # Check whether all hosts in start_topology no longer have IP addresses. This indicates that the start_topology + # hosts can no longer be resolved because their DNS entries no longer exist. + self._all_start_topology_endpoints_removed = self._are_all_start_endpoints_removed() + + if not self.should_collect_topology.is_set(): + # Check whether all hosts in current_topology do not exist in start_topology + start_topology_hosts = set() if self._start_topology is None else \ + {host_info.host for host_info in self._start_topology} + current_topology_copy = self._current_topology + self._all_topology_changed = ( + current_topology_copy and + start_topology_hosts and + all(host_info.host not in start_topology_hosts for host_info in current_topology_copy)) + + def _has_all_start_topology_ip_changed(self) -> bool: + if not self._start_topology: + return False + + for host_info in self._start_topology: + start_ip_container = self._start_ip_addresses_by_host.get(host_info.host) + current_ip_container = self._current_ip_addresses_by_host.get(host_info.host) + if start_ip_container is None or not start_ip_container.is_present() or \ + current_ip_container is None or not current_ip_container.is_present(): + return False + + if start_ip_container.get() == current_ip_container.get(): + return False + + return True + + def _are_all_start_endpoints_removed(self) -> bool: + start_topology = self._start_topology + if not start_topology: + return False + + for host_info in start_topology: + start_ip_container = self._start_ip_addresses_by_host.get(host_info.host) + current_ip_container = self._current_ip_addresses_by_host.get(host_info.host) + if start_ip_container is None or current_ip_container is None or \ + not start_ip_container.is_present() or current_ip_container.is_present(): + return False + + return True + + def reset_collected_data(self): + self._start_ip_addresses_by_host.clear() + self._start_topology = [] + self._host_names.clear() + + +@dataclass +class BlueGreenDbStatusInfo: + version: str + endpoint: str + port: int + phase: BlueGreenPhase + bg_role: BlueGreenRole + + +class BlueGreenStatusProvider: + _MONITORING_PROPERTY_PREFIX: ClassVar[str] = "blue-green-monitoring-" + _DEFAULT_CONNECT_TIMEOUT_MS: ClassVar[int] = 10_000 + _DEFAULT_SOCKET_TIMEOUT_MS: ClassVar[int] = 10_000 + + def __init__(self, plugin_service: PluginService, props: Properties, bg_id: str): + self._plugin_service = plugin_service + self._props = props + self._bg_id = bg_id + + self._interim_status_hashes = [0, 0] + self._latest_context_hash = 0 + self._interim_statuses: List[Optional[BlueGreenInterimStatus]] = [None, None] + self._host_ip_addresses: ConcurrentDict[str, ValueContainer[str]] = ConcurrentDict() + # The second element of the Tuple is None when no corresponding host is found. 
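+        # e.g. (illustrative) "my-db.cluster-abc.us-east-1.rds.amazonaws.com" ->
+        #     (HostInfo(<blue writer>), HostInfo(<green writer>)); see _update_corresponding_hosts().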
+ self._corresponding_hosts: ConcurrentDict[str, Tuple[HostInfo, Optional[HostInfo]]] = ConcurrentDict() + # Keys are host URLs (port excluded) + self._roles_by_host: ConcurrentDict[str, BlueGreenRole] = ConcurrentDict() + self._iam_auth_success_hosts: ConcurrentDict[str, ConcurrentSet[str]] = ConcurrentDict() + self._green_host_name_change_times: ConcurrentDict[str, datetime] = ConcurrentDict() + self._summary_status: Optional[BlueGreenStatus] = None + self._latest_phase = BlueGreenPhase.NOT_CREATED + self._rollback = False + self._blue_dns_update_completed = False + self._green_dns_removed = False + self._green_topology_changed = False + self._all_green_hosts_changed_name = False + self._post_status_end_time_ns = 0 + self._process_status_lock = RLock() + self._status_check_intervals_ms: Dict[BlueGreenIntervalRate, int] = {} + self._phase_times_ns: ConcurrentDict[str, PhaseTimeInfo] = ConcurrentDict() + self._rds_utils = RdsUtils() + + self._switchover_timeout_ns = WrapperProperties.BG_SWITCHOVER_TIMEOUT_MS.get_int(props) * 1_000_000 + self._suspend_blue_connections_when_in_progress = ( + WrapperProperties.BG_SUSPEND_NEW_BLUE_CONNECTIONS.get_bool(props)) + self._status_check_intervals_ms.update({ + BlueGreenIntervalRate.BASELINE: WrapperProperties.BG_INTERVAL_BASELINE_MS.get_int(props), + BlueGreenIntervalRate.INCREASED: WrapperProperties.BG_INTERVAL_INCREASED_MS.get_int(props), + BlueGreenIntervalRate.HIGH: WrapperProperties.BG_INTERVAL_HIGH_MS.get_int(props) + }) + + dialect = self._plugin_service.database_dialect + if not isinstance(dialect, BlueGreenDialect): + raise AwsWrapperError( + Messages.get_formatted( + "BlueGreenStatusProvider.UnsupportedDialect", self._bg_id, dialect.__class__.__name__)) + + current_host_info = self._plugin_service.current_host_info + blue_monitor = BlueGreenStatusMonitor( + BlueGreenRole.SOURCE, + self._bg_id, + current_host_info, + self._plugin_service, + self._get_monitoring_props(), + self._status_check_intervals_ms, + self._process_interim_status) + green_monitor = BlueGreenStatusMonitor( + BlueGreenRole.TARGET, + self._bg_id, + current_host_info, + self._plugin_service, + self._get_monitoring_props(), + self._status_check_intervals_ms, + self._process_interim_status) + + self._monitors: List[BlueGreenStatusMonitor] = [blue_monitor, green_monitor] + + for monitor in self._monitors: + monitor.start() + + def _get_monitoring_props(self) -> Properties: + monitoring_props = copy(self._props) + for key in self._props.keys(): + if key.startswith(BlueGreenStatusProvider._MONITORING_PROPERTY_PREFIX): + new_key = key[len(BlueGreenStatusProvider._MONITORING_PROPERTY_PREFIX):] + monitoring_props[new_key] = self._props[key] + monitoring_props.pop(key, None) + + monitoring_props.put_if_absent( + WrapperProperties.CONNECT_TIMEOUT_SEC.name, BlueGreenStatusProvider._DEFAULT_CONNECT_TIMEOUT_MS // 1_000) + monitoring_props.put_if_absent( + WrapperProperties.SOCKET_TIMEOUT_SEC.name, BlueGreenStatusProvider._DEFAULT_SOCKET_TIMEOUT_MS // 1_000) + return monitoring_props + + def _process_interim_status(self, bg_role: BlueGreenRole, interim_status: BlueGreenInterimStatus): + with self._process_status_lock: + status_hash = interim_status.get_custom_hashcode() + context_hash = self._get_context_hash() + if self._interim_status_hashes[bg_role.value] == status_hash and self._latest_context_hash == context_hash: + # no changes detected + return + + logger.debug("BlueGreenStatusProvider.InterimStatus", self._bg_id, bg_role, interim_status) + self._update_phase(bg_role, 
interim_status)
+
+            # Store interim_status and corresponding hash
+            self._interim_statuses[bg_role.value] = interim_status
+            self._interim_status_hashes[bg_role.value] = status_hash
+            self._latest_context_hash = context_hash
+
+            # Update map of IP addresses.
+            self._host_ip_addresses.put_all(interim_status.start_ip_addresses_by_host_map)
+
+            # Update roles_by_host based on the provided host names.
+            self._roles_by_host.put_all({host_name.lower(): bg_role for host_name in interim_status.host_names})
+
+            self._update_corresponding_hosts()
+            self._update_summary_status(bg_role, interim_status)
+            self._update_monitors()
+            self._update_status_cache()
+            self._log_current_context()
+            self._log_switchover_final_summary()
+            self._reset_context_when_completed()
+
+    def _get_context_hash(self) -> int:
+        result = self._get_value_hash(1, str(self._all_green_hosts_changed_name))
+        result = self._get_value_hash(result, str(len(self._iam_auth_success_hosts)))
+        return result
+
+    def _get_value_hash(self, current_hash: int, val: str) -> int:
+        return current_hash * 31 + hash(val)
+
+    def _update_phase(self, bg_role: BlueGreenRole, interim_status: BlueGreenInterimStatus):
+        role_status = self._interim_statuses[bg_role.value]
+        latest_phase = BlueGreenPhase.NOT_CREATED if role_status is None else role_status.phase
+        if latest_phase is not None and \
+                interim_status.phase is not None and \
+                interim_status.phase.phase_value < latest_phase.phase_value:
+            self._rollback = True
+            logger.debug("BlueGreenStatusProvider.Rollback", self._bg_id)
+
+        if interim_status.phase is None:
+            return
+
+        # The phase should not move backwards unless we're rolling back.
+        if self._rollback:
+            if interim_status.phase.phase_value < self._latest_phase.phase_value:
+                self._latest_phase = interim_status.phase
+        else:
+            if interim_status.phase.phase_value >= self._latest_phase.phase_value:
+                self._latest_phase = interim_status.phase
+
+    def _update_corresponding_hosts(self):
+        """
+        Update corresponding hosts. The blue writer host is mapped to the green writer host, and each blue reader
+        host is mapped to a green reader host.
+        """
+
+        self._corresponding_hosts.clear()
+        source_status = self._interim_statuses[BlueGreenRole.SOURCE.value]
+        target_status = self._interim_statuses[BlueGreenRole.TARGET.value]
+        if source_status is None or target_status is None:
+            return
+
+        if source_status.start_topology and target_status.start_topology:
+            blue_writer_host_info = self._get_writer_host(BlueGreenRole.SOURCE)
+            green_writer_host_info = self._get_writer_host(BlueGreenRole.TARGET)
+            sorted_blue_readers = self._get_reader_hosts(BlueGreenRole.SOURCE)
+            sorted_green_readers = self._get_reader_hosts(BlueGreenRole.TARGET)
+
+            if blue_writer_host_info is not None:
+                # green_writer_host_info may be None, but that will be handled properly by the corresponding routing.
+                self._corresponding_hosts.put(
+                    blue_writer_host_info.host, (blue_writer_host_info, green_writer_host_info))
+
+            if sorted_blue_readers:
+                # Map blue readers to green hosts
+                if sorted_green_readers:
+                    # Map each blue reader to a green reader.
+                    green_index = 0
+                    for blue_host_info in sorted_blue_readers:
+                        self._corresponding_hosts.put(
+                            blue_host_info.host, (blue_host_info, sorted_green_readers[green_index]))
+                        green_index += 1
+                        # The modulo operation prevents us from exceeding the bounds of sorted_green_readers if there are
+                        # more blue readers than green readers. In this case, multiple blue readers may be mapped to the
+                        # same green reader.
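+                        # e.g. with 3 blue readers and 2 green readers the mapping is
+                        # blue0 -> green0, blue1 -> green1, blue2 -> green0.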
+                        green_index %= len(sorted_green_readers)
+                else:
+                    # There are no green readers - map all blue reader hosts to the green writer.
+                    for blue_host_info in sorted_blue_readers:
+                        self._corresponding_hosts.put(blue_host_info.host, (blue_host_info, green_writer_host_info))
+
+        if source_status.host_names and target_status.host_names:
+            blue_hosts = source_status.host_names
+            green_hosts = target_status.host_names
+
+            # Map blue writer cluster host to green writer cluster host.
+            blue_cluster_host = next(
+                (blue_host for blue_host in blue_hosts if self._rds_utils.is_writer_cluster_dns(blue_host)),
+                None)
+            green_cluster_host = next(
+                (green_host for green_host in green_hosts if self._rds_utils.is_writer_cluster_dns(green_host)),
+                None)
+            if blue_cluster_host and green_cluster_host:
+                self._corresponding_hosts.put_if_absent(
+                    blue_cluster_host, (HostInfo(host=blue_cluster_host), HostInfo(host=green_cluster_host)))
+
+            # Map blue reader cluster host to green reader cluster host.
+            blue_reader_cluster_host = next(
+                (blue_host for blue_host in blue_hosts if self._rds_utils.is_reader_cluster_dns(blue_host)),
+                None)
+            green_reader_cluster_host = next(
+                (green_host for green_host in green_hosts if self._rds_utils.is_reader_cluster_dns(green_host)),
+                None)
+            if blue_reader_cluster_host and green_reader_cluster_host:
+                self._corresponding_hosts.put_if_absent(
+                    blue_reader_cluster_host,
+                    (HostInfo(host=blue_reader_cluster_host), HostInfo(host=green_reader_cluster_host)))
+
+            # Map blue custom cluster hosts to green custom cluster hosts.
+            for blue_host in blue_hosts:
+                if not self._rds_utils.is_rds_custom_cluster_dns(blue_host):
+                    continue
+
+                custom_cluster_name = self._rds_utils.get_cluster_id(blue_host)
+                if not custom_cluster_name:
+                    continue
+
+                corresponding_green_host = next(
+                    (green_host for green_host in green_hosts
+                     if self._rds_utils.is_rds_custom_cluster_dns(green_host)
+                     and custom_cluster_name == self._rds_utils.remove_green_instance_prefix(
+                         self._rds_utils.get_cluster_id(green_host))),
+                    None
+                )
+
+                if corresponding_green_host:
+                    self._corresponding_hosts.put_if_absent(
+                        blue_host, (HostInfo(blue_host), HostInfo(corresponding_green_host)))
+
+    def _get_writer_host(self, bg_role: BlueGreenRole) -> Optional[HostInfo]:
+        role_status = self._interim_statuses[bg_role.value]
+        if role_status is None:
+            return None
+
+        hosts = role_status.start_topology
+        return next((host for host in hosts if host.role == HostRole.WRITER), None)
+
+    def _get_reader_hosts(self, bg_role: BlueGreenRole) -> List[HostInfo]:
+        role_status = self._interim_statuses[bg_role.value]
+        if role_status is None:
+            return []
+
+        hosts = role_status.start_topology
+        reader_hosts = [host for host in hosts if host.role != HostRole.WRITER]
+        reader_hosts.sort(key=lambda host_info: host_info.host)
+        return reader_hosts
+
+    def _update_summary_status(self, bg_role: BlueGreenRole, interim_status: BlueGreenInterimStatus):
+        if self._latest_phase == BlueGreenPhase.NOT_CREATED:
+            self._summary_status = BlueGreenStatus(self._bg_id, BlueGreenPhase.NOT_CREATED)
+
+        elif self._latest_phase == BlueGreenPhase.CREATED:
+            self._update_dns_flags(bg_role, interim_status)
+            self._summary_status = self._get_status_of_created()
+
+        elif self._latest_phase == BlueGreenPhase.PREPARATION:
+            self._start_switchover_timer()
+            self._update_dns_flags(bg_role, interim_status)
+            self._summary_status = self._get_status_of_preparation()
+
+        elif self._latest_phase == BlueGreenPhase.IN_PROGRESS:
+            self._update_dns_flags(bg_role, 
interim_status)
+            self._summary_status = self._get_status_of_in_progress()
+
+        elif self._latest_phase == BlueGreenPhase.POST:
+            self._update_dns_flags(bg_role, interim_status)
+            self._summary_status = self._get_status_of_post()
+
+        elif self._latest_phase == BlueGreenPhase.COMPLETED:
+            self._update_dns_flags(bg_role, interim_status)
+            self._summary_status = self._get_status_of_completed()
+
+        else:
+            raise ValueError(Messages.get_formatted("BlueGreenStatusProvider.UnknownPhase", self._bg_id, self._latest_phase))
+
+    def _update_dns_flags(self, bg_role: BlueGreenRole, interim_status: BlueGreenInterimStatus):
+        if bg_role == BlueGreenRole.SOURCE and not self._blue_dns_update_completed and interim_status.all_start_topology_ip_changed:
+            logger.debug("BlueGreenStatusProvider.BlueDnsCompleted", self._bg_id)
+            self._blue_dns_update_completed = True
+            self._store_event_phase_time("Blue DNS updated")
+
+        if bg_role == BlueGreenRole.TARGET and not self._green_dns_removed and interim_status.all_start_topology_endpoints_removed:
+            logger.debug("BlueGreenStatusProvider.GreenDnsRemoved", self._bg_id)
+            self._green_dns_removed = True
+            self._store_event_phase_time("Green DNS removed")
+
+        if bg_role == BlueGreenRole.TARGET and not self._green_topology_changed and interim_status.all_topology_changed:
+            logger.debug("BlueGreenStatusProvider.GreenTopologyChanged", self._bg_id)
+            self._green_topology_changed = True
+            self._store_event_phase_time("Green topology changed")
+
+    def _store_event_phase_time(self, key_prefix: str, phase: Optional[BlueGreenPhase] = None):
+        rollback_str = " (rollback)" if self._rollback else ""
+        key = f"{key_prefix}{rollback_str}"
+        self._phase_times_ns.put_if_absent(key, PhaseTimeInfo(datetime.now(), perf_counter_ns(), phase))
+
+    def _start_switchover_timer(self):
+        if self._post_status_end_time_ns == 0:
+            self._post_status_end_time_ns = perf_counter_ns() + self._switchover_timeout_ns
+
+    def _get_status_of_created(self) -> BlueGreenStatus:
+        """
+        New connect requests: go to blue or green hosts; default behaviour; no routing.
+        Existing connections: default behaviour; no action.
+        Execute method calls: default behaviour; no action.
+        """
+        return BlueGreenStatus(
+            self._bg_id,
+            BlueGreenPhase.CREATED,
+            [],
+            [],
+            self._roles_by_host,
+            self._corresponding_hosts
+        )
+
+    def _get_status_of_preparation(self) -> BlueGreenStatus:
+        """
+        New connect requests to blue: route to corresponding IP address.
+        New connect requests to green: route to corresponding IP address.
+        New connect requests with IP address: default behaviour; no routing.
+        Existing connections: default behaviour; no action.
+        Execute method calls: default behaviour; no action.
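+
+        If the switchover timer has expired, the returned status falls through to
+        COMPLETED (or back to CREATED when rolling back); see _is_switchover_timer_expired().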
+ """ + + if self._is_switchover_timer_expired(): + logger.debug("BlueGreenStatusProvider.SwitchoverTimeout") + if self._rollback: + return self._get_status_of_created() + return self._get_status_of_completed() + + connect_routings = self._get_blue_ip_address_connect_routings() + return BlueGreenStatus( + self._bg_id, + BlueGreenPhase.PREPARATION, + connect_routings, + [], + self._roles_by_host, + self._corresponding_hosts + ) + + def _is_switchover_timer_expired(self) -> bool: + return 0 < self._post_status_end_time_ns < perf_counter_ns() + + def _get_blue_ip_address_connect_routings(self) -> List[ConnectRouting]: + connect_routings: List[ConnectRouting] = [] + for host, role in self._roles_by_host.items(): + host_pair = self._corresponding_hosts.get(host) + if role == BlueGreenRole.TARGET or host_pair is None: + continue + + blue_host_info = host_pair[0] + blue_ip_container = self._host_ip_addresses.get(blue_host_info.host) + if blue_ip_container is None or not blue_ip_container.is_present(): + blue_ip_host_info = blue_host_info + else: + blue_ip_host_info = copy(blue_host_info) + blue_ip_host_info.host = blue_ip_container.get() + + host_routing = SubstituteConnectRouting(blue_ip_host_info, host, role, (blue_host_info,)) + interim_status = self._interim_statuses[role.value] + if interim_status is None: + continue + + host_and_port = self._get_host_and_port(host, interim_status.port) + host_and_port_routing = SubstituteConnectRouting(blue_ip_host_info, host_and_port, role, (blue_host_info,)) + connect_routings.extend([host_routing, host_and_port_routing]) + + return connect_routings + + def _get_host_and_port(self, host: str, port: int): + return f"{host}:{port}" if port > 0 else host + + def _get_status_of_in_progress(self) -> BlueGreenStatus: + """ + New connect requests to blue: suspend or route to corresponding IP address (depending on settings). + New connect requests to green: suspend. + New connect requests with IP address: suspend. + Existing connections: default behaviour; no action. + Execute JDBC calls: suspend. + """ + + if self._is_switchover_timer_expired(): + logger.debug("BlueGreenStatusProvider.SwitchoverTimeout") + if self._rollback: + return self._get_status_of_created() + return self._get_status_of_completed() + + connect_routings: List[ConnectRouting] = [] + if self._suspend_blue_connections_when_in_progress: + connect_routings.append(SuspendConnectRouting(None, BlueGreenRole.SOURCE, self._bg_id)) + else: + # If we aren't suspending new blue connections, we should use IP addresses. + connect_routings.extend(self._get_blue_ip_address_connect_routings()) + + connect_routings.append(SuspendConnectRouting(None, BlueGreenRole.TARGET, self._bg_id)) + + ip_addresses: Set[str] = {address_container.get() for address_container in self._host_ip_addresses.values() + if address_container.is_present()} + for ip_address in ip_addresses: + if self._suspend_blue_connections_when_in_progress: + # Check if the IP address belongs to one of the blue hosts. 
+ interim_status = self._interim_statuses[BlueGreenRole.SOURCE.value] + if interim_status is not None and self._interim_status_contains_ip_address(interim_status, ip_address): + host_connect_routing = SuspendConnectRouting(ip_address, None, self._bg_id) + host_and_port = self._get_host_and_port(ip_address, interim_status.port) + host_port_connect_routing = SuspendConnectRouting(host_and_port, None, self._bg_id) + connect_routings.extend([host_connect_routing, host_port_connect_routing]) + continue + + # Check if the IP address belongs to one of the green hosts. + interim_status = self._interim_statuses[BlueGreenRole.TARGET.value] + if interim_status is not None and self._interim_status_contains_ip_address(interim_status, ip_address): + host_connect_routing = SuspendConnectRouting(ip_address, None, self._bg_id) + host_and_port = self._get_host_and_port(ip_address, interim_status.port) + host_port_connect_routing = SuspendConnectRouting(host_and_port, None, self._bg_id) + connect_routings.extend([host_connect_routing, host_port_connect_routing]) + continue + + # All blue and green traffic should be suspended. + execute_routings: List[ExecuteRouting] = [ + SuspendExecuteRouting(None, BlueGreenRole.SOURCE, self._bg_id), + SuspendExecuteRouting(None, BlueGreenRole.TARGET, self._bg_id)] + + # All traffic through connections with IP addresses that belong to blue or green hosts should be suspended. + for ip_address in ip_addresses: + # Check if the IP address belongs to one of the blue hosts. + interim_status = self._interim_statuses[BlueGreenRole.SOURCE.value] + if interim_status is not None and self._interim_status_contains_ip_address(interim_status, ip_address): + host_execute_routing = SuspendExecuteRouting(ip_address, None, self._bg_id) + host_and_port = self._get_host_and_port(ip_address, interim_status.port) + host_port_execute_routing = SuspendExecuteRouting(host_and_port, None, self._bg_id) + execute_routings.extend([host_execute_routing, host_port_execute_routing]) + continue + + # Check if the IP address belongs to one of the green hosts. 
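+            # (If it does, both the bare IP and the "ip:port" form are suspended,
+            # mirroring the blue-host handling above.)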
+ interim_status = self._interim_statuses[BlueGreenRole.TARGET.value] + if interim_status is not None and self._interim_status_contains_ip_address(interim_status, ip_address): + host_execute_routing = SuspendExecuteRouting(ip_address, None, self._bg_id) + host_and_port = self._get_host_and_port(ip_address, interim_status.port) + host_port_execute_routing = SuspendExecuteRouting(host_and_port, None, self._bg_id) + execute_routings.extend([host_execute_routing, host_port_execute_routing]) + continue + + execute_routings.append(SuspendExecuteRouting(ip_address, None, self._bg_id)) + + return BlueGreenStatus( + self._bg_id, + BlueGreenPhase.IN_PROGRESS, + connect_routings, + execute_routings, + self._roles_by_host, + self._corresponding_hosts + ) + + def _interim_status_contains_ip_address(self, interim_status: BlueGreenInterimStatus, ip_address: str) -> bool: + for ip_address_container in interim_status.start_ip_addresses_by_host_map.values(): + if ip_address_container.is_present() and ip_address_container.get() == ip_address: + return True + + return False + + def _get_status_of_post(self) -> BlueGreenStatus: + if self._is_switchover_timer_expired(): + logger.debug("BlueGreenStatusProvider.SwitchoverTimeout") + if self._rollback: + return self._get_status_of_created() + return self._get_status_of_completed() + + return BlueGreenStatus( + self._bg_id, + BlueGreenPhase.POST, + self._get_post_status_connect_routings(), + [], + self._roles_by_host, + self._corresponding_hosts + ) + + def _get_post_status_connect_routings(self) -> List[ConnectRouting]: + if self._blue_dns_update_completed and self._all_green_hosts_changed_name: + return [] if self._green_dns_removed else [RejectConnectRouting(None, BlueGreenRole.TARGET)] + + routings: List[ConnectRouting] = [] + # New connect calls to blue hosts should be routed to green hosts + for host, role in self._roles_by_host.items(): + if role != BlueGreenRole.SOURCE or host not in self._corresponding_hosts.keys(): + continue + + blue_host = host + is_blue_host_instance = self._rds_utils.is_rds_instance(blue_host) + host_pair = self._corresponding_hosts.get(blue_host) + blue_host_info = None if host_pair is None else host_pair[0] + green_host_info = None if host_pair is None else host_pair[1] + + if green_host_info is None: + # The corresponding green host was not found. We need to suspend the connection request. + host_suspend_routing = SuspendUntilCorrespondingHostFoundConnectRouting(blue_host, role, self._bg_id) + interim_status = self._interim_statuses[role.value] + if interim_status is None: + continue + + host_and_port = self._get_host_and_port(blue_host, interim_status.port) + host_port_suspend_routing = ( + SuspendUntilCorrespondingHostFoundConnectRouting(host_and_port, None, self._bg_id)) + routings.extend([host_suspend_routing, host_port_suspend_routing]) + else: + green_host = green_host_info.host + green_ip_container = self._host_ip_addresses.get(green_host) + if green_ip_container is None or not green_ip_container.is_present(): + green_ip_host_info = green_host_info + else: + green_ip_host_info = copy(green_host_info) + green_ip_host_info.host = green_ip_container.get() + + # Check whether the green host has already been connected to a non-prefixed blue IAM host name. + if self._is_already_successfully_connected(green_host, blue_host): + # Green host has already changed its name, and it's not a new non-prefixed blue host. 
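+                    # Only the original blue host name is still needed for IAM
+                    # authentication in this case.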
+                    iam_hosts: Optional[Tuple[HostInfo, ...]] = None if blue_host_info is None else (blue_host_info,)
+                else:
+                    # The green host has not yet changed its name, so we need to try both possible IAM hosts.
+                    iam_hosts = (green_host_info,) if blue_host_info is None else (green_host_info, blue_host_info)
+
+                iam_auth_success_handler = None if is_blue_host_instance \
+                    else lambda iam_host: self._register_iam_host(green_host, iam_host)
+                host_substitute_routing = SubstituteConnectRouting(
+                    green_ip_host_info, blue_host, role, iam_hosts, iam_auth_success_handler)
+                interim_status = self._interim_statuses[role.value]
+                if interim_status is None:
+                    continue
+
+                host_and_port = self._get_host_and_port(blue_host, interim_status.port)
+                host_port_substitute_routing = SubstituteConnectRouting(
+                    green_ip_host_info, host_and_port, role, iam_hosts, iam_auth_success_handler)
+                routings.extend([host_substitute_routing, host_port_substitute_routing])
+
+        if not self._green_dns_removed:
+            routings.append(RejectConnectRouting(None, BlueGreenRole.TARGET))
+
+        return routings
+
+    def _is_already_successfully_connected(self, connect_host: str, iam_host: str) -> bool:
+        success_hosts = self._iam_auth_success_hosts.compute_if_absent(connect_host, lambda _: ConcurrentSet())
+        return success_hosts is not None and iam_host in success_hosts
+
+    def _register_iam_host(self, connect_host: str, iam_host: str):
+        success_hosts = self._iam_auth_success_hosts.compute_if_absent(connect_host, lambda _: ConcurrentSet())
+        if success_hosts is None:
+            success_hosts = ConcurrentSet()
+
+        if connect_host != iam_host and iam_host not in success_hosts:
+            # First successful IAM authentication under a different host name means
+            # the green host has changed its name.
+            self._green_host_name_change_times.compute_if_absent(connect_host, lambda _: datetime.now())
+            logger.debug("BlueGreenStatusProvider.GreenHostChangedName", connect_host, iam_host)
+
+        success_hosts.add(iam_host)
+        if connect_host != iam_host:
+            # Check whether all IAM hosts have changed their names.
+            all_hosts_changed_names = all(
+                any(registered_host != original_host for registered_host in registered_hosts)
+                for original_host, registered_hosts in self._iam_auth_success_hosts.items()
+                if registered_hosts  # Filter out empty sets
+            )
+
+            if all_hosts_changed_names and not self._all_green_hosts_changed_name:
+                logger.debug("BlueGreenStatusProvider.AllGreenHostsChangedName")
+                self._all_green_hosts_changed_name = True
+                self._store_event_phase_time("Green host certificates changed")
+
+    def _get_status_of_completed(self) -> BlueGreenStatus:
+        if self._is_switchover_timer_expired():
+            logger.debug("BlueGreenStatusProvider.SwitchoverTimeout")
+            if self._rollback:
+                return self._get_status_of_created()
+
+            return BlueGreenStatus(
+                self._bg_id, BlueGreenPhase.COMPLETED, [], [], self._roles_by_host, self._corresponding_hosts)
+
+        if not self._blue_dns_update_completed or not self._green_dns_removed:
+            return self._get_status_of_post()
+
+        return BlueGreenStatus(
+            self._bg_id, BlueGreenPhase.COMPLETED, [], [], self._roles_by_host, ConcurrentDict())
+
+    def _update_monitors(self):
+        phase = self._summary_status.phase
+        if phase == BlueGreenPhase.NOT_CREATED:
+            for monitor in self._monitors:
+                monitor.interval_rate = BlueGreenIntervalRate.BASELINE
+                monitor.should_collect_ip_addresses.clear()
+                monitor.should_collect_topology.clear()
+                monitor.use_ip_address.clear()
+        elif phase == BlueGreenPhase.CREATED:
+            for monitor in self._monitors:
+                monitor.interval_rate = BlueGreenIntervalRate.INCREASED
+                monitor.should_collect_ip_addresses.set()
+                monitor.should_collect_topology.set()
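+                # Host names still resolve over DNS in this phase, so monitors keep
+                # connecting by name rather than by collected IP address.
+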
monitor.use_ip_address.clear() + if self._rollback: + monitor.reset_collected_data() + elif phase == BlueGreenPhase.PREPARATION \ + or phase == BlueGreenPhase.IN_PROGRESS \ + or phase == BlueGreenPhase.POST: + for monitor in self._monitors: + monitor.interval_rate = BlueGreenIntervalRate.HIGH + monitor.should_collect_ip_addresses.clear() + monitor.should_collect_topology.clear() + monitor.use_ip_address.set() + elif phase == BlueGreenPhase.COMPLETED: + for monitor in self._monitors: + monitor.interval_rate = BlueGreenIntervalRate.BASELINE + monitor.should_collect_ip_addresses.clear() + monitor.should_collect_topology.clear() + monitor.use_ip_address.clear() + monitor.reset_collected_data() + + # Stop monitoring old1 cluster/instance. + if not self._rollback and self._monitors[BlueGreenRole.SOURCE.value] is not None: + self._monitors[BlueGreenRole.SOURCE.value].stop.set() + else: + raise UnsupportedOperationError( + Messages.get_formatted( + "BlueGreenStatusProvider.UnknownPhase", self._bg_id, self._summary_status.phase)) + + def _update_status_cache(self): + latest_status = self._plugin_service.get_status(BlueGreenStatus, self._bg_id) + self._plugin_service.set_status(BlueGreenStatus, self._summary_status, self._bg_id) + phase = self._summary_status.phase + self._store_event_phase_time(phase.name, phase) + + if latest_status is not None: + # Notify all waiting threads that the status has been updated. + with latest_status.cv: + latest_status.cv.notify_all() + + def _log_current_context(self): + logger.debug(f"[bg_id: '{self._bg_id}'] Summary status: \n{self._summary_status}") + hosts_str = "\n".join( + f" {blue_host} -> {host_pair[1] if host_pair else None}" + for blue_host, host_pair in self._corresponding_hosts.items()) + logger.debug(f"Corresponding hosts:\n{hosts_str}") + phase_times = \ + "\n".join(f" {event_desc} -> {info.date_time}" for event_desc, info in self._phase_times_ns.items()) + logger.debug(f"Phase times:\n{phase_times}") + change_name_times = \ + "\n".join(f" {host} -> {date_time}" for host, date_time in self._green_host_name_change_times.items()) + logger.debug(f"Green host certificate change times:\n{change_name_times}") + logger.debug("\n" + f" latest_status_phase: {self._latest_phase}\n" + f" blue_dns_update_completed: {self._blue_dns_update_completed}\n" + f" green_dns_removed: {self._green_dns_removed}\n" + f" all_green_hosts_changed_name: {self._all_green_hosts_changed_name}\n" + f" green_topology_changed: {self._green_topology_changed}\n") + + def _log_switchover_final_summary(self): + switchover_completed = (not self._rollback and self._summary_status.phase == BlueGreenPhase.COMPLETED) or \ + (self._rollback and self._summary_status.phase == BlueGreenPhase.CREATED) + has_active_switchover_phases = \ + any(phase_info.phase is not None and phase_info.phase.is_switchover_active_or_completed + for phase_info in self._phase_times_ns.values()) + + if not switchover_completed or not has_active_switchover_phases: + return + + time_zero_phase = BlueGreenPhase.PREPARATION if self._rollback else BlueGreenPhase.IN_PROGRESS + time_zero_key = f"{time_zero_phase.name} (rollback)" if self._rollback else time_zero_phase.name + time_zero = self._phase_times_ns.get(time_zero_key) + sorted_phase_entries = sorted(self._phase_times_ns.items(), key=lambda entry: entry[1].timestamp_ns) + formatted_phase_entries = [ + "{:>28s} {:>18s} ms {:>31s}".format( + str(entry[1].date_time), + "" if time_zero is None else str((entry[1].timestamp_ns - time_zero.timestamp_ns) // 1_000_000), + 
entry[0] + ) for entry in sorted_phase_entries + ] + phase_times_str = "\n".join(formatted_phase_entries) + divider = "----------------------------------------------------------------------------------\n" + header = "{:<28s} {:>21s} {:>31s}\n".format("timestamp", "time offset (ms)", "event") + log_message = (f"[bg_id: '{self._bg_id}']\n{divider}" + f"{header}{divider}" + f"{phase_times_str}\n{divider}") + logger.debug(log_message) + + def _reset_context_when_completed(self): + switchover_completed = (not self._rollback and self._summary_status.phase == BlueGreenPhase.COMPLETED) or \ + (self._rollback and self._summary_status.phase == BlueGreenPhase.CREATED) + has_active_switchover_phases = \ + any(phase_info.phase is not None and phase_info.phase.is_switchover_active_or_completed + for phase_info in self._phase_times_ns.values()) + + if not switchover_completed or not has_active_switchover_phases: + return + + logger.debug("BlueGreenStatusProvider.ResetContext") + self._rollback = False + self._summary_status = None + self._latest_phase = BlueGreenPhase.NOT_CREATED + self._phase_times_ns.clear() + self._blue_dns_update_completed = False + self._green_dns_removed = False + self._green_topology_changed = False + self._all_green_hosts_changed_name = False + self._post_status_end_time_ns = 0 + self._interim_status_hashes = [0, 0] + self._latest_context_hash = 0 + self._interim_statuses = [None, None] + self._host_ip_addresses.clear() + self._corresponding_hosts.clear() + self._roles_by_host.clear() + self._iam_auth_success_hosts.clear() + self._green_host_name_change_times.clear() + + +@dataclass +class PhaseTimeInfo: + date_time: datetime + timestamp_ns: int + phase: Optional[BlueGreenPhase] diff --git a/aws_advanced_python_wrapper/database_dialect.py b/aws_advanced_python_wrapper/database_dialect.py index 40d1bc7d..2249116d 100644 --- a/aws_advanced_python_wrapper/database_dialect.py +++ b/aws_advanced_python_wrapper/database_dialect.py @@ -25,7 +25,7 @@ from .driver_dialect import DriverDialect from .exception_handling import ExceptionHandler -from abc import abstractmethod +from abc import ABC, abstractmethod from concurrent.futures import Executor, ThreadPoolExecutor, TimeoutError from contextlib import closing from enum import Enum, auto @@ -53,12 +53,12 @@ class DialectCode(Enum): # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html - MULTI_AZ_MYSQL = "multi-az-mysql" + MULTI_AZ_CLUSTER_MYSQL = "multi-az-mysql" AURORA_MYSQL = "aurora-mysql" RDS_MYSQL = "rds-mysql" MYSQL = "mysql" - MULTI_AZ_PG = "multi-az-pg" + MULTI_AZ_CLUSTER_PG = "multi-az-pg" AURORA_PG = "aurora-pg" RDS_PG = "rds-pg" PG = "pg" @@ -168,7 +168,7 @@ def query_for_dialect(self, url: str, host_info: Optional[HostInfo], conn: Conne class MysqlDatabaseDialect(DatabaseDialect): _DIALECT_UPDATE_CANDIDATES: Tuple[DialectCode, ...] 
= ( - DialectCode.AURORA_MYSQL, DialectCode.MULTI_AZ_MYSQL, DialectCode.RDS_MYSQL) + DialectCode.AURORA_MYSQL, DialectCode.MULTI_AZ_CLUSTER_MYSQL, DialectCode.RDS_MYSQL) _exception_handler: Optional[ExceptionHandler] = None @property @@ -200,9 +200,10 @@ def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: with closing(conn.cursor()) as cursor: cursor.execute(self.server_version_query) for record in cursor: - for column_value in record: - if "mysql" in column_value.lower(): - return True + if len(record) < 2: + return False + if "mysql" in record[1].lower(): + return True except Exception: if not initial_transaction_status and driver_dialect.is_in_transaction(conn): conn.rollback() @@ -218,7 +219,7 @@ def prepare_conn_props(self, props: Properties): class PgDatabaseDialect(DatabaseDialect): _DIALECT_UPDATE_CANDIDATES: Tuple[DialectCode, ...] = ( - DialectCode.AURORA_PG, DialectCode.MULTI_AZ_PG, DialectCode.RDS_PG) + DialectCode.AURORA_PG, DialectCode.MULTI_AZ_CLUSTER_PG, DialectCode.RDS_PG) _exception_handler: Optional[ExceptionHandler] = None @property @@ -264,18 +265,44 @@ def prepare_conn_props(self, props: Properties): pass -class RdsMysqlDialect(MysqlDatabaseDialect): - _DIALECT_UPDATE_CANDIDATES = (DialectCode.AURORA_MYSQL, DialectCode.MULTI_AZ_MYSQL) +class BlueGreenDialect(ABC): + @property + @abstractmethod + def blue_green_status_query(self) -> str: + ... + + @abstractmethod + def is_blue_green_status_available(self, conn: Connection) -> bool: + ... + + +class RdsMysqlDialect(MysqlDatabaseDialect, BlueGreenDialect): + _DIALECT_UPDATE_CANDIDATES = (DialectCode.AURORA_MYSQL, DialectCode.MULTI_AZ_CLUSTER_MYSQL) + + _BG_STATUS_QUERY = "SELECT version, endpoint, port, role, status FROM mysql.rds_topology" + _BG_STATUS_EXISTS_QUERY = \ + "SELECT 1 AS tmp FROM information_schema.tables WHERE table_schema = 'mysql' AND table_name = 'rds_topology'" def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: initial_transaction_status: bool = driver_dialect.is_in_transaction(conn) try: with closing(conn.cursor()) as cursor: cursor.execute(self.server_version_query) - for record in cursor: - for column_value in record: - if "source distribution" in column_value.lower(): - return True + record = cursor.fetchone() + if record is None or len(record) < 2: + return False + + if "source distribution" != record[1].lower(): + return False + + with closing(conn.cursor()) as cursor: + cursor.execute("SHOW VARIABLES LIKE 'report_host'") + record = cursor.fetchone() + if record is None or len(record) < 2: + return False + + report_host = record[1] + return report_host is not None and report_host != "" except Exception: if not initial_transaction_status and driver_dialect.is_in_transaction(conn): conn.rollback() @@ -286,13 +313,29 @@ def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: def dialect_update_candidates(self) -> Optional[Tuple[DialectCode, ...]]: return RdsMysqlDialect._DIALECT_UPDATE_CANDIDATES + @property + def blue_green_status_query(self) -> str: + return RdsMysqlDialect._BG_STATUS_QUERY -class RdsPgDialect(PgDatabaseDialect): + def is_blue_green_status_available(self, conn: Connection) -> bool: + try: + with closing(conn.cursor()) as cursor: + cursor.execute(RdsMysqlDialect._BG_STATUS_EXISTS_QUERY) + return cursor.fetchone() is not None + except Exception: + return False + + +class RdsPgDialect(PgDatabaseDialect, BlueGreenDialect): _EXTENSIONS_QUERY = ("SELECT (setting LIKE '%rds_tools%') AS rds_tools, " "(setting 
LIKE '%aurora_stat_utils%') AS aurora_stat_utils " "FROM pg_settings " "WHERE name='rds.extensions'") - _DIALECT_UPDATE_CANDIDATES = (DialectCode.AURORA_PG, DialectCode.MULTI_AZ_PG) + _DIALECT_UPDATE_CANDIDATES = (DialectCode.AURORA_PG, DialectCode.MULTI_AZ_CLUSTER_PG) + + _BG_STATUS_QUERY = (f"SELECT version, endpoint, port, role, status " + f"FROM rds_tools.show_topology('aws_advanced_python_wrapper-{DriverInfo.DRIVER_VERSION}')") + _BG_STATUS_EXISTS_QUERY = "SELECT 'rds_tools.show_topology'::regproc" def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: initial_transaction_status: bool = driver_dialect.is_in_transaction(conn) @@ -319,9 +362,21 @@ def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: def dialect_update_candidates(self) -> Optional[Tuple[DialectCode, ...]]: return RdsPgDialect._DIALECT_UPDATE_CANDIDATES + @property + def blue_green_status_query(self) -> str: + return RdsPgDialect._BG_STATUS_QUERY -class AuroraMysqlDialect(MysqlDatabaseDialect, TopologyAwareDatabaseDialect): - _DIALECT_UPDATE_CANDIDATES = (DialectCode.MULTI_AZ_MYSQL,) + def is_blue_green_status_available(self, conn: Connection) -> bool: + try: + with closing(conn.cursor()) as cursor: + cursor.execute(RdsPgDialect._BG_STATUS_EXISTS_QUERY) + return cursor.fetchone() is not None + except Exception: + return False + + +class AuroraMysqlDialect(MysqlDatabaseDialect, TopologyAwareDatabaseDialect, BlueGreenDialect): + _DIALECT_UPDATE_CANDIDATES = (DialectCode.MULTI_AZ_CLUSTER_MYSQL,) _TOPOLOGY_QUERY = ("SELECT SERVER_ID, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END, " "CPU, REPLICA_LAG_IN_MILLISECONDS, LAST_UPDATE_TIMESTAMP " "FROM information_schema.replica_host_status " @@ -330,6 +385,10 @@ class AuroraMysqlDialect(MysqlDatabaseDialect, TopologyAwareDatabaseDialect): _HOST_ID_QUERY = "SELECT @@aurora_server_id" _IS_READER_QUERY = "SELECT @@innodb_read_only" + _BG_STATUS_QUERY = "SELECT version, endpoint, port, role, status FROM mysql.rds_topology" + _BG_STATUS_EXISTS_QUERY = \ + "SELECT 1 AS tmp FROM information_schema.tables WHERE table_schema = 'mysql' AND table_name = 'rds_topology'" + @property def dialect_update_candidates(self) -> Optional[Tuple[DialectCode, ...]]: return AuroraMysqlDialect._DIALECT_UPDATE_CANDIDATES @@ -351,9 +410,21 @@ def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: def get_host_list_provider_supplier(self) -> Callable: return lambda provider_service, props: RdsHostListProvider(provider_service, props) + @property + def blue_green_status_query(self) -> str: + return AuroraMysqlDialect._BG_STATUS_QUERY -class AuroraPgDialect(PgDatabaseDialect, TopologyAwareDatabaseDialect, AuroraLimitlessDialect): - _DIALECT_UPDATE_CANDIDATES: Tuple[DialectCode, ...] = (DialectCode.MULTI_AZ_PG,) + def is_blue_green_status_available(self, conn: Connection) -> bool: + try: + with closing(conn.cursor()) as cursor: + cursor.execute(AuroraMysqlDialect._BG_STATUS_EXISTS_QUERY) + return cursor.fetchone() is not None + except Exception: + return False + + +class AuroraPgDialect(PgDatabaseDialect, TopologyAwareDatabaseDialect, AuroraLimitlessDialect, BlueGreenDialect): + _DIALECT_UPDATE_CANDIDATES: Tuple[DialectCode, ...] 
= (DialectCode.MULTI_AZ_CLUSTER_PG,) _EXTENSIONS_QUERY = "SELECT (setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils " \ "FROM pg_settings WHERE name='rds.extensions'" @@ -371,6 +442,10 @@ class AuroraPgDialect(PgDatabaseDialect, TopologyAwareDatabaseDialect, AuroraLim _IS_READER_QUERY = "SELECT pg_is_in_recovery()" _LIMITLESS_ROUTER_ENDPOINT_QUERY = "SELECT router_endpoint, load FROM aurora_limitless_router_endpoints()" + _BG_STATUS_QUERY = (f"SELECT version, endpoint, port, role, status " + f"FROM get_blue_green_fast_switchover_metadata('aws_advanced_python_wrapper-{DriverInfo.DRIVER_VERSION}')") + _BG_STATUS_EXISTS_QUERY = "SELECT 'get_blue_green_fast_switchover_metadata'::regproc" + @property def dialect_update_candidates(self) -> Optional[Tuple[DialectCode, ...]]: return AuroraPgDialect._DIALECT_UPDATE_CANDIDATES @@ -407,8 +482,20 @@ def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: def get_host_list_provider_supplier(self) -> Callable: return lambda provider_service, props: RdsHostListProvider(provider_service, props) + @property + def blue_green_status_query(self) -> str: + return AuroraPgDialect._BG_STATUS_QUERY + + def is_blue_green_status_available(self, conn: Connection) -> bool: + try: + with closing(conn.cursor()) as cursor: + cursor.execute(AuroraPgDialect._BG_STATUS_EXISTS_QUERY) + return cursor.fetchone() is not None + except Exception: + return False -class MultiAzMysqlDialect(MysqlDatabaseDialect, TopologyAwareDatabaseDialect): + +class MultiAzClusterMysqlDialect(MysqlDatabaseDialect, TopologyAwareDatabaseDialect): _TOPOLOGY_QUERY = "SELECT id, endpoint, port FROM mysql.rds_topology" _WRITER_HOST_QUERY = "SHOW REPLICA STATUS" _WRITER_HOST_COLUMN_INDEX = 39 @@ -423,10 +510,19 @@ def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: initial_transaction_status: bool = driver_dialect.is_in_transaction(conn) try: with closing(conn.cursor()) as cursor: - cursor.execute(MultiAzMysqlDialect._TOPOLOGY_QUERY) + cursor.execute(MultiAzClusterMysqlDialect._TOPOLOGY_QUERY) records = cursor.fetchall() - if records is not None and len(records) > 0: - return True + if not records: + return False + + with closing(conn.cursor()) as cursor: + cursor.execute("SHOW VARIABLES LIKE 'report_host'") + record = cursor.fetchone() + if record is None or len(record) < 2: + return False + + report_host = record[1] + return report_host is not None and report_host != "" except Exception: if not initial_transaction_status and driver_dialect.is_in_transaction(conn): conn.rollback() @@ -456,7 +552,7 @@ def prepare_conn_props(self, props: Properties): props["conn_attrs"].update(extra_conn_attrs) -class MultiAzPgDialect(PgDatabaseDialect, TopologyAwareDatabaseDialect): +class MultiAzClusterPgDialect(PgDatabaseDialect, TopologyAwareDatabaseDialect): # The driver name passed to show_topology is used for RDS metrics purposes. # It is not required for functional correctness. 
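+    # (The name follows the same "aws_advanced_python_wrapper-<version>" convention
+    # as the blue/green status queries above.)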
_TOPOLOGY_QUERY = \ @@ -473,17 +569,18 @@ def dialect_update_candidates(self) -> Optional[Tuple[DialectCode, ...]]: @property def exception_handler(self) -> Optional[ExceptionHandler]: - if MultiAzPgDialect._exception_handler is None: - MultiAzPgDialect._exception_handler = Utils.initialize_class( + if MultiAzClusterPgDialect._exception_handler is None: + MultiAzClusterPgDialect._exception_handler = Utils.initialize_class( "aws_advanced_python_wrapper.utils.pg_exception_handler.MultiAzPgExceptionHandler") - return MultiAzPgDialect._exception_handler + return MultiAzClusterPgDialect._exception_handler def is_dialect(self, conn: Connection, driver_dialect: DriverDialect) -> bool: initial_transaction_status: bool = driver_dialect.is_in_transaction(conn) try: with closing(conn.cursor()) as cursor: - cursor.execute(MultiAzPgDialect._WRITER_HOST_QUERY) - if cursor.fetchone() is not None: + cursor.execute(MultiAzClusterPgDialect._WRITER_HOST_QUERY) + record = cursor.fetchone() + if record is not None and len(record) > 0 and bool(record[0]): return True except Exception: if not initial_transaction_status and driver_dialect.is_in_transaction(conn): @@ -509,8 +606,8 @@ class UnknownDatabaseDialect(DatabaseDialect): DialectCode.RDS_PG, DialectCode.AURORA_MYSQL, DialectCode.AURORA_PG, - DialectCode.MULTI_AZ_MYSQL, - DialectCode.MULTI_AZ_PG) + DialectCode.MULTI_AZ_CLUSTER_MYSQL, + DialectCode.MULTI_AZ_CLUSTER_PG) @property def default_port(self) -> int: @@ -551,11 +648,11 @@ class DatabaseDialectManager(DatabaseDialectProvider): DialectCode.MYSQL: MysqlDatabaseDialect(), DialectCode.RDS_MYSQL: RdsMysqlDialect(), DialectCode.AURORA_MYSQL: AuroraMysqlDialect(), - DialectCode.MULTI_AZ_MYSQL: MultiAzMysqlDialect(), + DialectCode.MULTI_AZ_CLUSTER_MYSQL: MultiAzClusterMysqlDialect(), DialectCode.PG: PgDatabaseDialect(), DialectCode.RDS_PG: RdsPgDialect(), DialectCode.AURORA_PG: AuroraPgDialect(), - DialectCode.MULTI_AZ_PG: MultiAzPgDialect(), + DialectCode.MULTI_AZ_CLUSTER_PG: MultiAzClusterPgDialect(), DialectCode.UNKNOWN: UnknownDatabaseDialect() } diff --git a/aws_advanced_python_wrapper/hostinfo.py b/aws_advanced_python_wrapper/hostinfo.py index ac102fe7..7f151350 100644 --- a/aws_advanced_python_wrapper/hostinfo.py +++ b/aws_advanced_python_wrapper/hostinfo.py @@ -34,7 +34,7 @@ class HostRole(Enum): @dataclass(eq=False) class HostInfo: NO_PORT: ClassVar[int] = -1 - DEFAULT_WEIGHT = 100 + DEFAULT_WEIGHT: ClassVar[int] = 100 def __init__( self, @@ -49,9 +49,9 @@ def __init__( self.host = host self.port = port self.role = role - self._availability = availability + self.availability = availability self.host_availability_strategy = host_availability_strategy - self.weight = weight, + self.weight = weight self.host_id = host_id self.last_update_time = last_update_time @@ -66,11 +66,25 @@ def __eq__(self, other: object): return self.host == other.host \ and self.port == other.port \ - and self._availability == other._availability \ + and self.availability == other.availability \ and self.role == other.role def __str__(self): - return f"HostInfo({self.host}, {self.port}, {self.role}, {self._availability})" + return f"HostInfo({self.host}, {self.port}, {self.role}, {self.availability})" + + def __repr__(self): + return f"HostInfo({self.host}, {self.port}, {self.role}, {self.availability})" + + def __copy__(self): + return HostInfo( + host=self.host, + port=self.port, + role=self.role, + availability=self.availability, + weight=self.weight, + host_id=self.host_id, + last_update_time=self.last_update_time + ) 
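+
+    # Note: copy.copy(host_info) lets the blue/green routing code substitute an IP
+    # address for the host name without mutating the original HostInfo (see
+    # SubstituteConnectRouting in blue_green_plugin.py).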
@property def url(self): @@ -119,15 +133,15 @@ def is_port_specified(self) -> bool: def get_availability(self) -> HostAvailability: if self.host_availability_strategy is not None: - return self.host_availability_strategy.get_host_availability(self._availability) + return self.host_availability_strategy.get_host_availability(self.availability) - return self._availability + return self.availability def get_raw_availability(self) -> HostAvailability: - return self._availability + return self.availability def set_availability(self, availability: HostAvailability): - self._availability = availability + self.availability = availability if self.host_availability_strategy is not None: self.host_availability_strategy.set_host_availability(availability) diff --git a/aws_advanced_python_wrapper/plugin_service.py b/aws_advanced_python_wrapper/plugin_service.py index 4be4fea3..b58f8ea3 100644 --- a/aws_advanced_python_wrapper/plugin_service.py +++ b/aws_advanced_python_wrapper/plugin_service.py @@ -16,8 +16,11 @@ from typing import TYPE_CHECKING, ClassVar, List, Type, TypeVar +from aws_advanced_python_wrapper import LogUtils from aws_advanced_python_wrapper.aurora_initial_connection_strategy_plugin import \ AuroraInitialConnectionStrategyPluginFactory +from aws_advanced_python_wrapper.blue_green_plugin import \ + BlueGreenPluginFactory from aws_advanced_python_wrapper.custom_endpoint_plugin import \ CustomEndpointPluginFactory from aws_advanced_python_wrapper.fastest_response_strategy_plugin import \ @@ -28,6 +31,7 @@ from aws_advanced_python_wrapper.okta_plugin import OktaAuthPluginFactory from aws_advanced_python_wrapper.states.session_state_service import ( SessionStateService, SessionStateServiceImpl) +from aws_advanced_python_wrapper.utils.utils import Utils if TYPE_CHECKING: from aws_advanced_python_wrapper.allowed_and_blocked_hosts import AllowedAndBlockedHosts @@ -111,7 +115,8 @@ def plugin_manager(self, value): self._plugin_manager = value -T = TypeVar('T') +StatusType = TypeVar('StatusType') +UnwrapType = TypeVar('UnwrapType') class PluginService(ExceptionHandler, Protocol): @@ -144,7 +149,7 @@ def set_current_connection(self, connection: Connection, host_info: HostInfo): @property @abstractmethod - def current_host_info(self) -> Optional[HostInfo]: + def current_host_info(self) -> HostInfo: ... @property @@ -292,16 +297,16 @@ def is_plugin_in_use(self, plugin_class: Type[Plugin]): ... @abstractmethod - def set_status(self, clazz: Type[T], status: Optional[T], key: str): + def set_status(self, clazz: Type[StatusType], status: Optional[StatusType], key: str): ... @abstractmethod - def get_status(self, clazz: Type[T], key: str) -> Optional[T]: + def get_status(self, clazz: Type[StatusType], key: str) -> Optional[StatusType]: ... 
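+        # Illustrative usage: the blue/green plugin stores per-deployment state via
+        # set_status(BlueGreenStatus, status, bg_id) and reads it back with
+        # get_status(BlueGreenStatus, bg_id); see PluginServiceImpl below.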
class PluginServiceImpl(PluginService, HostListProviderService, CanReleaseResources):
-    _STATUS_CACHE_EXPIRATION_NANO = 60 * 1_000_000_000  # one hour
+    _STATUS_CACHE_EXPIRATION_NANO = 60 * 60 * 1_000_000_000  # one hour

     _host_availability_expiring_cache: CacheMap[str, HostAvailability] = CacheMap()
+    _status_cache: ClassVar[CacheMap[str, Any]] = CacheMap()
@@ -420,7 +425,37 @@ def set_current_connection(self, connection: Optional[Connection], host_info: Op
             self.session_state_service.complete()

     @property
-    def current_host_info(self) -> Optional[HostInfo]:
+    def current_host_info(self) -> HostInfo:
+        if self._current_host_info is not None:
+            return self._current_host_info
+
+        self._current_host_info = self._initial_connection_host_info
+        if self._current_host_info is not None:
+            logger.debug("PluginServiceImpl.SetCurrentHostInfo", self._current_host_info)
+            return self._current_host_info
+
+        all_hosts = self.all_hosts
+        if not all_hosts:
+            raise AwsWrapperError(Messages.get("PluginServiceImpl.HostListEmpty"))
+
+        self._current_host_info = (
+            next((host_info for host_info in all_hosts if host_info.role == HostRole.WRITER), None))
+        if self._current_host_info:
+            allowed_hosts = self.hosts
+            if not Utils.contains_url(allowed_hosts, self._current_host_info.url):
+                raise AwsWrapperError(
+                    Messages.get_formatted(
+                        "PluginServiceImpl.CurrentHostNotAllowed",
+                        self._current_host_info.url, LogUtils.log_topology(allowed_hosts)))
+        else:
+            allowed_hosts = self.hosts
+            if len(allowed_hosts) > 0:
+                self._current_host_info = self.hosts[0]
+
+        if self._current_host_info is None:
+            raise AwsWrapperError(Messages.get("PluginServiceImpl.CouldNotDetermineCurrentHost"))
+
+        logger.debug("PluginServiceImpl.SetCurrentHostInfo", self._current_host_info)
         return self._current_host_info

     @property
@@ -499,6 +534,7 @@ def update_dialect(self, connection: Optional[Connection] = None):
         if original_dialect != self._database_dialect:
             host_list_provider_init = self._database_dialect.get_host_list_provider_supplier()
             self.host_list_provider = host_list_provider_init(self, self._props)
+            self.refresh_host_list(connection)

     def update_driver_dialect(self, connection_provider: ConnectionProvider):
         self._driver_dialect = self._driver_dialect_manager.get_pool_connection_driver_dialect(
@@ -676,18 +712,18 @@ def release_resources(self):
         if host_list_provider is not None and isinstance(host_list_provider, CanReleaseResources):
             host_list_provider.release_resources()

-    def set_status(self, clazz: Type[T], status: Optional[T], key: str):
+    def set_status(self, clazz: Type[StatusType], status: Optional[StatusType], key: str):
         cache_key = self._get_status_cache_key(clazz, key)
         if status is None:
             self._status_cache.remove(cache_key)
         else:
             self._status_cache.put(cache_key, status, PluginServiceImpl._STATUS_CACHE_EXPIRATION_NANO)

-    def _get_status_cache_key(self, clazz: Type[T], key: str) -> str:
+    def _get_status_cache_key(self, clazz: Type[StatusType], key: str) -> str:
         key_str = "" if key is None else key.strip().lower()
         return f"{key_str}::{clazz.__name__}"

-    def get_status(self, clazz: Type[T], key: str) -> Optional[T]:
+    def get_status(self, clazz: Type[StatusType], key: str) -> Optional[StatusType]:
         cache_key = self._get_status_cache_key(clazz, key)
         status = PluginServiceImpl._status_cache.get(cache_key)
         if status is None:
@@ -731,6 +767,7 @@ class PluginManager(CanReleaseResources):
         "okta": OktaAuthPluginFactory,
         "initial_connection": AuroraInitialConnectionStrategyPluginFactory,
         "limitless": LimitlessPluginFactory,
+        "bg":
BlueGreenPluginFactory } WEIGHT_RELATIVE_TO_PRIOR_PLUGIN = -1 @@ -746,6 +783,7 @@ class PluginManager(CanReleaseResources): ReadWriteSplittingPluginFactory: 300, FailoverPluginFactory: 400, HostMonitoringPluginFactory: 500, + BlueGreenPluginFactory: 550, FastestResponseStrategyPluginFactory: 600, IamAuthPluginFactory: 700, AwsSecretsManagerPluginFactory: 800, @@ -1073,6 +1111,17 @@ def is_plugin_in_use(self, plugin_class: Type[Plugin]) -> bool: return False + # For testing purposes only. + def _unwrap(self, unwrap_class: Type[UnwrapType]) -> Optional[UnwrapType]: + if len(self._plugins) < 1: + return None + + for plugin in self._plugins: + if isinstance(plugin, unwrap_class): + return plugin + + return None + def release_resources(self): """ Allows all connection plugins a chance to clean up any dangling resources diff --git a/aws_advanced_python_wrapper/resources/aws_advanced_python_wrapper_messages.properties b/aws_advanced_python_wrapper/resources/aws_advanced_python_wrapper_messages.properties index a7c37c48..9d690871 100644 --- a/aws_advanced_python_wrapper/resources/aws_advanced_python_wrapper_messages.properties +++ b/aws_advanced_python_wrapper/resources/aws_advanced_python_wrapper_messages.properties @@ -40,6 +40,41 @@ AwsSecretsManagerPlugin.UnhandledException=[AwsSecretsManagerPlugin] Unhandled e AwsWrapperConnection.ConnectionNotOpen=[AwsWrapperConnection] Attempted to establish an initial connection, but the connection returned by the connect call evaluated to None. AwsWrapperConnection.InitialHostInfoNone=[AwsWrapperConnection] The initial connection host info unexpectedly evaluated to None after initializing the host list provider. +BlueGreenPhase.UnknownStatus=[BlueGreenPhase] Unknown blue/green status '{}'. + +BlueGreenRole.UnknownRole=[BlueGreenRole] Unknown blue/green role '{}'. +BlueGreenRole.UnknownVersion=[BlueGreenRole] Unknown blue/green version '{}'. + +BlueGreenStatusMonitor.CreateHostListProvider=[BlueGreenStatusMonitor] [{}] Creating a new HostListProvider, clusterId: {}. +BlueGreenStatusMonitor.Exception=[BlueGreenStatusMonitor] [{}] currentPhase: {}, exception while querying for blue green status. +BlueGreenStatusMonitor.HostInfoNone=[BlueGreenStatusMonitor] Unable to initialize HostListProvider since connection host information is None. +BlueGreenStatusMonitor.Interrupted=[BlueGreenStatusMonitor] [{}] Interrupted. +BlueGreenStatusMonitor.MonitoringUnhandledException=[BlueGreenStatusMonitor] [{}] Unhandled exception while monitoring blue/green status: {}. +BlueGreenStatusMonitor.NoEntriesInStatusTable=[BlueGreenStatusMonitor] [{}] No entries in status table. +BlueGreenStatusMonitor.OpenedConnection=[BlueGreenStatusMonitor] [{}] Opened monitoring connection to {}. +BlueGreenStatusMonitor.OpenedConnectionWithIp=[BlueGreenStatusMonitor] [{}] Opened monitoring connection (IP) to {}. +BlueGreenStatusMonitor.OpeningConnection=[BlueGreenStatusMonitor] [{}] Opening monitoring connection to {}. +BlueGreenStatusMonitor.OpeningConnectionWithIp=[BlueGreenStatusMonitor] [{}] Opening monitoring connection (IP) to {}. +BlueGreenStatusMonitor.StatusChanged=[BlueGreenStatusMonitor] [{}] Status changed to: {} +BlueGreenStatusMonitor.StatusNotAvailable=[BlueGreenStatusMonitor] [{}] (status not available) currentPhase: {} +BlueGreenStatusMonitor.ThreadCompleted=[BlueGreenStatusMonitor] [{}] Blue/green status monitoring thread is completed. 
+BlueGreenStatusMonitor.UnexpectedDialect=[BlueGreenStatusMonitor] Attempted to create a BlueGreenStatusMonitor, but a BlueGreenDialect is required. The current dialect is {}. +BlueGreenStatusMonitor.UnhandledException=[BlueGreenStatusMonitor] [{}] Unhandled exception: {}. +BlueGreenStatusMonitor.UsesVersion=[BlueGreenStatusMonitor] [{}] Blue/Green deployment uses version '{}' which the driver doesn't support. Version '{}' will be used instead. + +BlueGreenStatusProvider.BlueDnsCompleted=[BlueGreenStatusProvider] [bgdId: '{}'] Blue DNS update completed. +BlueGreenStatusProvider.GreenDnsRemoved=[BlueGreenStatusProvider] [bgdId: '{}'] Green DNS removed. +BlueGreenStatusProvider.GreenHostChangedName=[BlueGreenStatusProvider] Green host '{}' has changed its name to '{}'. +BlueGreenStatusProvider.GreenTopologyChanged=[BlueGreenStatusProvider] [bgdId: '{}'] Green topology changed. +BlueGreenStatusProvider.InterimStatus=[BlueGreenStatusProvider] [bgdId: '{}', role: {}] {} +BlueGreenStatusProvider.ResetContext=[BlueGreenStatusProvider] Resetting context. +BlueGreenStatusProvider.Rollback=[BlueGreenStatusProvider] [bgdId: '{}'] Blue/Green deployment is in rollback mode. +BlueGreenStatusProvider.SwitchoverTimeout=[BlueGreenStatusProvider] Blue/Green switchover has timed out. +BlueGreenStatusProvider.UnknownPhase=[BlueGreenStatusProvider] [bgdId: '{}'] Unknown BG phase '{}'. +BlueGreenStatusProvider.UnsupportedDialect=[BlueGreenStatusProvider] [bgdId: '{}'] Blue/Green Deployments isn't supported by database dialect {}. + +CloseConnectionExecuteRouting.InProgressConnectionClosed=[CloseConnectionExecuteRouting] Connection has been closed since Blue/Green switchover is in progress. + conftest.ExceptionWhileObtainingInstanceIDs=[conftest] An exception was thrown while attempting to obtain the cluster's instance IDs: '{}' ConnectTimePlugin.ConnectTime=[ConnectTimePlugin] Connected in {} nanos. @@ -235,11 +270,15 @@ PluginManager.MethodInvokedAgainstOldConnection = [PluginManager] The internal c PluginManager.PipelineNone=[PluginManager] A pipeline was requested but the created pipeline evaluated to None. PluginManager.ResortedPlugins=[PluginManager] Plugins order has been rearranged. The following order is in effect: {}. +PluginServiceImpl.CouldNotDetermineCurrentHost=[PluginServiceImpl] The current host could not be determined. +PluginServiceImpl.CurrentHostNotAllowed=[PluginServiceImpl] The current host is not in the list of allowed hosts. Current host: '{}'. Allowed hosts: {} PluginServiceImpl.FailedToRetrieveHostPort=[PluginServiceImpl] Could not retrieve Host:Port for connection. {} PluginServiceImpl.FillAliasesTimeout=[PluginServiceImpl] The timeout limit was reached while querying for the current host's alias. PluginServiceImpl.GetHostRoleConnectionNone=[PluginServiceImpl] Attempted to evaluate the host role of the given connection, but could not find a non-None connection to evaluate. +PluginServiceImpl.HostListEmpty=[PluginServiceImpl] Could not determine the current host info because the current host list is empty. PluginServiceImpl.IncorrectStatusType=[PluginServiceImpl] Received an unexpected type from the status cache. An object of type {} was requested, but the object at key '{}' had a type of {}. The retrieved object was: {}. PluginServiceImpl.NonEmptyAliases=[PluginServiceImpl] fill_aliases called when HostInfo already contains the following aliases: {}. 
+PluginServiceImpl.SetCurrentHostInfo=[PluginServiceImpl] Set current host info to {}
 PluginServiceImpl.UnableToUpdateTransactionStatus=[PluginServiceImpl] Unable to update transaction status, current connection is None.
 PluginServiceImpl.UpdateDialectConnectionNone=[PluginServiceImpl] The plugin service attempted to update the current dialect but could not identify a connection to use.
 PluginServiceImpl.UnsupportedStrategy=[PluginServiceImpl] The driver does not support the requested host selection strategy: {}
@@ -312,6 +351,8 @@ ReadWriteSplittingPlugin.SwitchedFromWriterToReader=[ReadWriteSplittingPlugin] S
 ReadWriteSplittingPlugin.UnavailableHostInfo=[ReadWriteSplittingPlugin] Current Host Info could not be found in plugin service.
 ReadWriteSplittingPlugin.UnsupportedHostInfoSelectorStrategy=[ReadWriteSplittingPlugin] Unsupported host selection strategy '{}' specified in plugin configuration parameter 'reader_host_selector_strategy'. Please visit the Read/Write Splitting Plugin documentation for all supported strategies.

+RejectConnectRouting.InProgressCantConnect=[RejectConnectRouting] Blue/Green Deployment switchover is in progress. New connection can't be opened.
+
 RoundRobinHostSelector.ClusterInfoNone=[RoundRobinHostSelector] The round robin cluster information cache should have an entry for the current cluster, but no entry was found.
 RoundRobinHostSelector.RoundRobinInvalidDefaultWeight=[RoundRobinHostSelector] The provided default weight value is not valid. Weight values must be an integer greater than or equal to 1.
 RoundRobinHostSelector.RoundRobinInvalidHostWeightPairs= [RoundRobinHostSelector] The provided host weight pairs have not been configured correctly. Please ensure the provided host weight pairs is a comma separated list of pairs, each pair in the format of <host>:<weight>. Weight values must be an integer greater than or equal to the default weight value of 1. Weight pair: '{}'
@@ -335,6 +376,21 @@ StaleDnsHelper.WriterInetAddress=[StaleDnsPlugin] Writer host address: {}

 StaleDnsPlugin.RequireDynamicProvider=[StaleDnsPlugin] A dynamic host list provider is required for the stale DNS plugin, but the detected host list was a static provider.

+SubstituteConnectRouting.InProgressCantOpenConnection=[SubstituteConnectRouting] Blue/Green Deployment switchover is in progress. Can't establish connection to '{}'.
+SubstituteConnectRouting.RequireIamHost=[SubstituteConnectRouting] Connecting with IP address when IAM authentication is enabled requires an 'iamHost' parameter.
+
+SuspendConnectRouting.InProgressSuspendConnect=[SuspendConnectRouting] Blue/Green Deployment switchover is in progress. The 'connect' call will be delayed until switchover is completed.
+SuspendConnectRouting.InProgressTryConnectLater=[SuspendConnectRouting] Blue/Green Deployment switchover is still in progress after {} seconds. Try to connect again later.
+SuspendConnectRouting.SwitchoverCompleteContinueWithConnect=[SuspendConnectRouting] Blue/Green Deployment switchover is completed. Continue with connect call. The call was suspended for {} ms.
+
+SuspendExecuteRouting.InProgressSuspendMethod=[SuspendExecuteRouting] Blue/Green Deployment switchover is in progress. Suspend '{}' call until switchover is completed.
+SuspendExecuteRouting.InProgressTryMethodLater=[SuspendExecuteRouting] Blue/Green Deployment switchover is still in progress after {} ms. Try '{}' again later.
+SuspendExecuteRouting.SwitchoverCompleteContinueWithMethod=[SuspendExecuteRouting] Blue/Green Deployment switchover is completed.
Continue with '{}' call. The call was suspended for {} ms.
+
+SuspendUntilCorrespondingHostFoundConnectRouting.CompletedContinueWithConnect=[SuspendUntilCorrespondingHostFoundConnectRouting] Blue/Green Deployment status is completed. Continue with 'connect' call. The call was suspended for {} ms.
+SuspendUntilCorrespondingHostFoundConnectRouting.CorrespondingHostFoundContinueWithConnect=[SuspendUntilCorrespondingHostFoundConnectRouting] The corresponding host for '{}' was found. Continue with 'connect' call. The call was suspended for {} ms.
+SuspendUntilCorrespondingHostFoundConnectRouting.CorrespondingHostNotFoundTryConnectLater=[SuspendUntilCorrespondingHostFoundConnectRouting] Blue/Green Deployment switchover is still in progress and the corresponding host for '{}' was not found after {} ms. Try to connect again later.
+
 Testing.CantParse=[Testing] Can't parse {}.
 Testing.DisabledConnectivity=[Testing] Disabled connectivity to {}.
 Testing.EnabledConnectivity=[Testing] Enabled connectivity to {}.
diff --git a/aws_advanced_python_wrapper/utils/atomic.py b/aws_advanced_python_wrapper/utils/atomic.py
index 62eba40c..42d7961e 100644
--- a/aws_advanced_python_wrapper/utils/atomic.py
+++ b/aws_advanced_python_wrapper/utils/atomic.py
@@ -20,6 +20,9 @@ def __init__(self, initial_value: int = 0):
         self._value = initial_value
         self._lock: Lock = Lock()

+    def __str__(self):
+        return f"AtomicInt[value={self._value}]"
+
     def get(self):
         with self._lock:
             return self._value
@@ -49,3 +52,10 @@ def decrement_and_get(self):
         with self._lock:
             self._value -= 1
             return self._value
+
+    def compare_and_set(self, expected_value: int, new_value: int) -> bool:
+        with self._lock:
+            if self._value == expected_value:
+                self._value = new_value
+                return True
+            return False
diff --git a/aws_advanced_python_wrapper/utils/concurrent.py b/aws_advanced_python_wrapper/utils/concurrent.py
index 244f8236..04836932 100644
--- a/aws_advanced_python_wrapper/utils/concurrent.py
+++ b/aws_advanced_python_wrapper/utils/concurrent.py
@@ -14,12 +14,12 @@

 from __future__ import annotations

-from typing import TYPE_CHECKING
+from typing import TYPE_CHECKING, Dict, Iterator, Set, Union, ValuesView

 if TYPE_CHECKING:
     from typing import ItemsView

-from threading import Lock
+from threading import Condition, Lock, RLock
 from typing import Callable, Generic, KeysView, List, Optional, TypeVar

 K = TypeVar('K')
@@ -34,6 +34,15 @@ def __init__(self):
     def __len__(self):
         return len(self._dict)

+    def __contains__(self, key):
+        return key in self._dict
+
+    def __str__(self):
+        return f"ConcurrentDict{str(self._dict)}"
+
+    def __repr__(self):
+        return f"ConcurrentDict{str(self._dict)}"
+
     def get(self, key: K, default_value: Optional[V] = None) -> Optional[V]:
         return self._dict.get(key, default_value)
@@ -63,6 +72,10 @@ def compute_if_absent(self, key: K, mapping_func: Callable) -> Optional[V]:
             return new_value
         return value

+    def put(self, key: K, value: V):
+        with self._lock:
+            self._dict[key] = value
+
     def put_if_absent(self, key: K, new_value: V) -> V:
         with self._lock:
             existing_value = self._dict.get(key)
@@ -71,6 +84,11 @@ def put_if_absent(self, key: K, new_value: V) -> V:
             return new_value
         return existing_value

+    def put_all(self, other_dict: Union[ConcurrentDict[K, V], Dict[K, V]]):
+        with self._lock:
+            for k, v in other_dict.items():
+                self._dict[k] = v
+
     def remove(self, key: K) -> V:
         with self._lock:
             return self._dict.pop(key, None)
@@ -96,5 +114,56 @@ def apply_if(self, predicate: Callable, apply: Callable):
     def keys(self) -> KeysView:
        return self._dict.keys()

+    def values(self) -> ValuesView:
+        return self._dict.values()
+
     def items(self) -> ItemsView:
         return self._dict.items()
+
+
+class ConcurrentSet(Generic[V]):
+    def __init__(self):
+        self._set: Set[V] = set()
+        self._lock = RLock()
+
+    def __len__(self):
+        with self._lock:
+            return len(self._set)
+
+    def __contains__(self, item: V) -> bool:
+        with self._lock:
+            return item in self._set
+
+    def __iter__(self) -> Iterator[V]:
+        with self._lock:
+            # Iterate over a snapshot so callers don't hold the lock while iterating.
+            return iter(set(self._set))
+
+    def add(self, item: V):
+        with self._lock:
+            self._set.add(item)
+
+    def remove(self, item: V):
+        with self._lock:
+            self._set.remove(item)
+
+
+class CountDownLatch:
+    def __init__(self, count=1):
+        self.count = count
+        self.condition = Condition()
+
+    def set_count(self, count: int):
+        with self.condition:
+            self.count = count
+
+    def count_down(self):
+        with self.condition:
+            if self.count > 0:
+                self.count -= 1
+                if self.count == 0:
+                    self.condition.notify_all()
+
+    def wait_sec(self, timeout_sec=None):
+        with self.condition:
+            # wait_for re-checks the predicate on wakeup, so spurious wakeups and
+            # multiple waiters are handled correctly; returns False on timeout.
+            return self.condition.wait_for(lambda: self.count == 0, timeout_sec)
diff --git a/aws_advanced_python_wrapper/utils/properties.py b/aws_advanced_python_wrapper/utils/properties.py
index ccd98eb7..45aded90 100644
--- a/aws_advanced_python_wrapper/utils/properties.py
+++ b/aws_advanced_python_wrapper/utils/properties.py
@@ -20,7 +20,9 @@

 class Properties(Dict[str, Any]):
-    pass
+    def put_if_absent(self, key: str, value: Any):
+        if self.get(key) is None:
+            self[key] = value


 class WrapperProperty:
@@ -29,11 +31,19 @@ def __init__(self, name: str, description: str, default_value: Optional[Any] = N
         self.default_value = default_value
         self.description = description

+    def __str__(self):
+        return f"WrapperProperty(name={self.name}, default_value={self.default_value})"
+
     def get(self, props: Properties) -> Optional[str]:
         if self.default_value:
             return props.get(self.name, self.default_value)
         return props.get(self.name)

+    def get_or_default(self, props: Properties) -> str:
+        if not self.default_value:
+            raise ValueError(f"No default value found for property {self}")
+        return props.get(self.name, self.default_value)
+
     def get_int(self, props: Properties) -> int:
         if self.default_value:
             return int(props.get(self.name, self.default_value))
@@ -373,6 +383,37 @@ class WrapperProperties:
         "Interval in milliseconds between polling for Limitless Transaction Routers to the database.",
         7_500)

+    # Blue/Green
+    BG_CONNECT_TIMEOUT_MS = WrapperProperty(
+        "bg_connect_timeout_ms",
+        "Connect timeout (in msec) during Blue/Green Deployment switchover.",
+        30_000)
+    BG_ID = WrapperProperty(
+        "bg_id",
+        "Blue/Green Deployment identifier that helps the driver to distinguish different deployments.",
+        "1")
+    BG_INTERVAL_BASELINE_MS = WrapperProperty(
+        "bg_interval_baseline_ms",
+        "Baseline Blue/Green Deployment status checking interval (in msec).",
+        60_000)
+    BG_INTERVAL_INCREASED_MS = WrapperProperty(
+        "bg_interval_increased_ms",
+        "Increased Blue/Green Deployment status checking interval (in msec).",
+        1_000)
+    BG_INTERVAL_HIGH_MS = WrapperProperty(
+        "bg_interval_high_ms",
+        "High Blue/Green Deployment status checking interval (in msec).",
+        100)
+    BG_SWITCHOVER_TIMEOUT_MS = WrapperProperty(
+        "bg_switchover_timeout_ms",
+        "Blue/Green Deployment switchover timeout (in msec).",
+        180_000)  # 3 minutes
+    BG_SUSPEND_NEW_BLUE_CONNECTIONS = WrapperProperty(
+        "bg_suspend_new_blue_connections",
+        "Enables Blue/Green Deployment switchover to suspend new blue connection requests while the "
+        "switchover process is in progress.",
+        False)
+
     # Telemetry
     ENABLE_TELEMETRY = WrapperProperty(
         "enable_telemetry",
diff --git a/aws_advanced_python_wrapper/utils/rdsutils.py b/aws_advanced_python_wrapper/utils/rdsutils.py
index 7e289d2d..ab8f1b1a 100644
--- a/aws_advanced_python_wrapper/utils/rdsutils.py
+++ b/aws_advanced_python_wrapper/utils/rdsutils.py
@@ -115,6 +115,10 @@ class RdsUtils:
     IP_V6 = r"^[0-9a-fA-F]{1,4}(:[0-9a-fA-F]{1,4}){7}"
     IP_V6_COMPRESSED = r"^(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,5})?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,5})?)"

+    BG_OLD_HOST_PATTERN = r".*(?P<prefix>-old1)\."
+    BG_GREEN_HOST_PATTERN = r".*(?P<prefix>-green-[0-9a-z]{6})\."
+    BG_GREEN_HOST_ID_PATTERN = r"(.*)-green-[0-9a-z]{6}"
+
     DNS_GROUP = "dns"
     DOMAIN_GROUP = "domain"
     INSTANCE_GROUP = "instance"
@@ -260,6 +264,42 @@ def identify_rds_type(self, host: Optional[str]) -> RdsUrlType:

         return RdsUrlType.OTHER

+    def is_green_instance(self, host: str) -> bool:
+        if not host:
+            return False
+
+        return search(RdsUtils.BG_GREEN_HOST_PATTERN, host) is not None
+
+    def is_not_old_instance(self, host: str) -> bool:
+        if host is None or not host.strip():
+            return False
+        return search(RdsUtils.BG_OLD_HOST_PATTERN, host) is None
+
+    def is_not_green_or_old_instance(self, host: str) -> bool:
+        if not host:
+            return False
+
+        return search(RdsUtils.BG_GREEN_HOST_PATTERN, host) is None and \
+            search(RdsUtils.BG_OLD_HOST_PATTERN, host) is None
+
+    def remove_green_instance_prefix(self, host: str) -> str:
+        if not host:
+            return host
+
+        host_match = search(RdsUtils.BG_GREEN_HOST_PATTERN, host)
+        if host_match is None:
+            host_id_match = search(RdsUtils.BG_GREEN_HOST_ID_PATTERN, host)
+            if host_id_match:
+                # Return the host ID with the "-green-xxxxxx" suffix stripped.
+                return host_id_match.group(1)
+            else:
+                return host
+
+        prefix = host_match.group("prefix")
+        if not prefix:
+            return host
+
+        return host.replace(f"{prefix}.", ".")
+
     def _find(self, host: str, patterns: list):
         if not host or not host.strip():
             return None
diff --git a/aws_advanced_python_wrapper/utils/utils.py b/aws_advanced_python_wrapper/utils/utils.py
index 54fd497f..ec599a54 100644
--- a/aws_advanced_python_wrapper/utils/utils.py
+++ b/aws_advanced_python_wrapper/utils/utils.py
@@ -87,3 +87,11 @@ def initialize_class(full_class_name: str, *args):
             return getattr(m, parts[-1])(*args)
         except ModuleNotFoundError:
             return None
+
+    @staticmethod
+    def contains_url(hosts: Tuple[HostInfo, ...], url: str) -> bool:
+        for host in hosts:
+            if host.url == url:
+                return True
+
+        return False
diff --git a/aws_advanced_python_wrapper/utils/value_container.py b/aws_advanced_python_wrapper/utils/value_container.py
new file mode 100644
index 00000000..092194ba
--- /dev/null
+++ b/aws_advanced_python_wrapper/utils/value_container.py
@@ -0,0 +1,71 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License").
+# You may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
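+
+# A minimal usage sketch (illustrative):
+#
+#     container = ValueContainer.of("10.0.1.5")
+#     container.is_present()                # True
+#     container.get()                       # "10.0.1.5"
+#     ValueContainer.empty().or_else(None)  # None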
+
+from typing import Generic, Optional, TypeVar, Union, cast
+
+V = TypeVar('V')
+
+
+class Empty(object):
+    """An empty sentinel object used to differentiate between None vs an empty value."""
+    pass
+
+
+class ValueContainer(Generic[V]):
+    """A container object which may or may not contain a non-None value."""
+
+    # Sentinel object to represent an empty ValueContainer
+    _EMPTY = Empty()
+
+    def __init__(self, value: Union[Empty, V] = _EMPTY):
+        self._value = value
+
+    @classmethod
+    def of(cls, value: V) -> 'ValueContainer[V]':
+        if value is None:
+            raise ValueError("Value cannot be None")
+        return cls(value)
+
+    @classmethod
+    def empty(cls) -> 'ValueContainer[V]':
+        return cls()
+
+    def is_present(self) -> bool:
+        return self._value is not self._EMPTY
+
+    def is_empty(self) -> bool:
+        return self._value is self._EMPTY
+
+    def get(self) -> V:
+        if self._value is self._EMPTY:
+            raise ValueError("No value present")
+        return cast('V', self._value)
+
+    def or_else(self, other: Optional[V]) -> Optional[V]:
+        return cast('V', self._value) if self.is_present() else other
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, ValueContainer):
+            return False
+        if self.is_empty() and other.is_empty():
+            return True
+        if self.is_empty() or other.is_empty():
+            return False
+        return self._value == other._value
+
+    def __str__(self) -> str:
+        return "ValueContainer.empty" if self.is_empty() else f"ValueContainer[{self._value}]"
+
+    def __repr__(self) -> str:
+        return "ValueContainer.empty" if self.is_empty() else f"ValueContainer[{self._value}]"
diff --git a/aws_advanced_python_wrapper/wrapper.py b/aws_advanced_python_wrapper/wrapper.py
index 7f6681fb..e04d293f 100644
--- a/aws_advanced_python_wrapper/wrapper.py
+++ b/aws_advanced_python_wrapper/wrapper.py
@@ -15,7 +15,7 @@
 from __future__ import annotations

 from typing import (TYPE_CHECKING, Any, Callable, Iterator, List, Optional,
-                    Union)
+                    Type, TypeVar, Union)

 if TYPE_CHECKING:
     from aws_advanced_python_wrapper.host_list_provider import HostListProviderService
@@ -40,6 +40,8 @@

 logger = Logger(__name__)

+UnwrapType = TypeVar('UnwrapType')
+

 class AwsWrapperConnection(Connection, CanReleaseResources):
     __module__ = "aws_advanced_python_wrapper"
@@ -213,6 +215,10 @@ def release_resources(self):
         if isinstance(self._plugin_service, CanReleaseResources):
             self._plugin_service.release_resources()

+    # For testing purposes only
+    def _unwrap(self, unwrap_class: Type[UnwrapType]) -> Optional[UnwrapType]:
+        return self._plugin_manager._unwrap(unwrap_class)
+
     def __del__(self):
         self.release_resources()

diff --git a/docs/development-guide/IntegrationTests.md b/docs/development-guide/IntegrationTests.md
index e8d63089..ff306d6c 100644
--- a/docs/development-guide/IntegrationTests.md
+++ b/docs/development-guide/IntegrationTests.md
@@ -120,13 +120,13 @@ unset FILTER # Done testing the IAM tests, unset FILTER
 | `DB_USERNAME` | Yes | The username to access the database. | `admin` |
 | `DB_PASSWORD` | Yes | The database cluster password. | `password` |
 | `DB_DATABASE_NAME` | No | Name of the database that will be used by the tests. The default database name is test. | `test_db_name` |
-| `RDS_CLUSTER_NAME` | Yes | The database identifier for your Aurora or RDS cluster. Must be a unique value to avoid conflicting with existing clusters. | `db-identifier` |
-| `RDS_CLUSTER_DOMAIN` | No | The existing database connection suffix. Use this variable to run against an existing database. | `XYZ.us-east-2.rds.amazonaws.com` |
+| `RDS_DB_NAME` | Yes | The database identifier for your Aurora or RDS cluster. Must be a unique value to avoid conflicting with existing clusters. | `db-identifier` |
+| `RDS_DB_DOMAIN` | No | The existing database connection suffix. Use this variable to run against an existing database. | `XYZ.us-east-2.rds.amazonaws.com` |
 | `IAM_USER` | No | User within the database that is identified with AWSAuthenticationPlugin. This is used for AWS IAM Authentication and is optional | `example_user_name` |
 | `AWS_ACCESS_KEY_ID` | Yes | An AWS access key associated with an IAM user or role with RDS permissions. | `ASIAIOSFODNN7EXAMPLE` |
 | `AWS_SECRET_ACCESS_KEY` | Yes | The secret key associated with the provided AWS_ACCESS_KEY_ID. | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` |
 | `AWS_SESSION_TOKEN` | No | AWS Session Token for CLI, SDK, & API access. This value is for MFA credentials only. See: [temporary AWS credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html). | `AQoDYXdzEJr...` | |
-| `REUSE_RDS_CLUSTER` | Yes | Set to true if you would like to use an existing cluster for your tests. | `false` |
+| `REUSE_RDS_DB` | Yes | Set to true if you would like to use an existing cluster for your tests. | `false` |
 | `RDS_DB_REGION` | Yes | The database region. | `us-east-2` |
 | `DEBUG_ENV` | No | The IDE you will be using to debug the tests, values are either `PYCHARM` or `VSCODE` | `PYCHARM` |

diff --git a/poetry.lock b/poetry.lock
index 041eeaef..e4258a20 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.

 [[package]]
 name = "astor"
@@ -61,19 +61,18 @@ tzdata = ["tzdata"]

 [[package]]
 name = "beautifulsoup4"
-version = "4.13.4"
+version = "4.12.3"
 description = "Screen-scraping library"
 optional = false
-python-versions = ">=3.7.0"
+python-versions = ">=3.6.0"
 groups = ["test"]
 files = [
-    {file = "beautifulsoup4-4.13.4-py3-none-any.whl", hash = "sha256:9bbbb14bfde9d79f38b8cd5f8c7c85f4b8f2523190ebed90e950a8dea4cb1c4b"},
-    {file = "beautifulsoup4-4.13.4.tar.gz", hash = "sha256:dbb3c4e1ceae6aefebdaf2423247260cd062430a410e38c66f2baa50a8437195"},
+    {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"},
+    {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"},
 ]

 [package.dependencies]
 soupsieve = ">1.2"
-typing-extensions = ">=4.0.0"

 [package.extras]
 cchardet = ["cchardet"]
@@ -125,118 +124,149 @@ urllib3 = [

 [package.extras]
 crt = ["awscrt (==0.23.8)"]

+[[package]]
+name = "botocore-stubs"
+version = "1.38.30"
+description = "Type annotations and code completion for botocore"
+optional = false
+python-versions = ">=3.8"
+groups = ["test"]
+files = [
+    {file = "botocore_stubs-1.38.30-py3-none-any.whl", hash = "sha256:2efb8bdf36504aff596c670d875d8f7dd15205277c15c4cea54afdba8200c266"},
+    {file = "botocore_stubs-1.38.30.tar.gz", hash = "sha256:291d7bf39a316c00a8a55b7255489b02c0cea1a343482e7784e8d1e235bae995"},
+]
+
+[package.dependencies]
+types-awscrt = "*"
+
+[package.extras]
+botocore = ["botocore"]
+
 [[package]]
 name = "certifi"
-version = "2025.7.9"
+version = "2024.8.30"
 description = "Python package for providing Mozilla's CA Bundle."
optional = false -python-versions = ">=3.7" +python-versions = ">=3.6" groups = ["main", "test"] files = [ - {file = "certifi-2025.7.9-py3-none-any.whl", hash = "sha256:d842783a14f8fdd646895ac26f719a061408834473cfc10203f6a575beb15d39"}, - {file = "certifi-2025.7.9.tar.gz", hash = "sha256:c1d2ec05395148ee10cf672ffc28cd37ea0ab0d99f9cc74c43e588cbd111b079"}, + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] [[package]] name = "charset-normalizer" -version = "3.4.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." optional = false -python-versions = ">=3.7" +python-versions = ">=3.7.0" groups = ["main", "test"] files = [ - {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"}, - {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"}, - {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"}, - {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"}, - {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = 
"sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"}, - {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"}, - {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"}, - {file = 
"charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"}, - {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"}, - {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"}, - {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = 
"sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = 
"charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = 
"sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = 
"charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -351,74 +381,71 @@ toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "debugpy" -version = "1.8.15" +version = "1.8.14" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "debugpy-1.8.15-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:e9a8125c85172e3ec30985012e7a81ea5e70bbb836637f8a4104f454f9b06c97"}, - {file = "debugpy-1.8.15-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fd0b6b5eccaa745c214fd240ea82f46049d99ef74b185a3517dad3ea1ec55d9"}, - {file = "debugpy-1.8.15-cp310-cp310-win32.whl", hash = "sha256:8181cce4d344010f6bfe94a531c351a46a96b0f7987750932b2908e7a1e14a55"}, - {file = "debugpy-1.8.15-cp310-cp310-win_amd64.whl", hash = "sha256:af2dcae4e4cd6e8b35f982ccab29fe65f7e8766e10720a717bc80c464584ee21"}, - {file = "debugpy-1.8.15-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:babc4fb1962dd6a37e94d611280e3d0d11a1f5e6c72ac9b3d87a08212c4b6dd3"}, - {file = "debugpy-1.8.15-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f778e68f2986a58479d0ac4f643e0b8c82fdd97c2e200d4d61e7c2d13838eb53"}, - {file = "debugpy-1.8.15-cp311-cp311-win32.whl", hash = "sha256:f9d1b5abd75cd965e2deabb1a06b0e93a1546f31f9f621d2705e78104377c702"}, - {file = "debugpy-1.8.15-cp311-cp311-win_amd64.whl", hash = "sha256:62954fb904bec463e2b5a415777f6d1926c97febb08ef1694da0e5d1463c5c3b"}, - {file = "debugpy-1.8.15-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:3dcc7225cb317469721ab5136cda9ff9c8b6e6fb43e87c9e15d5b108b99d01ba"}, - {file = "debugpy-1.8.15-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:047a493ca93c85ccede1dbbaf4e66816794bdc214213dde41a9a61e42d27f8fc"}, - {file = "debugpy-1.8.15-cp312-cp312-win32.whl", hash = "sha256:b08e9b0bc260cf324c890626961dad4ffd973f7568fbf57feb3c3a65ab6b6327"}, - {file = "debugpy-1.8.15-cp312-cp312-win_amd64.whl", hash = "sha256:e2a4fe357c92334272eb2845fcfcdbec3ef9f22c16cf613c388ac0887aed15fa"}, - {file = "debugpy-1.8.15-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:f5e01291ad7d6649aed5773256c5bba7a1a556196300232de1474c3c372592bf"}, - {file = "debugpy-1.8.15-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94dc0f0d00e528d915e0ce1c78e771475b2335b376c49afcc7382ee0b146bab6"}, - {file = "debugpy-1.8.15-cp313-cp313-win32.whl", hash = "sha256:fcf0748d4f6e25f89dc5e013d1129ca6f26ad4da405e0723a4f704583896a709"}, - {file = "debugpy-1.8.15-cp313-cp313-win_amd64.whl", hash = "sha256:73c943776cb83e36baf95e8f7f8da765896fd94b05991e7bc162456d25500683"}, - {file = "debugpy-1.8.15-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:054cd4935bd2e4964dfe1aeee4d6bca89d0c833366776fc35387f8a2f517dd00"}, - {file = "debugpy-1.8.15-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21c4288e662997df3176c4b9d93ee1393913fbaf320732be332d538000c53208"}, - {file = "debugpy-1.8.15-cp38-cp38-win32.whl", hash = "sha256:aaa8ce6a37d764f93fe583d7c6ca58eb7550b36941387483db113125f122bb0d"}, - {file = "debugpy-1.8.15-cp38-cp38-win_amd64.whl", hash = "sha256:71cdf7f676af78e70f005c7fad2ef9da0edc2a24befbf3ab146a51f0d58048c2"}, - {file = "debugpy-1.8.15-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:085b6d0adb3eb457c2823ac497a0690b10a99eff8b01c01a041e84579f114b56"}, - {file = "debugpy-1.8.15-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cd546a405381d17527814852642df0a74b7da8acc20ae5f3cfad0b7c86419511"}, - {file = "debugpy-1.8.15-cp39-cp39-win32.whl", hash = "sha256:ae0d445fe11ff4351428e6c2389e904e1cdcb4a47785da5a5ec4af6c5b95fce5"}, - {file = "debugpy-1.8.15-cp39-cp39-win_amd64.whl", hash = "sha256:de7db80189ca97ab4b10a87e4039cfe4dd7ddfccc8f33b5ae40fcd33792fc67a"}, - {file = "debugpy-1.8.15-py2.py3-none-any.whl", hash = "sha256:bce2e6c5ff4f2e00b98d45e7e01a49c7b489ff6df5f12d881c67d2f1ac635f3d"}, - {file = "debugpy-1.8.15.tar.gz", hash = "sha256:58d7a20b7773ab5ee6bdfb2e6cf622fdf1e40c9d5aef2857d85391526719ac00"}, + {file = "debugpy-1.8.14-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:93fee753097e85623cab1c0e6a68c76308cd9f13ffdf44127e6fab4fbf024339"}, + {file = "debugpy-1.8.14-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d937d93ae4fa51cdc94d3e865f535f185d5f9748efb41d0d49e33bf3365bd79"}, + {file = "debugpy-1.8.14-cp310-cp310-win32.whl", hash = "sha256:c442f20577b38cc7a9aafecffe1094f78f07fb8423c3dddb384e6b8f49fd2987"}, + {file = "debugpy-1.8.14-cp310-cp310-win_amd64.whl", hash = "sha256:f117dedda6d969c5c9483e23f573b38f4e39412845c7bc487b6f2648df30fe84"}, + {file = "debugpy-1.8.14-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:1b2ac8c13b2645e0b1eaf30e816404990fbdb168e193322be8f545e8c01644a9"}, + {file = "debugpy-1.8.14-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf431c343a99384ac7eab2f763980724834f933a271e90496944195318c619e2"}, + {file = "debugpy-1.8.14-cp311-cp311-win32.whl", hash = 
"sha256:c99295c76161ad8d507b413cd33422d7c542889fbb73035889420ac1fad354f2"}, + {file = "debugpy-1.8.14-cp311-cp311-win_amd64.whl", hash = "sha256:7816acea4a46d7e4e50ad8d09d963a680ecc814ae31cdef3622eb05ccacf7b01"}, + {file = "debugpy-1.8.14-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:8899c17920d089cfa23e6005ad9f22582fd86f144b23acb9feeda59e84405b84"}, + {file = "debugpy-1.8.14-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6bb5c0dcf80ad5dbc7b7d6eac484e2af34bdacdf81df09b6a3e62792b722826"}, + {file = "debugpy-1.8.14-cp312-cp312-win32.whl", hash = "sha256:281d44d248a0e1791ad0eafdbbd2912ff0de9eec48022a5bfbc332957487ed3f"}, + {file = "debugpy-1.8.14-cp312-cp312-win_amd64.whl", hash = "sha256:5aa56ef8538893e4502a7d79047fe39b1dae08d9ae257074c6464a7b290b806f"}, + {file = "debugpy-1.8.14-cp313-cp313-macosx_14_0_universal2.whl", hash = "sha256:329a15d0660ee09fec6786acdb6e0443d595f64f5d096fc3e3ccf09a4259033f"}, + {file = "debugpy-1.8.14-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f920c7f9af409d90f5fd26e313e119d908b0dd2952c2393cd3247a462331f15"}, + {file = "debugpy-1.8.14-cp313-cp313-win32.whl", hash = "sha256:3784ec6e8600c66cbdd4ca2726c72d8ca781e94bce2f396cc606d458146f8f4e"}, + {file = "debugpy-1.8.14-cp313-cp313-win_amd64.whl", hash = "sha256:684eaf43c95a3ec39a96f1f5195a7ff3d4144e4a18d69bb66beeb1a6de605d6e"}, + {file = "debugpy-1.8.14-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:d5582bcbe42917bc6bbe5c12db1bffdf21f6bfc28d4554b738bf08d50dc0c8c3"}, + {file = "debugpy-1.8.14-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5349b7c3735b766a281873fbe32ca9cca343d4cc11ba4a743f84cb854339ff35"}, + {file = "debugpy-1.8.14-cp38-cp38-win32.whl", hash = "sha256:7118d462fe9724c887d355eef395fae68bc764fd862cdca94e70dcb9ade8a23d"}, + {file = "debugpy-1.8.14-cp38-cp38-win_amd64.whl", hash = "sha256:d235e4fa78af2de4e5609073972700523e372cf5601742449970110d565ca28c"}, + {file = "debugpy-1.8.14-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:413512d35ff52c2fb0fd2d65e69f373ffd24f0ecb1fac514c04a668599c5ce7f"}, + {file = "debugpy-1.8.14-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c9156f7524a0d70b7a7e22b2e311d8ba76a15496fb00730e46dcdeedb9e1eea"}, + {file = "debugpy-1.8.14-cp39-cp39-win32.whl", hash = "sha256:b44985f97cc3dd9d52c42eb59ee9d7ee0c4e7ecd62bca704891f997de4cef23d"}, + {file = "debugpy-1.8.14-cp39-cp39-win_amd64.whl", hash = "sha256:b1528cfee6c1b1c698eb10b6b096c598738a8238822d218173d21c3086de8123"}, + {file = "debugpy-1.8.14-py2.py3-none-any.whl", hash = "sha256:5cd9a579d553b6cb9759a7908a41988ee6280b961f24f63336835d9418216a20"}, + {file = "debugpy-1.8.14.tar.gz", hash = "sha256:7cd287184318416850aa8b60ac90105837bb1e59531898c07569d197d2ed5322"}, ] [[package]] name = "deprecated" -version = "1.2.18" +version = "1.2.14" description = "Python @deprecated decorator to deprecate old python classes, functions or methods." 
optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" groups = ["main", "test"] files = [ - {file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"}, - {file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"}, + {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, + {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, ] [package.dependencies] wrapt = ">=1.10,<2" [package.extras] -dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"] +dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] name = "exceptiongroup" -version = "1.3.0" +version = "1.2.2" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" groups = ["test"] markers = "python_version < \"3.11\"" files = [ - {file = "exceptiongroup-1.3.0-py3-none-any.whl", hash = "sha256:4d111e6e0c13d0644cad6ddaa7ed0261a0b36971f6d23e7ec9b4b9097da78a10"}, - {file = "exceptiongroup-1.3.0.tar.gz", hash = "sha256:b241f5885f560bc56a59ee63ca4c6a8bfa46ae4ad651af316d4e81817bb9fd88"}, + {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, + {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} - [package.extras] test = ["pytest (>=6)"] @@ -470,21 +497,21 @@ files = [ [[package]] name = "googleapis-common-protos" -version = "1.70.0" +version = "1.65.0" description = "Common protobufs used in Google APIs" optional = false python-versions = ">=3.7" groups = ["test"] files = [ - {file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"}, - {file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"}, + {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, + {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, ] [package.dependencies] -protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0" +protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0" [package.extras] -grpc = ["grpcio (>=1.44.0,<2.0.0)"] +grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "greenlet" @@ -493,7 +520,7 @@ description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" groups = ["dev"] -markers = "python_version <= \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" +markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or 
platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" files = [ {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, @@ -576,137 +603,71 @@ test = ["objgraph", "psutil"] [[package]] name = "grpcio" -version = "1.70.0" +version = "1.67.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" groups = ["test"] -markers = "python_version < \"3.10\"" files = [ - {file = "grpcio-1.70.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:95469d1977429f45fe7df441f586521361e235982a0b39e33841549143ae2851"}, - {file = "grpcio-1.70.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ed9718f17fbdb472e33b869c77a16d0b55e166b100ec57b016dc7de9c8d236bf"}, - {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:374d014f29f9dfdb40510b041792e0e2828a1389281eb590df066e1cc2b404e5"}, - {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2af68a6f5c8f78d56c145161544ad0febbd7479524a59c16b3e25053f39c87f"}, - {file = "grpcio-1.70.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7df14b2dcd1102a2ec32f621cc9fab6695effef516efbc6b063ad749867295"}, - {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c78b339869f4dbf89881e0b6fbf376313e4f845a42840a7bdf42ee6caed4b11f"}, - {file = "grpcio-1.70.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:58ad9ba575b39edef71f4798fdb5c7b6d02ad36d47949cd381d4392a5c9cbcd3"}, - {file = "grpcio-1.70.0-cp310-cp310-win32.whl", hash = "sha256:2b0d02e4b25a5c1f9b6c7745d4fa06efc9fd6a611af0fb38d3ba956786b95199"}, - {file = "grpcio-1.70.0-cp310-cp310-win_amd64.whl", hash = "sha256:0de706c0a5bb9d841e353f6343a9defc9fc35ec61d6eb6111802f3aa9fef29e1"}, - {file = "grpcio-1.70.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:17325b0be0c068f35770f944124e8839ea3185d6d54862800fc28cc2ffad205a"}, - {file = "grpcio-1.70.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:dbe41ad140df911e796d4463168e33ef80a24f5d21ef4d1e310553fcd2c4a386"}, - {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5ea67c72101d687d44d9c56068328da39c9ccba634cabb336075fae2eab0d04b"}, - {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cb5277db254ab7586769e490b7b22f4ddab3876c490da0a1a9d7c695ccf0bf77"}, - {file = "grpcio-1.70.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7831a0fc1beeeb7759f737f5acd9fdcda520e955049512d68fda03d91186eea"}, - {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27cc75e22c5dba1fbaf5a66c778e36ca9b8ce850bf58a9db887754593080d839"}, - {file = "grpcio-1.70.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d63764963412e22f0491d0d32833d71087288f4e24cbcddbae82476bfa1d81fd"}, - {file = "grpcio-1.70.0-cp311-cp311-win32.whl", hash = "sha256:bb491125103c800ec209d84c9b51f1c60ea456038e4734688004f377cfacc113"}, - {file = "grpcio-1.70.0-cp311-cp311-win_amd64.whl", hash = "sha256:d24035d49e026353eb042bf7b058fb831db3e06d52bee75c5f2f3ab453e71aca"}, - {file = "grpcio-1.70.0-cp312-cp312-linux_armv7l.whl", hash = 
"sha256:ef4c14508299b1406c32bdbb9fb7b47612ab979b04cf2b27686ea31882387cff"}, - {file = "grpcio-1.70.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:aa47688a65643afd8b166928a1da6247d3f46a2784d301e48ca1cc394d2ffb40"}, - {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:880bfb43b1bb8905701b926274eafce5c70a105bc6b99e25f62e98ad59cb278e"}, - {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e654c4b17d07eab259d392e12b149c3a134ec52b11ecdc6a515b39aceeec898"}, - {file = "grpcio-1.70.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2394e3381071045a706ee2eeb6e08962dd87e8999b90ac15c55f56fa5a8c9597"}, - {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b3c76701428d2df01964bc6479422f20e62fcbc0a37d82ebd58050b86926ef8c"}, - {file = "grpcio-1.70.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac073fe1c4cd856ebcf49e9ed6240f4f84d7a4e6ee95baa5d66ea05d3dd0df7f"}, - {file = "grpcio-1.70.0-cp312-cp312-win32.whl", hash = "sha256:cd24d2d9d380fbbee7a5ac86afe9787813f285e684b0271599f95a51bce33528"}, - {file = "grpcio-1.70.0-cp312-cp312-win_amd64.whl", hash = "sha256:0495c86a55a04a874c7627fd33e5beaee771917d92c0e6d9d797628ac40e7655"}, - {file = "grpcio-1.70.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa573896aeb7d7ce10b1fa425ba263e8dddd83d71530d1322fd3a16f31257b4a"}, - {file = "grpcio-1.70.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:d405b005018fd516c9ac529f4b4122342f60ec1cee181788249372524e6db429"}, - {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f32090238b720eb585248654db8e3afc87b48d26ac423c8dde8334a232ff53c9"}, - {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfa089a734f24ee5f6880c83d043e4f46bf812fcea5181dcb3a572db1e79e01c"}, - {file = "grpcio-1.70.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f19375f0300b96c0117aca118d400e76fede6db6e91f3c34b7b035822e06c35f"}, - {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:7c73c42102e4a5ec76608d9b60227d917cea46dff4d11d372f64cbeb56d259d0"}, - {file = "grpcio-1.70.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:0a5c78d5198a1f0aa60006cd6eb1c912b4a1520b6a3968e677dbcba215fabb40"}, - {file = "grpcio-1.70.0-cp313-cp313-win32.whl", hash = "sha256:fe9dbd916df3b60e865258a8c72ac98f3ac9e2a9542dcb72b7a34d236242a5ce"}, - {file = "grpcio-1.70.0-cp313-cp313-win_amd64.whl", hash = "sha256:4119fed8abb7ff6c32e3d2255301e59c316c22d31ab812b3fbcbaf3d0d87cc68"}, - {file = "grpcio-1.70.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:8058667a755f97407fca257c844018b80004ae8035565ebc2812cc550110718d"}, - {file = "grpcio-1.70.0-cp38-cp38-macosx_10_14_universal2.whl", hash = "sha256:879a61bf52ff8ccacbedf534665bb5478ec8e86ad483e76fe4f729aaef867cab"}, - {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:0ba0a173f4feacf90ee618fbc1a27956bfd21260cd31ced9bc707ef551ff7dc7"}, - {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558c386ecb0148f4f99b1a65160f9d4b790ed3163e8610d11db47838d452512d"}, - {file = "grpcio-1.70.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:412faabcc787bbc826f51be261ae5fa996b21263de5368a55dc2cf824dc5090e"}, - {file = "grpcio-1.70.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3b0f01f6ed9994d7a0b27eeddea43ceac1b7e6f3f9d86aeec0f0064b8cf50fdb"}, - {file = 
"grpcio-1.70.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7385b1cb064734005204bc8994eed7dcb801ed6c2eda283f613ad8c6c75cf873"}, - {file = "grpcio-1.70.0-cp38-cp38-win32.whl", hash = "sha256:07269ff4940f6fb6710951116a04cd70284da86d0a4368fd5a3b552744511f5a"}, - {file = "grpcio-1.70.0-cp38-cp38-win_amd64.whl", hash = "sha256:aba19419aef9b254e15011b230a180e26e0f6864c90406fdbc255f01d83bc83c"}, - {file = "grpcio-1.70.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:4f1937f47c77392ccd555728f564a49128b6a197a05a5cd527b796d36f3387d0"}, - {file = "grpcio-1.70.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:0cd430b9215a15c10b0e7d78f51e8a39d6cf2ea819fd635a7214fae600b1da27"}, - {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:e27585831aa6b57b9250abaf147003e126cd3a6c6ca0c531a01996f31709bed1"}, - {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1af8e15b0f0fe0eac75195992a63df17579553b0c4af9f8362cc7cc99ccddf4"}, - {file = "grpcio-1.70.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbce24409beaee911c574a3d75d12ffb8c3e3dd1b813321b1d7a96bbcac46bf4"}, - {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ff4a8112a79464919bb21c18e956c54add43ec9a4850e3949da54f61c241a4a6"}, - {file = "grpcio-1.70.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5413549fdf0b14046c545e19cfc4eb1e37e9e1ebba0ca390a8d4e9963cab44d2"}, - {file = "grpcio-1.70.0-cp39-cp39-win32.whl", hash = "sha256:b745d2c41b27650095e81dea7091668c040457483c9bdb5d0d9de8f8eb25e59f"}, - {file = "grpcio-1.70.0-cp39-cp39-win_amd64.whl", hash = "sha256:a31d7e3b529c94e930a117b2175b2efd179d96eb3c7a21ccb0289a8ab05b645c"}, - {file = "grpcio-1.70.0.tar.gz", hash = "sha256:8d1584a68d5922330025881e63a6c1b54cc8117291d382e4fa69339b6d914c56"}, + {file = "grpcio-1.67.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:bd79929b3bb96b54df1296cd3bf4d2b770bd1df6c2bdf549b49bab286b925cdc"}, + {file = "grpcio-1.67.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:16724ffc956ea42967f5758c2f043faef43cb7e48a51948ab593570570d1e68b"}, + {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:2b7183c80b602b0ad816315d66f2fb7887614ead950416d60913a9a71c12560d"}, + {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:efe32b45dd6d118f5ea2e5deaed417d8a14976325c93812dd831908522b402c9"}, + {file = "grpcio-1.67.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe89295219b9c9e47780a0f1c75ca44211e706d1c598242249fe717af3385ec8"}, + {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa8d025fae1595a207b4e47c2e087cb88d47008494db258ac561c00877d4c8f8"}, + {file = "grpcio-1.67.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f95e15db43e75a534420e04822df91f645664bf4ad21dfaad7d51773c80e6bb4"}, + {file = "grpcio-1.67.0-cp310-cp310-win32.whl", hash = "sha256:a6b9a5c18863fd4b6624a42e2712103fb0f57799a3b29651c0e5b8119a519d65"}, + {file = "grpcio-1.67.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6eb68493a05d38b426604e1dc93bfc0137c4157f7ab4fac5771fd9a104bbaa6"}, + {file = "grpcio-1.67.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:e91d154689639932305b6ea6f45c6e46bb51ecc8ea77c10ef25aa77f75443ad4"}, + {file = "grpcio-1.67.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cb204a742997277da678611a809a8409657b1398aaeebf73b3d9563b7d154c13"}, + {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = 
"sha256:ae6de510f670137e755eb2a74b04d1041e7210af2444103c8c95f193340d17ee"}, + {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:74b900566bdf68241118f2918d312d3bf554b2ce0b12b90178091ea7d0a17b3d"}, + {file = "grpcio-1.67.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4e95e43447a02aa603abcc6b5e727d093d161a869c83b073f50b9390ecf0fa8"}, + {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0bb94e66cd8f0baf29bd3184b6aa09aeb1a660f9ec3d85da615c5003154bc2bf"}, + {file = "grpcio-1.67.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:82e5bd4b67b17c8c597273663794a6a46a45e44165b960517fe6d8a2f7f16d23"}, + {file = "grpcio-1.67.0-cp311-cp311-win32.whl", hash = "sha256:7fc1d2b9fd549264ae585026b266ac2db53735510a207381be509c315b4af4e8"}, + {file = "grpcio-1.67.0-cp311-cp311-win_amd64.whl", hash = "sha256:ac11ecb34a86b831239cc38245403a8de25037b448464f95c3315819e7519772"}, + {file = "grpcio-1.67.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:227316b5631260e0bef8a3ce04fa7db4cc81756fea1258b007950b6efc90c05d"}, + {file = "grpcio-1.67.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d90cfdafcf4b45a7a076e3e2a58e7bc3d59c698c4f6470b0bb13a4d869cf2273"}, + {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:77196216d5dd6f99af1c51e235af2dd339159f657280e65ce7e12c1a8feffd1d"}, + {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15c05a26a0f7047f720da41dc49406b395c1470eef44ff7e2c506a47ac2c0591"}, + {file = "grpcio-1.67.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3840994689cc8cbb73d60485c594424ad8adb56c71a30d8948d6453083624b52"}, + {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:5a1e03c3102b6451028d5dc9f8591131d6ab3c8a0e023d94c28cb930ed4b5f81"}, + {file = "grpcio-1.67.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:682968427a63d898759474e3b3178d42546e878fdce034fd7474ef75143b64e3"}, + {file = "grpcio-1.67.0-cp312-cp312-win32.whl", hash = "sha256:d01793653248f49cf47e5695e0a79805b1d9d4eacef85b310118ba1dfcd1b955"}, + {file = "grpcio-1.67.0-cp312-cp312-win_amd64.whl", hash = "sha256:985b2686f786f3e20326c4367eebdaed3e7aa65848260ff0c6644f817042cb15"}, + {file = "grpcio-1.67.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:8c9a35b8bc50db35ab8e3e02a4f2a35cfba46c8705c3911c34ce343bd777813a"}, + {file = "grpcio-1.67.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:42199e704095b62688998c2d84c89e59a26a7d5d32eed86d43dc90e7a3bd04aa"}, + {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c4c425f440fb81f8d0237c07b9322fc0fb6ee2b29fbef5f62a322ff8fcce240d"}, + {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:323741b6699cd2b04a71cb38f502db98f90532e8a40cb675393d248126a268af"}, + {file = "grpcio-1.67.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:662c8e105c5e5cee0317d500eb186ed7a93229586e431c1bf0c9236c2407352c"}, + {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f6bd2ab135c64a4d1e9e44679a616c9bc944547357c830fafea5c3caa3de5153"}, + {file = "grpcio-1.67.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:2f55c1e0e2ae9bdd23b3c63459ee4c06d223b68aeb1961d83c48fb63dc29bc03"}, + {file = "grpcio-1.67.0-cp313-cp313-win32.whl", hash = "sha256:fd6bc27861e460fe28e94226e3673d46e294ca4673d46b224428d197c5935e69"}, + {file = "grpcio-1.67.0-cp313-cp313-win_amd64.whl", hash 
= "sha256:cf51d28063338608cd8d3cd64677e922134837902b70ce00dad7f116e3998210"}, + {file = "grpcio-1.67.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:7f200aca719c1c5dc72ab68be3479b9dafccdf03df530d137632c534bb6f1ee3"}, + {file = "grpcio-1.67.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0892dd200ece4822d72dd0952f7112c542a487fc48fe77568deaaa399c1e717d"}, + {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:f4d613fbf868b2e2444f490d18af472ccb47660ea3df52f068c9c8801e1f3e85"}, + {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c69bf11894cad9da00047f46584d5758d6ebc9b5950c0dc96fec7e0bce5cde9"}, + {file = "grpcio-1.67.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9bca3ca0c5e74dea44bf57d27e15a3a3996ce7e5780d61b7c72386356d231db"}, + {file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:014dfc020e28a0d9be7e93a91f85ff9f4a87158b7df9952fe23cc42d29d31e1e"}, + {file = "grpcio-1.67.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d4ea4509d42c6797539e9ec7496c15473177ce9abc89bc5c71e7abe50fc25737"}, + {file = "grpcio-1.67.0-cp38-cp38-win32.whl", hash = "sha256:9d75641a2fca9ae1ae86454fd25d4c298ea8cc195dbc962852234d54a07060ad"}, + {file = "grpcio-1.67.0-cp38-cp38-win_amd64.whl", hash = "sha256:cff8e54d6a463883cda2fab94d2062aad2f5edd7f06ae3ed030f2a74756db365"}, + {file = "grpcio-1.67.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:62492bd534979e6d7127b8a6b29093161a742dee3875873e01964049d5250a74"}, + {file = "grpcio-1.67.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eef1dce9d1a46119fd09f9a992cf6ab9d9178b696382439446ca5f399d7b96fe"}, + {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:f623c57a5321461c84498a99dddf9d13dac0e40ee056d884d6ec4ebcab647a78"}, + {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54d16383044e681f8beb50f905249e4e7261dd169d4aaf6e52eab67b01cbbbe2"}, + {file = "grpcio-1.67.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2a44e572fb762c668e4812156b81835f7aba8a721b027e2d4bb29fb50ff4d33"}, + {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:391df8b0faac84d42f5b8dfc65f5152c48ed914e13c522fd05f2aca211f8bfad"}, + {file = "grpcio-1.67.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:cfd9306511fdfc623a1ba1dc3bc07fbd24e6cfbe3c28b4d1e05177baa2f99617"}, + {file = "grpcio-1.67.0-cp39-cp39-win32.whl", hash = "sha256:30d47dbacfd20cbd0c8be9bfa52fdb833b395d4ec32fe5cff7220afc05d08571"}, + {file = "grpcio-1.67.0-cp39-cp39-win_amd64.whl", hash = "sha256:f55f077685f61f0fbd06ea355142b71e47e4a26d2d678b3ba27248abfe67163a"}, + {file = "grpcio-1.67.0.tar.gz", hash = "sha256:e090b2553e0da1c875449c8e75073dd4415dd71c9bde6a406240fdf4c0ee467c"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.70.0)"] - -[[package]] -name = "grpcio" -version = "1.73.1" -description = "HTTP/2-based RPC framework" -optional = false -python-versions = ">=3.9" -groups = ["test"] -markers = "python_version >= \"3.10\"" -files = [ - {file = "grpcio-1.73.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:2d70f4ddd0a823436c2624640570ed6097e40935c9194482475fe8e3d9754d55"}, - {file = "grpcio-1.73.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:3841a8a5a66830261ab6a3c2a3dc539ed84e4ab019165f77b3eeb9f0ba621f26"}, - {file = "grpcio-1.73.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:628c30f8e77e0258ab788750ec92059fc3d6628590fb4b7cea8c102503623ed7"}, - {file = 
"grpcio-1.73.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:67a0468256c9db6d5ecb1fde4bf409d016f42cef649323f0a08a72f352d1358b"}, - {file = "grpcio-1.73.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68b84d65bbdebd5926eb5c53b0b9ec3b3f83408a30e4c20c373c5337b4219ec5"}, - {file = "grpcio-1.73.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c54796ca22b8349cc594d18b01099e39f2b7ffb586ad83217655781a350ce4da"}, - {file = "grpcio-1.73.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:75fc8e543962ece2f7ecd32ada2d44c0c8570ae73ec92869f9af8b944863116d"}, - {file = "grpcio-1.73.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6a6037891cd2b1dd1406b388660522e1565ed340b1fea2955b0234bdd941a862"}, - {file = "grpcio-1.73.1-cp310-cp310-win32.whl", hash = "sha256:cce7265b9617168c2d08ae570fcc2af4eaf72e84f8c710ca657cc546115263af"}, - {file = "grpcio-1.73.1-cp310-cp310-win_amd64.whl", hash = "sha256:6a2b372e65fad38842050943f42ce8fee00c6f2e8ea4f7754ba7478d26a356ee"}, - {file = "grpcio-1.73.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:ba2cea9f7ae4bc21f42015f0ec98f69ae4179848ad744b210e7685112fa507a1"}, - {file = "grpcio-1.73.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:d74c3f4f37b79e746271aa6cdb3a1d7e4432aea38735542b23adcabaaee0c097"}, - {file = "grpcio-1.73.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:5b9b1805a7d61c9e90541cbe8dfe0a593dfc8c5c3a43fe623701b6a01b01d710"}, - {file = "grpcio-1.73.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3215f69a0670a8cfa2ab53236d9e8026bfb7ead5d4baabe7d7dc11d30fda967"}, - {file = "grpcio-1.73.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc5eccfd9577a5dc7d5612b2ba90cca4ad14c6d949216c68585fdec9848befb1"}, - {file = "grpcio-1.73.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:dc7d7fd520614fce2e6455ba89791458020a39716951c7c07694f9dbae28e9c0"}, - {file = "grpcio-1.73.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:105492124828911f85127e4825d1c1234b032cb9d238567876b5515d01151379"}, - {file = "grpcio-1.73.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:610e19b04f452ba6f402ac9aa94eb3d21fbc94553368008af634812c4a85a99e"}, - {file = "grpcio-1.73.1-cp311-cp311-win32.whl", hash = "sha256:d60588ab6ba0ac753761ee0e5b30a29398306401bfbceffe7d68ebb21193f9d4"}, - {file = "grpcio-1.73.1-cp311-cp311-win_amd64.whl", hash = "sha256:6957025a4608bb0a5ff42abd75bfbb2ed99eda29d5992ef31d691ab54b753643"}, - {file = "grpcio-1.73.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:921b25618b084e75d424a9f8e6403bfeb7abef074bb6c3174701e0f2542debcf"}, - {file = "grpcio-1.73.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:277b426a0ed341e8447fbf6c1d6b68c952adddf585ea4685aa563de0f03df887"}, - {file = "grpcio-1.73.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:96c112333309493c10e118d92f04594f9055774757f5d101b39f8150f8c25582"}, - {file = "grpcio-1.73.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f48e862aed925ae987eb7084409a80985de75243389dc9d9c271dd711e589918"}, - {file = "grpcio-1.73.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83a6c2cce218e28f5040429835fa34a29319071079e3169f9543c3fbeff166d2"}, - {file = "grpcio-1.73.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:65b0458a10b100d815a8426b1442bd17001fdb77ea13665b2f7dc9e8587fdc6b"}, - {file = "grpcio-1.73.1-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:0a9f3ea8dce9eae9d7cb36827200133a72b37a63896e0e61a9d5ec7d61a59ab1"}, - {file = "grpcio-1.73.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:de18769aea47f18e782bf6819a37c1c528914bfd5683b8782b9da356506190c8"}, - {file = "grpcio-1.73.1-cp312-cp312-win32.whl", hash = "sha256:24e06a5319e33041e322d32c62b1e728f18ab8c9dbc91729a3d9f9e3ed336642"}, - {file = "grpcio-1.73.1-cp312-cp312-win_amd64.whl", hash = "sha256:303c8135d8ab176f8038c14cc10d698ae1db9c480f2b2823f7a987aa2a4c5646"}, - {file = "grpcio-1.73.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:b310824ab5092cf74750ebd8a8a8981c1810cb2b363210e70d06ef37ad80d4f9"}, - {file = "grpcio-1.73.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:8f5a6df3fba31a3485096ac85b2e34b9666ffb0590df0cd044f58694e6a1f6b5"}, - {file = "grpcio-1.73.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:052e28fe9c41357da42250a91926a3e2f74c046575c070b69659467ca5aa976b"}, - {file = "grpcio-1.73.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c0bf15f629b1497436596b1cbddddfa3234273490229ca29561209778ebe182"}, - {file = "grpcio-1.73.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ab860d5bfa788c5a021fba264802e2593688cd965d1374d31d2b1a34cacd854"}, - {file = "grpcio-1.73.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:ad1d958c31cc91ab050bd8a91355480b8e0683e21176522bacea225ce51163f2"}, - {file = "grpcio-1.73.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:f43ffb3bd415c57224c7427bfb9e6c46a0b6e998754bfa0d00f408e1873dcbb5"}, - {file = "grpcio-1.73.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:686231cdd03a8a8055f798b2b54b19428cdf18fa1549bee92249b43607c42668"}, - {file = "grpcio-1.73.1-cp313-cp313-win32.whl", hash = "sha256:89018866a096e2ce21e05eabed1567479713ebe57b1db7cbb0f1e3b896793ba4"}, - {file = "grpcio-1.73.1-cp313-cp313-win_amd64.whl", hash = "sha256:4a68f8c9966b94dff693670a5cf2b54888a48a5011c5d9ce2295a1a1465ee84f"}, - {file = "grpcio-1.73.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:b4adc97d2d7f5c660a5498bda978ebb866066ad10097265a5da0511323ae9f50"}, - {file = "grpcio-1.73.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:c45a28a0cfb6ddcc7dc50a29de44ecac53d115c3388b2782404218db51cb2df3"}, - {file = "grpcio-1.73.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:10af9f2ab98a39f5b6c1896c6fc2036744b5b41d12739d48bed4c3e15b6cf900"}, - {file = "grpcio-1.73.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:45cf17dcce5ebdb7b4fe9e86cb338fa99d7d1bb71defc78228e1ddf8d0de8cbb"}, - {file = "grpcio-1.73.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c502c2e950fc7e8bf05c047e8a14522ef7babac59abbfde6dbf46b7a0d9c71e"}, - {file = "grpcio-1.73.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6abfc0f9153dc4924536f40336f88bd4fe7bd7494f028675e2e04291b8c2c62a"}, - {file = "grpcio-1.73.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ed451a0e39c8e51eb1612b78686839efd1a920666d1666c1adfdb4fd51680c0f"}, - {file = "grpcio-1.73.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:07f08705a5505c9b5b0cbcbabafb96462b5a15b7236bbf6bbcc6b0b91e1cbd7e"}, - {file = "grpcio-1.73.1-cp39-cp39-win32.whl", hash = "sha256:ad5c958cc3d98bb9d71714dc69f1c13aaf2f4b53e29d4cc3f1501ef2e4d129b2"}, - {file = "grpcio-1.73.1-cp39-cp39-win_amd64.whl", hash = "sha256:42f0660bce31b745eb9d23f094a332d31f210dcadd0fc8e5be7e4c62a87ce86b"}, - {file = "grpcio-1.73.1.tar.gz", hash = "sha256:7fce2cd1c0c1116cf3850564ebfc3264fba75d3c74a7414373f1238ea365ef87"}, -] - 
-[package.extras] -protobuf = ["grpcio-tools (>=1.73.1)"] +protobuf = ["grpcio-tools (>=1.67.0)"] [[package]] name = "idna" @@ -725,38 +686,34 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2 [[package]] name = "importlib-metadata" -version = "8.5.0" +version = "8.4.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" groups = ["main", "test"] files = [ - {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, - {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, + {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, + {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, ] [package.dependencies] -zipp = ">=3.20" +zipp = ">=0.5" [package.extras] -check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] -cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] -type = ["pytest-mypy"] +test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] [[package]] name = "iniconfig" -version = "2.1.0" +version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false -python-versions = ">=3.8" +python-versions = ">=3.7" groups = ["test"] files = [ - {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, - {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] [[package]] @@ -948,14 +905,14 @@ reports = ["lxml"] [[package]] name = "mypy-extensions" -version = "1.1.0" +version = "1.0.0" description = "Type system extensions for programs checked with the mypy type checker." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.5" groups = ["dev"] files = [ - {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, - {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] [[package]] @@ -1002,126 +959,123 @@ telemetry = ["opentelemetry-api (==1.18.0)", "opentelemetry-exporter-otlp-proto- [[package]] name = "opentelemetry-api" -version = "1.33.1" +version = "1.27.0" description = "OpenTelemetry Python API" optional = false python-versions = ">=3.8" groups = ["main", "test"] files = [ - {file = "opentelemetry_api-1.33.1-py3-none-any.whl", hash = "sha256:4db83ebcf7ea93e64637ec6ee6fabee45c5cbe4abd9cf3da95c43828ddb50b83"}, - {file = "opentelemetry_api-1.33.1.tar.gz", hash = "sha256:1c6055fc0a2d3f23a50c7e17e16ef75ad489345fd3df1f8b8af7c0bbf8a109e8"}, + {file = "opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, + {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, ] [package.dependencies] deprecated = ">=1.2.6" -importlib-metadata = ">=6.0,<8.7.0" +importlib-metadata = ">=6.0,<=8.4.0" [[package]] name = "opentelemetry-exporter-otlp" -version = "1.33.1" +version = "1.27.0" description = "OpenTelemetry Collector Exporters" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "opentelemetry_exporter_otlp-1.33.1-py3-none-any.whl", hash = "sha256:9bcf1def35b880b55a49e31ebd63910edac14b294fd2ab884953c4deaff5b300"}, - {file = "opentelemetry_exporter_otlp-1.33.1.tar.gz", hash = "sha256:4d050311ea9486e3994575aa237e32932aad58330a31fba24fdba5c0d531cf04"}, + {file = "opentelemetry_exporter_otlp-1.27.0-py3-none-any.whl", hash = "sha256:7688791cbdd951d71eb6445951d1cfbb7b6b2d7ee5948fac805d404802931145"}, + {file = "opentelemetry_exporter_otlp-1.27.0.tar.gz", hash = "sha256:4a599459e623868cc95d933c301199c2367e530f089750e115599fccd67cb2a1"}, ] [package.dependencies] -opentelemetry-exporter-otlp-proto-grpc = "1.33.1" -opentelemetry-exporter-otlp-proto-http = "1.33.1" +opentelemetry-exporter-otlp-proto-grpc = "1.27.0" +opentelemetry-exporter-otlp-proto-http = "1.27.0" [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.33.1" +version = "1.27.0" description = "OpenTelemetry Protobuf encoding" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.33.1-py3-none-any.whl", hash = "sha256:b81c1de1ad349785e601d02715b2d29d6818aed2c809c20219f3d1f20b038c36"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.33.1.tar.gz", hash = "sha256:c57b3fa2d0595a21c4ed586f74f948d259d9949b58258f11edb398f246bec131"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.27.0-py3-none-any.whl", hash = "sha256:675db7fffcb60946f3a5c43e17d1168a3307a94a930ecf8d2ea1f286f3d4f79a"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.27.0.tar.gz", hash = "sha256:159d27cf49f359e3798c4c3eb8da6ef4020e292571bd8c5604a2a573231dd5c8"}, ] [package.dependencies] -opentelemetry-proto = "1.33.1" +opentelemetry-proto = "1.27.0" [[package]] name 
= "opentelemetry-exporter-otlp-proto-grpc" -version = "1.33.1" +version = "1.27.0" description = "OpenTelemetry Collector Protobuf over gRPC Exporter" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "opentelemetry_exporter_otlp_proto_grpc-1.33.1-py3-none-any.whl", hash = "sha256:7e8da32c7552b756e75b4f9e9c768a61eb47dee60b6550b37af541858d669ce1"}, - {file = "opentelemetry_exporter_otlp_proto_grpc-1.33.1.tar.gz", hash = "sha256:345696af8dc19785fac268c8063f3dc3d5e274c774b308c634f39d9c21955728"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0-py3-none-any.whl", hash = "sha256:56b5bbd5d61aab05e300d9d62a6b3c134827bbd28d0b12f2649c2da368006c9e"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0.tar.gz", hash = "sha256:af6f72f76bcf425dfb5ad11c1a6d6eca2863b91e63575f89bb7b4b55099d968f"}, ] [package.dependencies] deprecated = ">=1.2.6" googleapis-common-protos = ">=1.52,<2.0" -grpcio = [ - {version = ">=1.63.2,<2.0.0", markers = "python_version < \"3.13\""}, - {version = ">=1.66.2,<2.0.0", markers = "python_version >= \"3.13\""}, -] +grpcio = ">=1.0.0,<2.0.0" opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.33.1" -opentelemetry-proto = "1.33.1" -opentelemetry-sdk = ">=1.33.1,<1.34.0" +opentelemetry-exporter-otlp-proto-common = "1.27.0" +opentelemetry-proto = "1.27.0" +opentelemetry-sdk = ">=1.27.0,<1.28.0" [[package]] name = "opentelemetry-exporter-otlp-proto-http" -version = "1.33.1" +version = "1.27.0" description = "OpenTelemetry Collector Protobuf over HTTP Exporter" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "opentelemetry_exporter_otlp_proto_http-1.33.1-py3-none-any.whl", hash = "sha256:ebd6c523b89a2ecba0549adb92537cc2bf647b4ee61afbbd5a4c6535aa3da7cf"}, - {file = "opentelemetry_exporter_otlp_proto_http-1.33.1.tar.gz", hash = "sha256:46622d964a441acb46f463ebdc26929d9dec9efb2e54ef06acdc7305e8593c38"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.27.0-py3-none-any.whl", hash = "sha256:688027575c9da42e179a69fe17e2d1eba9b14d81de8d13553a21d3114f3b4d75"}, + {file = "opentelemetry_exporter_otlp_proto_http-1.27.0.tar.gz", hash = "sha256:2103479092d8eb18f61f3fbff084f67cc7f2d4a7d37e75304b8b56c1d09ebef5"}, ] [package.dependencies] deprecated = ">=1.2.6" googleapis-common-protos = ">=1.52,<2.0" opentelemetry-api = ">=1.15,<2.0" -opentelemetry-exporter-otlp-proto-common = "1.33.1" -opentelemetry-proto = "1.33.1" -opentelemetry-sdk = ">=1.33.1,<1.34.0" +opentelemetry-exporter-otlp-proto-common = "1.27.0" +opentelemetry-proto = "1.27.0" +opentelemetry-sdk = ">=1.27.0,<1.28.0" requests = ">=2.7,<3.0" [[package]] name = "opentelemetry-proto" -version = "1.33.1" +version = "1.27.0" description = "OpenTelemetry Python Proto" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "opentelemetry_proto-1.33.1-py3-none-any.whl", hash = "sha256:243d285d9f29663fc7ea91a7171fcc1ccbbfff43b48df0774fd64a37d98eda70"}, - {file = "opentelemetry_proto-1.33.1.tar.gz", hash = "sha256:9627b0a5c90753bf3920c398908307063e4458b287bb890e5c1d6fa11ad50b68"}, + {file = "opentelemetry_proto-1.27.0-py3-none-any.whl", hash = "sha256:b133873de5581a50063e1e4b29cdcf0c5e253a8c2d8dc1229add20a4c3830ace"}, + {file = "opentelemetry_proto-1.27.0.tar.gz", hash = "sha256:33c9345d91dafd8a74fc3d7576c5a38f18b7fdf8d02983ac67485386132aedd6"}, ] [package.dependencies] -protobuf = ">=5.0,<6.0" +protobuf = ">=3.19,<5.0" [[package]] name = "opentelemetry-sdk" -version = "1.33.1" +version = "1.27.0" description = 
"OpenTelemetry Python SDK" optional = false python-versions = ">=3.8" groups = ["main", "test"] files = [ - {file = "opentelemetry_sdk-1.33.1-py3-none-any.whl", hash = "sha256:19ea73d9a01be29cacaa5d6c8ce0adc0b7f7b4d58cc52f923e4413609f670112"}, - {file = "opentelemetry_sdk-1.33.1.tar.gz", hash = "sha256:85b9fcf7c3d23506fbc9692fd210b8b025a1920535feec50bd54ce203d57a531"}, + {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, + {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, ] [package.dependencies] -opentelemetry-api = "1.33.1" -opentelemetry-semantic-conventions = "0.54b1" +opentelemetry-api = "1.27.0" +opentelemetry-semantic-conventions = "0.48b0" typing-extensions = ">=3.7.4" [[package]] @@ -1141,30 +1095,30 @@ opentelemetry-sdk = ">=1.12,<2.0" [[package]] name = "opentelemetry-semantic-conventions" -version = "0.54b1" +version = "0.48b0" description = "OpenTelemetry Semantic Conventions" optional = false python-versions = ">=3.8" groups = ["main", "test"] files = [ - {file = "opentelemetry_semantic_conventions-0.54b1-py3-none-any.whl", hash = "sha256:29dab644a7e435b58d3a3918b58c333c92686236b30f7891d5e51f02933ca60d"}, - {file = "opentelemetry_semantic_conventions-0.54b1.tar.gz", hash = "sha256:d1cecedae15d19bdaafca1e56b29a66aa286f50b5d08f036a145c7f3e9ef9cee"}, + {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, + {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, ] [package.dependencies] deprecated = ">=1.2.6" -opentelemetry-api = "1.33.1" +opentelemetry-api = "1.27.0" [[package]] name = "packaging" -version = "25.0" +version = "24.1" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, - {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, + {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, + {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] [[package]] @@ -1215,23 +1169,23 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "protobuf" -version = "5.29.5" +version = "4.25.8" description = "" optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "protobuf-5.29.5-cp310-abi3-win32.whl", hash = "sha256:3f1c6468a2cfd102ff4703976138844f78ebd1fb45f49011afc5139e9e283079"}, - {file = "protobuf-5.29.5-cp310-abi3-win_amd64.whl", hash = "sha256:3f76e3a3675b4a4d867b52e4a5f5b78a2ef9565549d4037e06cf7b0942b1d3fc"}, - {file = "protobuf-5.29.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e38c5add5a311f2a6eb0340716ef9b039c1dfa428b28f25a7838ac329204a671"}, - {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:fa18533a299d7ab6c55a238bf8629311439995f2e7eca5caaff08663606e9015"}, - {file = "protobuf-5.29.5-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:63848923da3325e1bf7e9003d680ce6e14b07e55d0473253a690c3a8b8fd6e61"}, - {file = "protobuf-5.29.5-cp38-cp38-win32.whl", hash = 
"sha256:ef91363ad4faba7b25d844ef1ada59ff1604184c0bcd8b39b8a6bef15e1af238"}, - {file = "protobuf-5.29.5-cp38-cp38-win_amd64.whl", hash = "sha256:7318608d56b6402d2ea7704ff1e1e4597bee46d760e7e4dd42a3d45e24b87f2e"}, - {file = "protobuf-5.29.5-cp39-cp39-win32.whl", hash = "sha256:6f642dc9a61782fa72b90878af134c5afe1917c89a568cd3476d758d3c3a0736"}, - {file = "protobuf-5.29.5-cp39-cp39-win_amd64.whl", hash = "sha256:470f3af547ef17847a28e1f47200a1cbf0ba3ff57b7de50d22776607cd2ea353"}, - {file = "protobuf-5.29.5-py3-none-any.whl", hash = "sha256:6cf42630262c59b2d8de33954443d94b746c952b01434fc58a417fdbd2e84bd5"}, - {file = "protobuf-5.29.5.tar.gz", hash = "sha256:bc1463bafd4b0929216c35f437a8e28731a2b7fe3d98bb77a600efced5a15c84"}, + {file = "protobuf-4.25.8-cp310-abi3-win32.whl", hash = "sha256:504435d831565f7cfac9f0714440028907f1975e4bed228e58e72ecfff58a1e0"}, + {file = "protobuf-4.25.8-cp310-abi3-win_amd64.whl", hash = "sha256:bd551eb1fe1d7e92c1af1d75bdfa572eff1ab0e5bf1736716814cdccdb2360f9"}, + {file = "protobuf-4.25.8-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:ca809b42f4444f144f2115c4c1a747b9a404d590f18f37e9402422033e464e0f"}, + {file = "protobuf-4.25.8-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:9ad7ef62d92baf5a8654fbb88dac7fa5594cfa70fd3440488a5ca3bfc6d795a7"}, + {file = "protobuf-4.25.8-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:83e6e54e93d2b696a92cad6e6efc924f3850f82b52e1563778dfab8b355101b0"}, + {file = "protobuf-4.25.8-cp38-cp38-win32.whl", hash = "sha256:27d498ffd1f21fb81d987a041c32d07857d1d107909f5134ba3350e1ce80a4af"}, + {file = "protobuf-4.25.8-cp38-cp38-win_amd64.whl", hash = "sha256:d552c53d0415449c8d17ced5c341caba0d89dbf433698e1436c8fa0aae7808a3"}, + {file = "protobuf-4.25.8-cp39-cp39-win32.whl", hash = "sha256:077ff8badf2acf8bc474406706ad890466274191a48d0abd3bd6987107c9cde5"}, + {file = "protobuf-4.25.8-cp39-cp39-win_amd64.whl", hash = "sha256:f4510b93a3bec6eba8fd8f1093e9d7fb0d4a24d1a81377c10c0e5bbfe9e4ed24"}, + {file = "protobuf-4.25.8-py3-none-any.whl", hash = "sha256:15a0af558aa3b13efef102ae6e4f3efac06f1eea11afb3a57db2901447d9fb59"}, + {file = "protobuf-4.25.8.tar.gz", hash = "sha256:6135cf8affe1fc6f76cced2641e4ea8d3e59518d1f24ae41ba97bcad82d397cd"}, ] [[package]] @@ -1516,44 +1470,44 @@ files = [ [[package]] name = "s3transfer" -version = "0.11.5" +version = "0.11.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">=3.8" groups = ["main", "test"] files = [ - {file = "s3transfer-0.11.5-py3-none-any.whl", hash = "sha256:757af0f2ac150d3c75bc4177a32355c3862a98d20447b69a0161812992fe0bd4"}, - {file = "s3transfer-0.11.5.tar.gz", hash = "sha256:8c8aad92784779ab8688a61aefff3e28e9ebdce43142808eaa3f0b0f402f68b7"}, + {file = "s3transfer-0.11.0-py3-none-any.whl", hash = "sha256:f43b03931c198743569bbfb6a328a53f4b2b4ec723cd7c01fab68e3119db3f8b"}, + {file = "s3transfer-0.11.0.tar.gz", hash = "sha256:6563eda054c33bdebef7cbf309488634651c47270d828e594d151cd289fb7cf7"}, ] [package.dependencies] -botocore = ">=1.37.4,<2.0a.0" +botocore = ">=1.33.2,<2.0a.0" [package.extras] -crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"] +crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] [[package]] name = "six" -version = "1.17.0" +version = "1.16.0" description = "Python 2 and 3 compatibility utilities" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" groups = ["main", "test"] files = [ - {file = "six-1.17.0-py2.py3-none-any.whl", hash = 
"sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, - {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, ] [[package]] name = "soupsieve" -version = "2.7" +version = "2.6" description = "A modern CSS selector implementation for Beautiful Soup." optional = false python-versions = ">=3.8" groups = ["test"] files = [ - {file = "soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4"}, - {file = "soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a"}, + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, ] [[package]] @@ -1652,6 +1606,21 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] +[[package]] +name = "tabulate" +version = "0.9.0" +description = "Pretty-print tabular data" +optional = false +python-versions = ">=3.7" +groups = ["test"] +files = [ + {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"}, + {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"}, +] + +[package.extras] +widechars = ["wcwidth"] + [[package]] name = "toml" version = "0.10.2" @@ -1666,45 +1635,15 @@ files = [ [[package]] name = "tomli" -version = "2.2.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" groups = ["dev", "test"] markers = "python_version < \"3.11\"" files = [ - {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, - {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, - {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, - {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, - {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, - {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = 
"sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, - {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, - {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, - {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, - {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, - {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, - {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, - {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, - {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, - {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, - {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, - {file = "tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, - {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = 
"sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] @@ -1735,29 +1674,482 @@ files = [ {file = "types_aws_xray_sdk-2.14.0.20240606-py3-none-any.whl", hash = "sha256:c238ad639bb50896f1326c12bcc36b7832b5bc7c4b5e2b19a7efcd89d7d28b94"}, ] +[[package]] +name = "types-awscrt" +version = "0.27.2" +description = "Type annotations and code completion for awscrt" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "types_awscrt-0.27.2-py3-none-any.whl", hash = "sha256:49a045f25bbd5ad2865f314512afced933aed35ddbafc252e2268efa8a787e4e"}, + {file = "types_awscrt-0.27.2.tar.gz", hash = "sha256:acd04f57119eb15626ab0ba9157fc24672421de56e7bd7b9f61681fedee44e91"}, +] + +[[package]] +name = "types-boto3" +version = "1.38.40" +description = "Type annotations for boto3 1.38.40 generated with mypy-boto3-builder 8.11.0" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "types_boto3-1.38.40-py3-none-any.whl", hash = "sha256:83c8eef33297debdd7994003524977e9b2947aab331cfdd18f81260981a1681a"}, + {file = "types_boto3-1.38.40.tar.gz", hash = "sha256:10e29da59c9d1c24830f1871ee1abdc3e97e5248698e4e6b69737f617c276857"}, +] + +[package.dependencies] +botocore-stubs = "*" +types-s3transfer = "*" +typing-extensions = {version = ">=4.1.0", markers = "python_version < \"3.12\""} + +[package.extras] +accessanalyzer = ["types-boto3-accessanalyzer (>=1.38.0,<1.39.0)"] +account = ["types-boto3-account (>=1.38.0,<1.39.0)"] +acm = ["types-boto3-acm (>=1.38.0,<1.39.0)"] +acm-pca = ["types-boto3-acm-pca (>=1.38.0,<1.39.0)"] +aiops = ["types-boto3-aiops (>=1.38.0,<1.39.0)"] +all = ["types-boto3-accessanalyzer (>=1.38.0,<1.39.0)", "types-boto3-account (>=1.38.0,<1.39.0)", "types-boto3-acm (>=1.38.0,<1.39.0)", "types-boto3-acm-pca (>=1.38.0,<1.39.0)", "types-boto3-aiops (>=1.38.0,<1.39.0)", "types-boto3-amp (>=1.38.0,<1.39.0)", "types-boto3-amplify (>=1.38.0,<1.39.0)", "types-boto3-amplifybackend (>=1.38.0,<1.39.0)", "types-boto3-amplifyuibuilder (>=1.38.0,<1.39.0)", "types-boto3-apigateway (>=1.38.0,<1.39.0)", "types-boto3-apigatewaymanagementapi (>=1.38.0,<1.39.0)", "types-boto3-apigatewayv2 (>=1.38.0,<1.39.0)", "types-boto3-appconfig (>=1.38.0,<1.39.0)", "types-boto3-appconfigdata (>=1.38.0,<1.39.0)", "types-boto3-appfabric (>=1.38.0,<1.39.0)", "types-boto3-appflow (>=1.38.0,<1.39.0)", "types-boto3-appintegrations (>=1.38.0,<1.39.0)", "types-boto3-application-autoscaling (>=1.38.0,<1.39.0)", "types-boto3-application-insights (>=1.38.0,<1.39.0)", "types-boto3-application-signals (>=1.38.0,<1.39.0)", "types-boto3-applicationcostprofiler (>=1.38.0,<1.39.0)", "types-boto3-appmesh (>=1.38.0,<1.39.0)", "types-boto3-apprunner (>=1.38.0,<1.39.0)", "types-boto3-appstream (>=1.38.0,<1.39.0)", "types-boto3-appsync (>=1.38.0,<1.39.0)", "types-boto3-apptest (>=1.38.0,<1.39.0)", "types-boto3-arc-zonal-shift (>=1.38.0,<1.39.0)", "types-boto3-artifact (>=1.38.0,<1.39.0)", "types-boto3-athena (>=1.38.0,<1.39.0)", "types-boto3-auditmanager (>=1.38.0,<1.39.0)", "types-boto3-autoscaling (>=1.38.0,<1.39.0)", "types-boto3-autoscaling-plans (>=1.38.0,<1.39.0)", "types-boto3-b2bi (>=1.38.0,<1.39.0)", "types-boto3-backup (>=1.38.0,<1.39.0)", "types-boto3-backup-gateway (>=1.38.0,<1.39.0)", "types-boto3-backupsearch (>=1.38.0,<1.39.0)", "types-boto3-batch (>=1.38.0,<1.39.0)", "types-boto3-bcm-data-exports (>=1.38.0,<1.39.0)", "types-boto3-bcm-pricing-calculator (>=1.38.0,<1.39.0)", "types-boto3-bedrock (>=1.38.0,<1.39.0)", "types-boto3-bedrock-agent 
(>=1.38.0,<1.39.0)", "types-boto3-bedrock-agent-runtime (>=1.38.0,<1.39.0)", "types-boto3-bedrock-data-automation (>=1.38.0,<1.39.0)", "types-boto3-bedrock-data-automation-runtime (>=1.38.0,<1.39.0)", "types-boto3-bedrock-runtime (>=1.38.0,<1.39.0)", "types-boto3-billing (>=1.38.0,<1.39.0)", "types-boto3-billingconductor (>=1.38.0,<1.39.0)", "types-boto3-braket (>=1.38.0,<1.39.0)", "types-boto3-budgets (>=1.38.0,<1.39.0)", "types-boto3-ce (>=1.38.0,<1.39.0)", "types-boto3-chatbot (>=1.38.0,<1.39.0)", "types-boto3-chime (>=1.38.0,<1.39.0)", "types-boto3-chime-sdk-identity (>=1.38.0,<1.39.0)", "types-boto3-chime-sdk-media-pipelines (>=1.38.0,<1.39.0)", "types-boto3-chime-sdk-meetings (>=1.38.0,<1.39.0)", "types-boto3-chime-sdk-messaging (>=1.38.0,<1.39.0)", "types-boto3-chime-sdk-voice (>=1.38.0,<1.39.0)", "types-boto3-cleanrooms (>=1.38.0,<1.39.0)", "types-boto3-cleanroomsml (>=1.38.0,<1.39.0)", "types-boto3-cloud9 (>=1.38.0,<1.39.0)", "types-boto3-cloudcontrol (>=1.38.0,<1.39.0)", "types-boto3-clouddirectory (>=1.38.0,<1.39.0)", "types-boto3-cloudformation (>=1.38.0,<1.39.0)", "types-boto3-cloudfront (>=1.38.0,<1.39.0)", "types-boto3-cloudfront-keyvaluestore (>=1.38.0,<1.39.0)", "types-boto3-cloudhsm (>=1.38.0,<1.39.0)", "types-boto3-cloudhsmv2 (>=1.38.0,<1.39.0)", "types-boto3-cloudsearch (>=1.38.0,<1.39.0)", "types-boto3-cloudsearchdomain (>=1.38.0,<1.39.0)", "types-boto3-cloudtrail (>=1.38.0,<1.39.0)", "types-boto3-cloudtrail-data (>=1.38.0,<1.39.0)", "types-boto3-cloudwatch (>=1.38.0,<1.39.0)", "types-boto3-codeartifact (>=1.38.0,<1.39.0)", "types-boto3-codebuild (>=1.38.0,<1.39.0)", "types-boto3-codecatalyst (>=1.38.0,<1.39.0)", "types-boto3-codecommit (>=1.38.0,<1.39.0)", "types-boto3-codeconnections (>=1.38.0,<1.39.0)", "types-boto3-codedeploy (>=1.38.0,<1.39.0)", "types-boto3-codeguru-reviewer (>=1.38.0,<1.39.0)", "types-boto3-codeguru-security (>=1.38.0,<1.39.0)", "types-boto3-codeguruprofiler (>=1.38.0,<1.39.0)", "types-boto3-codepipeline (>=1.38.0,<1.39.0)", "types-boto3-codestar-connections (>=1.38.0,<1.39.0)", "types-boto3-codestar-notifications (>=1.38.0,<1.39.0)", "types-boto3-cognito-identity (>=1.38.0,<1.39.0)", "types-boto3-cognito-idp (>=1.38.0,<1.39.0)", "types-boto3-cognito-sync (>=1.38.0,<1.39.0)", "types-boto3-comprehend (>=1.38.0,<1.39.0)", "types-boto3-comprehendmedical (>=1.38.0,<1.39.0)", "types-boto3-compute-optimizer (>=1.38.0,<1.39.0)", "types-boto3-config (>=1.38.0,<1.39.0)", "types-boto3-connect (>=1.38.0,<1.39.0)", "types-boto3-connect-contact-lens (>=1.38.0,<1.39.0)", "types-boto3-connectcampaigns (>=1.38.0,<1.39.0)", "types-boto3-connectcampaignsv2 (>=1.38.0,<1.39.0)", "types-boto3-connectcases (>=1.38.0,<1.39.0)", "types-boto3-connectparticipant (>=1.38.0,<1.39.0)", "types-boto3-controlcatalog (>=1.38.0,<1.39.0)", "types-boto3-controltower (>=1.38.0,<1.39.0)", "types-boto3-cost-optimization-hub (>=1.38.0,<1.39.0)", "types-boto3-cur (>=1.38.0,<1.39.0)", "types-boto3-customer-profiles (>=1.38.0,<1.39.0)", "types-boto3-databrew (>=1.38.0,<1.39.0)", "types-boto3-dataexchange (>=1.38.0,<1.39.0)", "types-boto3-datapipeline (>=1.38.0,<1.39.0)", "types-boto3-datasync (>=1.38.0,<1.39.0)", "types-boto3-datazone (>=1.38.0,<1.39.0)", "types-boto3-dax (>=1.38.0,<1.39.0)", "types-boto3-deadline (>=1.38.0,<1.39.0)", "types-boto3-detective (>=1.38.0,<1.39.0)", "types-boto3-devicefarm (>=1.38.0,<1.39.0)", "types-boto3-devops-guru (>=1.38.0,<1.39.0)", "types-boto3-directconnect (>=1.38.0,<1.39.0)", "types-boto3-discovery (>=1.38.0,<1.39.0)", "types-boto3-dlm 
(>=1.38.0,<1.39.0)", "types-boto3-dms (>=1.38.0,<1.39.0)", "types-boto3-docdb (>=1.38.0,<1.39.0)", "types-boto3-docdb-elastic (>=1.38.0,<1.39.0)", "types-boto3-drs (>=1.38.0,<1.39.0)", "types-boto3-ds (>=1.38.0,<1.39.0)", "types-boto3-ds-data (>=1.38.0,<1.39.0)", "types-boto3-dsql (>=1.38.0,<1.39.0)", "types-boto3-dynamodb (>=1.38.0,<1.39.0)", "types-boto3-dynamodbstreams (>=1.38.0,<1.39.0)", "types-boto3-ebs (>=1.38.0,<1.39.0)", "types-boto3-ec2 (>=1.38.0,<1.39.0)", "types-boto3-ec2-instance-connect (>=1.38.0,<1.39.0)", "types-boto3-ecr (>=1.38.0,<1.39.0)", "types-boto3-ecr-public (>=1.38.0,<1.39.0)", "types-boto3-ecs (>=1.38.0,<1.39.0)", "types-boto3-efs (>=1.38.0,<1.39.0)", "types-boto3-eks (>=1.38.0,<1.39.0)", "types-boto3-eks-auth (>=1.38.0,<1.39.0)", "types-boto3-elasticache (>=1.38.0,<1.39.0)", "types-boto3-elasticbeanstalk (>=1.38.0,<1.39.0)", "types-boto3-elastictranscoder (>=1.38.0,<1.39.0)", "types-boto3-elb (>=1.38.0,<1.39.0)", "types-boto3-elbv2 (>=1.38.0,<1.39.0)", "types-boto3-emr (>=1.38.0,<1.39.0)", "types-boto3-emr-containers (>=1.38.0,<1.39.0)", "types-boto3-emr-serverless (>=1.38.0,<1.39.0)", "types-boto3-entityresolution (>=1.38.0,<1.39.0)", "types-boto3-es (>=1.38.0,<1.39.0)", "types-boto3-events (>=1.38.0,<1.39.0)", "types-boto3-evidently (>=1.38.0,<1.39.0)", "types-boto3-evs (>=1.38.0,<1.39.0)", "types-boto3-finspace (>=1.38.0,<1.39.0)", "types-boto3-finspace-data (>=1.38.0,<1.39.0)", "types-boto3-firehose (>=1.38.0,<1.39.0)", "types-boto3-fis (>=1.38.0,<1.39.0)", "types-boto3-fms (>=1.38.0,<1.39.0)", "types-boto3-forecast (>=1.38.0,<1.39.0)", "types-boto3-forecastquery (>=1.38.0,<1.39.0)", "types-boto3-frauddetector (>=1.38.0,<1.39.0)", "types-boto3-freetier (>=1.38.0,<1.39.0)", "types-boto3-fsx (>=1.38.0,<1.39.0)", "types-boto3-gamelift (>=1.38.0,<1.39.0)", "types-boto3-gameliftstreams (>=1.38.0,<1.39.0)", "types-boto3-geo-maps (>=1.38.0,<1.39.0)", "types-boto3-geo-places (>=1.38.0,<1.39.0)", "types-boto3-geo-routes (>=1.38.0,<1.39.0)", "types-boto3-glacier (>=1.38.0,<1.39.0)", "types-boto3-globalaccelerator (>=1.38.0,<1.39.0)", "types-boto3-glue (>=1.38.0,<1.39.0)", "types-boto3-grafana (>=1.38.0,<1.39.0)", "types-boto3-greengrass (>=1.38.0,<1.39.0)", "types-boto3-greengrassv2 (>=1.38.0,<1.39.0)", "types-boto3-groundstation (>=1.38.0,<1.39.0)", "types-boto3-guardduty (>=1.38.0,<1.39.0)", "types-boto3-health (>=1.38.0,<1.39.0)", "types-boto3-healthlake (>=1.38.0,<1.39.0)", "types-boto3-iam (>=1.38.0,<1.39.0)", "types-boto3-identitystore (>=1.38.0,<1.39.0)", "types-boto3-imagebuilder (>=1.38.0,<1.39.0)", "types-boto3-importexport (>=1.38.0,<1.39.0)", "types-boto3-inspector (>=1.38.0,<1.39.0)", "types-boto3-inspector-scan (>=1.38.0,<1.39.0)", "types-boto3-inspector2 (>=1.38.0,<1.39.0)", "types-boto3-internetmonitor (>=1.38.0,<1.39.0)", "types-boto3-invoicing (>=1.38.0,<1.39.0)", "types-boto3-iot (>=1.38.0,<1.39.0)", "types-boto3-iot-data (>=1.38.0,<1.39.0)", "types-boto3-iot-jobs-data (>=1.38.0,<1.39.0)", "types-boto3-iot-managed-integrations (>=1.38.0,<1.39.0)", "types-boto3-iotanalytics (>=1.38.0,<1.39.0)", "types-boto3-iotdeviceadvisor (>=1.38.0,<1.39.0)", "types-boto3-iotevents (>=1.38.0,<1.39.0)", "types-boto3-iotevents-data (>=1.38.0,<1.39.0)", "types-boto3-iotfleethub (>=1.38.0,<1.39.0)", "types-boto3-iotfleetwise (>=1.38.0,<1.39.0)", "types-boto3-iotsecuretunneling (>=1.38.0,<1.39.0)", "types-boto3-iotsitewise (>=1.38.0,<1.39.0)", "types-boto3-iotthingsgraph (>=1.38.0,<1.39.0)", "types-boto3-iottwinmaker (>=1.38.0,<1.39.0)", "types-boto3-iotwireless 
(>=1.38.0,<1.39.0)", "types-boto3-ivs (>=1.38.0,<1.39.0)", "types-boto3-ivs-realtime (>=1.38.0,<1.39.0)", "types-boto3-ivschat (>=1.38.0,<1.39.0)", "types-boto3-kafka (>=1.38.0,<1.39.0)", "types-boto3-kafkaconnect (>=1.38.0,<1.39.0)", "types-boto3-kendra (>=1.38.0,<1.39.0)", "types-boto3-kendra-ranking (>=1.38.0,<1.39.0)", "types-boto3-keyspaces (>=1.38.0,<1.39.0)", "types-boto3-kinesis (>=1.38.0,<1.39.0)", "types-boto3-kinesis-video-archived-media (>=1.38.0,<1.39.0)", "types-boto3-kinesis-video-media (>=1.38.0,<1.39.0)", "types-boto3-kinesis-video-signaling (>=1.38.0,<1.39.0)", "types-boto3-kinesis-video-webrtc-storage (>=1.38.0,<1.39.0)", "types-boto3-kinesisanalytics (>=1.38.0,<1.39.0)", "types-boto3-kinesisanalyticsv2 (>=1.38.0,<1.39.0)", "types-boto3-kinesisvideo (>=1.38.0,<1.39.0)", "types-boto3-kms (>=1.38.0,<1.39.0)", "types-boto3-lakeformation (>=1.38.0,<1.39.0)", "types-boto3-lambda (>=1.38.0,<1.39.0)", "types-boto3-launch-wizard (>=1.38.0,<1.39.0)", "types-boto3-lex-models (>=1.38.0,<1.39.0)", "types-boto3-lex-runtime (>=1.38.0,<1.39.0)", "types-boto3-lexv2-models (>=1.38.0,<1.39.0)", "types-boto3-lexv2-runtime (>=1.38.0,<1.39.0)", "types-boto3-license-manager (>=1.38.0,<1.39.0)", "types-boto3-license-manager-linux-subscriptions (>=1.38.0,<1.39.0)", "types-boto3-license-manager-user-subscriptions (>=1.38.0,<1.39.0)", "types-boto3-lightsail (>=1.38.0,<1.39.0)", "types-boto3-location (>=1.38.0,<1.39.0)", "types-boto3-logs (>=1.38.0,<1.39.0)", "types-boto3-lookoutequipment (>=1.38.0,<1.39.0)", "types-boto3-lookoutmetrics (>=1.38.0,<1.39.0)", "types-boto3-lookoutvision (>=1.38.0,<1.39.0)", "types-boto3-m2 (>=1.38.0,<1.39.0)", "types-boto3-machinelearning (>=1.38.0,<1.39.0)", "types-boto3-macie2 (>=1.38.0,<1.39.0)", "types-boto3-mailmanager (>=1.38.0,<1.39.0)", "types-boto3-managedblockchain (>=1.38.0,<1.39.0)", "types-boto3-managedblockchain-query (>=1.38.0,<1.39.0)", "types-boto3-marketplace-agreement (>=1.38.0,<1.39.0)", "types-boto3-marketplace-catalog (>=1.38.0,<1.39.0)", "types-boto3-marketplace-deployment (>=1.38.0,<1.39.0)", "types-boto3-marketplace-entitlement (>=1.38.0,<1.39.0)", "types-boto3-marketplace-reporting (>=1.38.0,<1.39.0)", "types-boto3-marketplacecommerceanalytics (>=1.38.0,<1.39.0)", "types-boto3-mediaconnect (>=1.38.0,<1.39.0)", "types-boto3-mediaconvert (>=1.38.0,<1.39.0)", "types-boto3-medialive (>=1.38.0,<1.39.0)", "types-boto3-mediapackage (>=1.38.0,<1.39.0)", "types-boto3-mediapackage-vod (>=1.38.0,<1.39.0)", "types-boto3-mediapackagev2 (>=1.38.0,<1.39.0)", "types-boto3-mediastore (>=1.38.0,<1.39.0)", "types-boto3-mediastore-data (>=1.38.0,<1.39.0)", "types-boto3-mediatailor (>=1.38.0,<1.39.0)", "types-boto3-medical-imaging (>=1.38.0,<1.39.0)", "types-boto3-memorydb (>=1.38.0,<1.39.0)", "types-boto3-meteringmarketplace (>=1.38.0,<1.39.0)", "types-boto3-mgh (>=1.38.0,<1.39.0)", "types-boto3-mgn (>=1.38.0,<1.39.0)", "types-boto3-migration-hub-refactor-spaces (>=1.38.0,<1.39.0)", "types-boto3-migrationhub-config (>=1.38.0,<1.39.0)", "types-boto3-migrationhuborchestrator (>=1.38.0,<1.39.0)", "types-boto3-migrationhubstrategy (>=1.38.0,<1.39.0)", "types-boto3-mpa (>=1.38.0,<1.39.0)", "types-boto3-mq (>=1.38.0,<1.39.0)", "types-boto3-mturk (>=1.38.0,<1.39.0)", "types-boto3-mwaa (>=1.38.0,<1.39.0)", "types-boto3-neptune (>=1.38.0,<1.39.0)", "types-boto3-neptune-graph (>=1.38.0,<1.39.0)", "types-boto3-neptunedata (>=1.38.0,<1.39.0)", "types-boto3-network-firewall (>=1.38.0,<1.39.0)", "types-boto3-networkflowmonitor (>=1.38.0,<1.39.0)", "types-boto3-networkmanager 
(>=1.38.0,<1.39.0)", "types-boto3-networkmonitor (>=1.38.0,<1.39.0)", "types-boto3-notifications (>=1.38.0,<1.39.0)", "types-boto3-notificationscontacts (>=1.38.0,<1.39.0)", "types-boto3-oam (>=1.38.0,<1.39.0)", "types-boto3-observabilityadmin (>=1.38.0,<1.39.0)", "types-boto3-omics (>=1.38.0,<1.39.0)", "types-boto3-opensearch (>=1.38.0,<1.39.0)", "types-boto3-opensearchserverless (>=1.38.0,<1.39.0)", "types-boto3-opsworks (>=1.38.0,<1.39.0)", "types-boto3-opsworkscm (>=1.38.0,<1.39.0)", "types-boto3-organizations (>=1.38.0,<1.39.0)", "types-boto3-osis (>=1.38.0,<1.39.0)", "types-boto3-outposts (>=1.38.0,<1.39.0)", "types-boto3-panorama (>=1.38.0,<1.39.0)", "types-boto3-partnercentral-selling (>=1.38.0,<1.39.0)", "types-boto3-payment-cryptography (>=1.38.0,<1.39.0)", "types-boto3-payment-cryptography-data (>=1.38.0,<1.39.0)", "types-boto3-pca-connector-ad (>=1.38.0,<1.39.0)", "types-boto3-pca-connector-scep (>=1.38.0,<1.39.0)", "types-boto3-pcs (>=1.38.0,<1.39.0)", "types-boto3-personalize (>=1.38.0,<1.39.0)", "types-boto3-personalize-events (>=1.38.0,<1.39.0)", "types-boto3-personalize-runtime (>=1.38.0,<1.39.0)", "types-boto3-pi (>=1.38.0,<1.39.0)", "types-boto3-pinpoint (>=1.38.0,<1.39.0)", "types-boto3-pinpoint-email (>=1.38.0,<1.39.0)", "types-boto3-pinpoint-sms-voice (>=1.38.0,<1.39.0)", "types-boto3-pinpoint-sms-voice-v2 (>=1.38.0,<1.39.0)", "types-boto3-pipes (>=1.38.0,<1.39.0)", "types-boto3-polly (>=1.38.0,<1.39.0)", "types-boto3-pricing (>=1.38.0,<1.39.0)", "types-boto3-proton (>=1.38.0,<1.39.0)", "types-boto3-qapps (>=1.38.0,<1.39.0)", "types-boto3-qbusiness (>=1.38.0,<1.39.0)", "types-boto3-qconnect (>=1.38.0,<1.39.0)", "types-boto3-qldb (>=1.38.0,<1.39.0)", "types-boto3-qldb-session (>=1.38.0,<1.39.0)", "types-boto3-quicksight (>=1.38.0,<1.39.0)", "types-boto3-ram (>=1.38.0,<1.39.0)", "types-boto3-rbin (>=1.38.0,<1.39.0)", "types-boto3-rds (>=1.38.0,<1.39.0)", "types-boto3-rds-data (>=1.38.0,<1.39.0)", "types-boto3-redshift (>=1.38.0,<1.39.0)", "types-boto3-redshift-data (>=1.38.0,<1.39.0)", "types-boto3-redshift-serverless (>=1.38.0,<1.39.0)", "types-boto3-rekognition (>=1.38.0,<1.39.0)", "types-boto3-repostspace (>=1.38.0,<1.39.0)", "types-boto3-resiliencehub (>=1.38.0,<1.39.0)", "types-boto3-resource-explorer-2 (>=1.38.0,<1.39.0)", "types-boto3-resource-groups (>=1.38.0,<1.39.0)", "types-boto3-resourcegroupstaggingapi (>=1.38.0,<1.39.0)", "types-boto3-robomaker (>=1.38.0,<1.39.0)", "types-boto3-rolesanywhere (>=1.38.0,<1.39.0)", "types-boto3-route53 (>=1.38.0,<1.39.0)", "types-boto3-route53-recovery-cluster (>=1.38.0,<1.39.0)", "types-boto3-route53-recovery-control-config (>=1.38.0,<1.39.0)", "types-boto3-route53-recovery-readiness (>=1.38.0,<1.39.0)", "types-boto3-route53domains (>=1.38.0,<1.39.0)", "types-boto3-route53profiles (>=1.38.0,<1.39.0)", "types-boto3-route53resolver (>=1.38.0,<1.39.0)", "types-boto3-rum (>=1.38.0,<1.39.0)", "types-boto3-s3 (>=1.38.0,<1.39.0)", "types-boto3-s3control (>=1.38.0,<1.39.0)", "types-boto3-s3outposts (>=1.38.0,<1.39.0)", "types-boto3-s3tables (>=1.38.0,<1.39.0)", "types-boto3-sagemaker (>=1.38.0,<1.39.0)", "types-boto3-sagemaker-a2i-runtime (>=1.38.0,<1.39.0)", "types-boto3-sagemaker-edge (>=1.38.0,<1.39.0)", "types-boto3-sagemaker-featurestore-runtime (>=1.38.0,<1.39.0)", "types-boto3-sagemaker-geospatial (>=1.38.0,<1.39.0)", "types-boto3-sagemaker-metrics (>=1.38.0,<1.39.0)", "types-boto3-sagemaker-runtime (>=1.38.0,<1.39.0)", "types-boto3-savingsplans (>=1.38.0,<1.39.0)", "types-boto3-scheduler (>=1.38.0,<1.39.0)", 
"types-boto3-schemas (>=1.38.0,<1.39.0)", "types-boto3-sdb (>=1.38.0,<1.39.0)", "types-boto3-secretsmanager (>=1.38.0,<1.39.0)", "types-boto3-security-ir (>=1.38.0,<1.39.0)", "types-boto3-securityhub (>=1.38.0,<1.39.0)", "types-boto3-securitylake (>=1.38.0,<1.39.0)", "types-boto3-serverlessrepo (>=1.38.0,<1.39.0)", "types-boto3-service-quotas (>=1.38.0,<1.39.0)", "types-boto3-servicecatalog (>=1.38.0,<1.39.0)", "types-boto3-servicecatalog-appregistry (>=1.38.0,<1.39.0)", "types-boto3-servicediscovery (>=1.38.0,<1.39.0)", "types-boto3-ses (>=1.38.0,<1.39.0)", "types-boto3-sesv2 (>=1.38.0,<1.39.0)", "types-boto3-shield (>=1.38.0,<1.39.0)", "types-boto3-signer (>=1.38.0,<1.39.0)", "types-boto3-simspaceweaver (>=1.38.0,<1.39.0)", "types-boto3-sms (>=1.38.0,<1.39.0)", "types-boto3-snow-device-management (>=1.38.0,<1.39.0)", "types-boto3-snowball (>=1.38.0,<1.39.0)", "types-boto3-sns (>=1.38.0,<1.39.0)", "types-boto3-socialmessaging (>=1.38.0,<1.39.0)", "types-boto3-sqs (>=1.38.0,<1.39.0)", "types-boto3-ssm (>=1.38.0,<1.39.0)", "types-boto3-ssm-contacts (>=1.38.0,<1.39.0)", "types-boto3-ssm-guiconnect (>=1.38.0,<1.39.0)", "types-boto3-ssm-incidents (>=1.38.0,<1.39.0)", "types-boto3-ssm-quicksetup (>=1.38.0,<1.39.0)", "types-boto3-ssm-sap (>=1.38.0,<1.39.0)", "types-boto3-sso (>=1.38.0,<1.39.0)", "types-boto3-sso-admin (>=1.38.0,<1.39.0)", "types-boto3-sso-oidc (>=1.38.0,<1.39.0)", "types-boto3-stepfunctions (>=1.38.0,<1.39.0)", "types-boto3-storagegateway (>=1.38.0,<1.39.0)", "types-boto3-sts (>=1.38.0,<1.39.0)", "types-boto3-supplychain (>=1.38.0,<1.39.0)", "types-boto3-support (>=1.38.0,<1.39.0)", "types-boto3-support-app (>=1.38.0,<1.39.0)", "types-boto3-swf (>=1.38.0,<1.39.0)", "types-boto3-synthetics (>=1.38.0,<1.39.0)", "types-boto3-taxsettings (>=1.38.0,<1.39.0)", "types-boto3-textract (>=1.38.0,<1.39.0)", "types-boto3-timestream-influxdb (>=1.38.0,<1.39.0)", "types-boto3-timestream-query (>=1.38.0,<1.39.0)", "types-boto3-timestream-write (>=1.38.0,<1.39.0)", "types-boto3-tnb (>=1.38.0,<1.39.0)", "types-boto3-transcribe (>=1.38.0,<1.39.0)", "types-boto3-transfer (>=1.38.0,<1.39.0)", "types-boto3-translate (>=1.38.0,<1.39.0)", "types-boto3-trustedadvisor (>=1.38.0,<1.39.0)", "types-boto3-verifiedpermissions (>=1.38.0,<1.39.0)", "types-boto3-voice-id (>=1.38.0,<1.39.0)", "types-boto3-vpc-lattice (>=1.38.0,<1.39.0)", "types-boto3-waf (>=1.38.0,<1.39.0)", "types-boto3-waf-regional (>=1.38.0,<1.39.0)", "types-boto3-wafv2 (>=1.38.0,<1.39.0)", "types-boto3-wellarchitected (>=1.38.0,<1.39.0)", "types-boto3-wisdom (>=1.38.0,<1.39.0)", "types-boto3-workdocs (>=1.38.0,<1.39.0)", "types-boto3-workmail (>=1.38.0,<1.39.0)", "types-boto3-workmailmessageflow (>=1.38.0,<1.39.0)", "types-boto3-workspaces (>=1.38.0,<1.39.0)", "types-boto3-workspaces-thin-client (>=1.38.0,<1.39.0)", "types-boto3-workspaces-web (>=1.38.0,<1.39.0)", "types-boto3-xray (>=1.38.0,<1.39.0)"] +amp = ["types-boto3-amp (>=1.38.0,<1.39.0)"] +amplify = ["types-boto3-amplify (>=1.38.0,<1.39.0)"] +amplifybackend = ["types-boto3-amplifybackend (>=1.38.0,<1.39.0)"] +amplifyuibuilder = ["types-boto3-amplifyuibuilder (>=1.38.0,<1.39.0)"] +apigateway = ["types-boto3-apigateway (>=1.38.0,<1.39.0)"] +apigatewaymanagementapi = ["types-boto3-apigatewaymanagementapi (>=1.38.0,<1.39.0)"] +apigatewayv2 = ["types-boto3-apigatewayv2 (>=1.38.0,<1.39.0)"] +appconfig = ["types-boto3-appconfig (>=1.38.0,<1.39.0)"] +appconfigdata = ["types-boto3-appconfigdata (>=1.38.0,<1.39.0)"] +appfabric = ["types-boto3-appfabric (>=1.38.0,<1.39.0)"] +appflow = 
["types-boto3-appflow (>=1.38.0,<1.39.0)"] +appintegrations = ["types-boto3-appintegrations (>=1.38.0,<1.39.0)"] +application-autoscaling = ["types-boto3-application-autoscaling (>=1.38.0,<1.39.0)"] +application-insights = ["types-boto3-application-insights (>=1.38.0,<1.39.0)"] +application-signals = ["types-boto3-application-signals (>=1.38.0,<1.39.0)"] +applicationcostprofiler = ["types-boto3-applicationcostprofiler (>=1.38.0,<1.39.0)"] +appmesh = ["types-boto3-appmesh (>=1.38.0,<1.39.0)"] +apprunner = ["types-boto3-apprunner (>=1.38.0,<1.39.0)"] +appstream = ["types-boto3-appstream (>=1.38.0,<1.39.0)"] +appsync = ["types-boto3-appsync (>=1.38.0,<1.39.0)"] +apptest = ["types-boto3-apptest (>=1.38.0,<1.39.0)"] +arc-zonal-shift = ["types-boto3-arc-zonal-shift (>=1.38.0,<1.39.0)"] +artifact = ["types-boto3-artifact (>=1.38.0,<1.39.0)"] +athena = ["types-boto3-athena (>=1.38.0,<1.39.0)"] +auditmanager = ["types-boto3-auditmanager (>=1.38.0,<1.39.0)"] +autoscaling = ["types-boto3-autoscaling (>=1.38.0,<1.39.0)"] +autoscaling-plans = ["types-boto3-autoscaling-plans (>=1.38.0,<1.39.0)"] +b2bi = ["types-boto3-b2bi (>=1.38.0,<1.39.0)"] +backup = ["types-boto3-backup (>=1.38.0,<1.39.0)"] +backup-gateway = ["types-boto3-backup-gateway (>=1.38.0,<1.39.0)"] +backupsearch = ["types-boto3-backupsearch (>=1.38.0,<1.39.0)"] +batch = ["types-boto3-batch (>=1.38.0,<1.39.0)"] +bcm-data-exports = ["types-boto3-bcm-data-exports (>=1.38.0,<1.39.0)"] +bcm-pricing-calculator = ["types-boto3-bcm-pricing-calculator (>=1.38.0,<1.39.0)"] +bedrock = ["types-boto3-bedrock (>=1.38.0,<1.39.0)"] +bedrock-agent = ["types-boto3-bedrock-agent (>=1.38.0,<1.39.0)"] +bedrock-agent-runtime = ["types-boto3-bedrock-agent-runtime (>=1.38.0,<1.39.0)"] +bedrock-data-automation = ["types-boto3-bedrock-data-automation (>=1.38.0,<1.39.0)"] +bedrock-data-automation-runtime = ["types-boto3-bedrock-data-automation-runtime (>=1.38.0,<1.39.0)"] +bedrock-runtime = ["types-boto3-bedrock-runtime (>=1.38.0,<1.39.0)"] +billing = ["types-boto3-billing (>=1.38.0,<1.39.0)"] +billingconductor = ["types-boto3-billingconductor (>=1.38.0,<1.39.0)"] +boto3 = ["boto3 (==1.38.40)"] +braket = ["types-boto3-braket (>=1.38.0,<1.39.0)"] +budgets = ["types-boto3-budgets (>=1.38.0,<1.39.0)"] +ce = ["types-boto3-ce (>=1.38.0,<1.39.0)"] +chatbot = ["types-boto3-chatbot (>=1.38.0,<1.39.0)"] +chime = ["types-boto3-chime (>=1.38.0,<1.39.0)"] +chime-sdk-identity = ["types-boto3-chime-sdk-identity (>=1.38.0,<1.39.0)"] +chime-sdk-media-pipelines = ["types-boto3-chime-sdk-media-pipelines (>=1.38.0,<1.39.0)"] +chime-sdk-meetings = ["types-boto3-chime-sdk-meetings (>=1.38.0,<1.39.0)"] +chime-sdk-messaging = ["types-boto3-chime-sdk-messaging (>=1.38.0,<1.39.0)"] +chime-sdk-voice = ["types-boto3-chime-sdk-voice (>=1.38.0,<1.39.0)"] +cleanrooms = ["types-boto3-cleanrooms (>=1.38.0,<1.39.0)"] +cleanroomsml = ["types-boto3-cleanroomsml (>=1.38.0,<1.39.0)"] +cloud9 = ["types-boto3-cloud9 (>=1.38.0,<1.39.0)"] +cloudcontrol = ["types-boto3-cloudcontrol (>=1.38.0,<1.39.0)"] +clouddirectory = ["types-boto3-clouddirectory (>=1.38.0,<1.39.0)"] +cloudformation = ["types-boto3-cloudformation (>=1.38.0,<1.39.0)"] +cloudfront = ["types-boto3-cloudfront (>=1.38.0,<1.39.0)"] +cloudfront-keyvaluestore = ["types-boto3-cloudfront-keyvaluestore (>=1.38.0,<1.39.0)"] +cloudhsm = ["types-boto3-cloudhsm (>=1.38.0,<1.39.0)"] +cloudhsmv2 = ["types-boto3-cloudhsmv2 (>=1.38.0,<1.39.0)"] +cloudsearch = ["types-boto3-cloudsearch (>=1.38.0,<1.39.0)"] +cloudsearchdomain = ["types-boto3-cloudsearchdomain 
(>=1.38.0,<1.39.0)"] +cloudtrail = ["types-boto3-cloudtrail (>=1.38.0,<1.39.0)"] +cloudtrail-data = ["types-boto3-cloudtrail-data (>=1.38.0,<1.39.0)"] +cloudwatch = ["types-boto3-cloudwatch (>=1.38.0,<1.39.0)"] +codeartifact = ["types-boto3-codeartifact (>=1.38.0,<1.39.0)"] +codebuild = ["types-boto3-codebuild (>=1.38.0,<1.39.0)"] +codecatalyst = ["types-boto3-codecatalyst (>=1.38.0,<1.39.0)"] +codecommit = ["types-boto3-codecommit (>=1.38.0,<1.39.0)"] +codeconnections = ["types-boto3-codeconnections (>=1.38.0,<1.39.0)"] +codedeploy = ["types-boto3-codedeploy (>=1.38.0,<1.39.0)"] +codeguru-reviewer = ["types-boto3-codeguru-reviewer (>=1.38.0,<1.39.0)"] +codeguru-security = ["types-boto3-codeguru-security (>=1.38.0,<1.39.0)"] +codeguruprofiler = ["types-boto3-codeguruprofiler (>=1.38.0,<1.39.0)"] +codepipeline = ["types-boto3-codepipeline (>=1.38.0,<1.39.0)"] +codestar-connections = ["types-boto3-codestar-connections (>=1.38.0,<1.39.0)"] +codestar-notifications = ["types-boto3-codestar-notifications (>=1.38.0,<1.39.0)"] +cognito-identity = ["types-boto3-cognito-identity (>=1.38.0,<1.39.0)"] +cognito-idp = ["types-boto3-cognito-idp (>=1.38.0,<1.39.0)"] +cognito-sync = ["types-boto3-cognito-sync (>=1.38.0,<1.39.0)"] +comprehend = ["types-boto3-comprehend (>=1.38.0,<1.39.0)"] +comprehendmedical = ["types-boto3-comprehendmedical (>=1.38.0,<1.39.0)"] +compute-optimizer = ["types-boto3-compute-optimizer (>=1.38.0,<1.39.0)"] +config = ["types-boto3-config (>=1.38.0,<1.39.0)"] +connect = ["types-boto3-connect (>=1.38.0,<1.39.0)"] +connect-contact-lens = ["types-boto3-connect-contact-lens (>=1.38.0,<1.39.0)"] +connectcampaigns = ["types-boto3-connectcampaigns (>=1.38.0,<1.39.0)"] +connectcampaignsv2 = ["types-boto3-connectcampaignsv2 (>=1.38.0,<1.39.0)"] +connectcases = ["types-boto3-connectcases (>=1.38.0,<1.39.0)"] +connectparticipant = ["types-boto3-connectparticipant (>=1.38.0,<1.39.0)"] +controlcatalog = ["types-boto3-controlcatalog (>=1.38.0,<1.39.0)"] +controltower = ["types-boto3-controltower (>=1.38.0,<1.39.0)"] +cost-optimization-hub = ["types-boto3-cost-optimization-hub (>=1.38.0,<1.39.0)"] +cur = ["types-boto3-cur (>=1.38.0,<1.39.0)"] +customer-profiles = ["types-boto3-customer-profiles (>=1.38.0,<1.39.0)"] +databrew = ["types-boto3-databrew (>=1.38.0,<1.39.0)"] +dataexchange = ["types-boto3-dataexchange (>=1.38.0,<1.39.0)"] +datapipeline = ["types-boto3-datapipeline (>=1.38.0,<1.39.0)"] +datasync = ["types-boto3-datasync (>=1.38.0,<1.39.0)"] +datazone = ["types-boto3-datazone (>=1.38.0,<1.39.0)"] +dax = ["types-boto3-dax (>=1.38.0,<1.39.0)"] +deadline = ["types-boto3-deadline (>=1.38.0,<1.39.0)"] +detective = ["types-boto3-detective (>=1.38.0,<1.39.0)"] +devicefarm = ["types-boto3-devicefarm (>=1.38.0,<1.39.0)"] +devops-guru = ["types-boto3-devops-guru (>=1.38.0,<1.39.0)"] +directconnect = ["types-boto3-directconnect (>=1.38.0,<1.39.0)"] +discovery = ["types-boto3-discovery (>=1.38.0,<1.39.0)"] +dlm = ["types-boto3-dlm (>=1.38.0,<1.39.0)"] +dms = ["types-boto3-dms (>=1.38.0,<1.39.0)"] +docdb = ["types-boto3-docdb (>=1.38.0,<1.39.0)"] +docdb-elastic = ["types-boto3-docdb-elastic (>=1.38.0,<1.39.0)"] +drs = ["types-boto3-drs (>=1.38.0,<1.39.0)"] +ds = ["types-boto3-ds (>=1.38.0,<1.39.0)"] +ds-data = ["types-boto3-ds-data (>=1.38.0,<1.39.0)"] +dsql = ["types-boto3-dsql (>=1.38.0,<1.39.0)"] +dynamodb = ["types-boto3-dynamodb (>=1.38.0,<1.39.0)"] +dynamodbstreams = ["types-boto3-dynamodbstreams (>=1.38.0,<1.39.0)"] +ebs = ["types-boto3-ebs (>=1.38.0,<1.39.0)"] +ec2 = ["types-boto3-ec2 
(>=1.38.0,<1.39.0)"] +ec2-instance-connect = ["types-boto3-ec2-instance-connect (>=1.38.0,<1.39.0)"] +ecr = ["types-boto3-ecr (>=1.38.0,<1.39.0)"] +ecr-public = ["types-boto3-ecr-public (>=1.38.0,<1.39.0)"] +ecs = ["types-boto3-ecs (>=1.38.0,<1.39.0)"] +efs = ["types-boto3-efs (>=1.38.0,<1.39.0)"] +eks = ["types-boto3-eks (>=1.38.0,<1.39.0)"] +eks-auth = ["types-boto3-eks-auth (>=1.38.0,<1.39.0)"] +elasticache = ["types-boto3-elasticache (>=1.38.0,<1.39.0)"] +elasticbeanstalk = ["types-boto3-elasticbeanstalk (>=1.38.0,<1.39.0)"] +elastictranscoder = ["types-boto3-elastictranscoder (>=1.38.0,<1.39.0)"] +elb = ["types-boto3-elb (>=1.38.0,<1.39.0)"] +elbv2 = ["types-boto3-elbv2 (>=1.38.0,<1.39.0)"] +emr = ["types-boto3-emr (>=1.38.0,<1.39.0)"] +emr-containers = ["types-boto3-emr-containers (>=1.38.0,<1.39.0)"] +emr-serverless = ["types-boto3-emr-serverless (>=1.38.0,<1.39.0)"] +entityresolution = ["types-boto3-entityresolution (>=1.38.0,<1.39.0)"] +es = ["types-boto3-es (>=1.38.0,<1.39.0)"] +essential = ["types-boto3-cloudformation (>=1.38.0,<1.39.0)", "types-boto3-dynamodb (>=1.38.0,<1.39.0)", "types-boto3-ec2 (>=1.38.0,<1.39.0)", "types-boto3-lambda (>=1.38.0,<1.39.0)", "types-boto3-rds (>=1.38.0,<1.39.0)", "types-boto3-s3 (>=1.38.0,<1.39.0)", "types-boto3-sqs (>=1.38.0,<1.39.0)"] +events = ["types-boto3-events (>=1.38.0,<1.39.0)"] +evidently = ["types-boto3-evidently (>=1.38.0,<1.39.0)"] +evs = ["types-boto3-evs (>=1.38.0,<1.39.0)"] +finspace = ["types-boto3-finspace (>=1.38.0,<1.39.0)"] +finspace-data = ["types-boto3-finspace-data (>=1.38.0,<1.39.0)"] +firehose = ["types-boto3-firehose (>=1.38.0,<1.39.0)"] +fis = ["types-boto3-fis (>=1.38.0,<1.39.0)"] +fms = ["types-boto3-fms (>=1.38.0,<1.39.0)"] +forecast = ["types-boto3-forecast (>=1.38.0,<1.39.0)"] +forecastquery = ["types-boto3-forecastquery (>=1.38.0,<1.39.0)"] +frauddetector = ["types-boto3-frauddetector (>=1.38.0,<1.39.0)"] +freetier = ["types-boto3-freetier (>=1.38.0,<1.39.0)"] +fsx = ["types-boto3-fsx (>=1.38.0,<1.39.0)"] +full = ["types-boto3-full (>=1.38.0,<1.39.0)"] +gamelift = ["types-boto3-gamelift (>=1.38.0,<1.39.0)"] +gameliftstreams = ["types-boto3-gameliftstreams (>=1.38.0,<1.39.0)"] +geo-maps = ["types-boto3-geo-maps (>=1.38.0,<1.39.0)"] +geo-places = ["types-boto3-geo-places (>=1.38.0,<1.39.0)"] +geo-routes = ["types-boto3-geo-routes (>=1.38.0,<1.39.0)"] +glacier = ["types-boto3-glacier (>=1.38.0,<1.39.0)"] +globalaccelerator = ["types-boto3-globalaccelerator (>=1.38.0,<1.39.0)"] +glue = ["types-boto3-glue (>=1.38.0,<1.39.0)"] +grafana = ["types-boto3-grafana (>=1.38.0,<1.39.0)"] +greengrass = ["types-boto3-greengrass (>=1.38.0,<1.39.0)"] +greengrassv2 = ["types-boto3-greengrassv2 (>=1.38.0,<1.39.0)"] +groundstation = ["types-boto3-groundstation (>=1.38.0,<1.39.0)"] +guardduty = ["types-boto3-guardduty (>=1.38.0,<1.39.0)"] +health = ["types-boto3-health (>=1.38.0,<1.39.0)"] +healthlake = ["types-boto3-healthlake (>=1.38.0,<1.39.0)"] +iam = ["types-boto3-iam (>=1.38.0,<1.39.0)"] +identitystore = ["types-boto3-identitystore (>=1.38.0,<1.39.0)"] +imagebuilder = ["types-boto3-imagebuilder (>=1.38.0,<1.39.0)"] +importexport = ["types-boto3-importexport (>=1.38.0,<1.39.0)"] +inspector = ["types-boto3-inspector (>=1.38.0,<1.39.0)"] +inspector-scan = ["types-boto3-inspector-scan (>=1.38.0,<1.39.0)"] +inspector2 = ["types-boto3-inspector2 (>=1.38.0,<1.39.0)"] +internetmonitor = ["types-boto3-internetmonitor (>=1.38.0,<1.39.0)"] +invoicing = ["types-boto3-invoicing (>=1.38.0,<1.39.0)"] +iot = ["types-boto3-iot 
(>=1.38.0,<1.39.0)"] +iot-data = ["types-boto3-iot-data (>=1.38.0,<1.39.0)"] +iot-jobs-data = ["types-boto3-iot-jobs-data (>=1.38.0,<1.39.0)"] +iot-managed-integrations = ["types-boto3-iot-managed-integrations (>=1.38.0,<1.39.0)"] +iotanalytics = ["types-boto3-iotanalytics (>=1.38.0,<1.39.0)"] +iotdeviceadvisor = ["types-boto3-iotdeviceadvisor (>=1.38.0,<1.39.0)"] +iotevents = ["types-boto3-iotevents (>=1.38.0,<1.39.0)"] +iotevents-data = ["types-boto3-iotevents-data (>=1.38.0,<1.39.0)"] +iotfleethub = ["types-boto3-iotfleethub (>=1.38.0,<1.39.0)"] +iotfleetwise = ["types-boto3-iotfleetwise (>=1.38.0,<1.39.0)"] +iotsecuretunneling = ["types-boto3-iotsecuretunneling (>=1.38.0,<1.39.0)"] +iotsitewise = ["types-boto3-iotsitewise (>=1.38.0,<1.39.0)"] +iotthingsgraph = ["types-boto3-iotthingsgraph (>=1.38.0,<1.39.0)"] +iottwinmaker = ["types-boto3-iottwinmaker (>=1.38.0,<1.39.0)"] +iotwireless = ["types-boto3-iotwireless (>=1.38.0,<1.39.0)"] +ivs = ["types-boto3-ivs (>=1.38.0,<1.39.0)"] +ivs-realtime = ["types-boto3-ivs-realtime (>=1.38.0,<1.39.0)"] +ivschat = ["types-boto3-ivschat (>=1.38.0,<1.39.0)"] +kafka = ["types-boto3-kafka (>=1.38.0,<1.39.0)"] +kafkaconnect = ["types-boto3-kafkaconnect (>=1.38.0,<1.39.0)"] +kendra = ["types-boto3-kendra (>=1.38.0,<1.39.0)"] +kendra-ranking = ["types-boto3-kendra-ranking (>=1.38.0,<1.39.0)"] +keyspaces = ["types-boto3-keyspaces (>=1.38.0,<1.39.0)"] +kinesis = ["types-boto3-kinesis (>=1.38.0,<1.39.0)"] +kinesis-video-archived-media = ["types-boto3-kinesis-video-archived-media (>=1.38.0,<1.39.0)"] +kinesis-video-media = ["types-boto3-kinesis-video-media (>=1.38.0,<1.39.0)"] +kinesis-video-signaling = ["types-boto3-kinesis-video-signaling (>=1.38.0,<1.39.0)"] +kinesis-video-webrtc-storage = ["types-boto3-kinesis-video-webrtc-storage (>=1.38.0,<1.39.0)"] +kinesisanalytics = ["types-boto3-kinesisanalytics (>=1.38.0,<1.39.0)"] +kinesisanalyticsv2 = ["types-boto3-kinesisanalyticsv2 (>=1.38.0,<1.39.0)"] +kinesisvideo = ["types-boto3-kinesisvideo (>=1.38.0,<1.39.0)"] +kms = ["types-boto3-kms (>=1.38.0,<1.39.0)"] +lakeformation = ["types-boto3-lakeformation (>=1.38.0,<1.39.0)"] +lambda = ["types-boto3-lambda (>=1.38.0,<1.39.0)"] +launch-wizard = ["types-boto3-launch-wizard (>=1.38.0,<1.39.0)"] +lex-models = ["types-boto3-lex-models (>=1.38.0,<1.39.0)"] +lex-runtime = ["types-boto3-lex-runtime (>=1.38.0,<1.39.0)"] +lexv2-models = ["types-boto3-lexv2-models (>=1.38.0,<1.39.0)"] +lexv2-runtime = ["types-boto3-lexv2-runtime (>=1.38.0,<1.39.0)"] +license-manager = ["types-boto3-license-manager (>=1.38.0,<1.39.0)"] +license-manager-linux-subscriptions = ["types-boto3-license-manager-linux-subscriptions (>=1.38.0,<1.39.0)"] +license-manager-user-subscriptions = ["types-boto3-license-manager-user-subscriptions (>=1.38.0,<1.39.0)"] +lightsail = ["types-boto3-lightsail (>=1.38.0,<1.39.0)"] +location = ["types-boto3-location (>=1.38.0,<1.39.0)"] +logs = ["types-boto3-logs (>=1.38.0,<1.39.0)"] +lookoutequipment = ["types-boto3-lookoutequipment (>=1.38.0,<1.39.0)"] +lookoutmetrics = ["types-boto3-lookoutmetrics (>=1.38.0,<1.39.0)"] +lookoutvision = ["types-boto3-lookoutvision (>=1.38.0,<1.39.0)"] +m2 = ["types-boto3-m2 (>=1.38.0,<1.39.0)"] +machinelearning = ["types-boto3-machinelearning (>=1.38.0,<1.39.0)"] +macie2 = ["types-boto3-macie2 (>=1.38.0,<1.39.0)"] +mailmanager = ["types-boto3-mailmanager (>=1.38.0,<1.39.0)"] +managedblockchain = ["types-boto3-managedblockchain (>=1.38.0,<1.39.0)"] +managedblockchain-query = ["types-boto3-managedblockchain-query (>=1.38.0,<1.39.0)"] 
+marketplace-agreement = ["types-boto3-marketplace-agreement (>=1.38.0,<1.39.0)"] +marketplace-catalog = ["types-boto3-marketplace-catalog (>=1.38.0,<1.39.0)"] +marketplace-deployment = ["types-boto3-marketplace-deployment (>=1.38.0,<1.39.0)"] +marketplace-entitlement = ["types-boto3-marketplace-entitlement (>=1.38.0,<1.39.0)"] +marketplace-reporting = ["types-boto3-marketplace-reporting (>=1.38.0,<1.39.0)"] +marketplacecommerceanalytics = ["types-boto3-marketplacecommerceanalytics (>=1.38.0,<1.39.0)"] +mediaconnect = ["types-boto3-mediaconnect (>=1.38.0,<1.39.0)"] +mediaconvert = ["types-boto3-mediaconvert (>=1.38.0,<1.39.0)"] +medialive = ["types-boto3-medialive (>=1.38.0,<1.39.0)"] +mediapackage = ["types-boto3-mediapackage (>=1.38.0,<1.39.0)"] +mediapackage-vod = ["types-boto3-mediapackage-vod (>=1.38.0,<1.39.0)"] +mediapackagev2 = ["types-boto3-mediapackagev2 (>=1.38.0,<1.39.0)"] +mediastore = ["types-boto3-mediastore (>=1.38.0,<1.39.0)"] +mediastore-data = ["types-boto3-mediastore-data (>=1.38.0,<1.39.0)"] +mediatailor = ["types-boto3-mediatailor (>=1.38.0,<1.39.0)"] +medical-imaging = ["types-boto3-medical-imaging (>=1.38.0,<1.39.0)"] +memorydb = ["types-boto3-memorydb (>=1.38.0,<1.39.0)"] +meteringmarketplace = ["types-boto3-meteringmarketplace (>=1.38.0,<1.39.0)"] +mgh = ["types-boto3-mgh (>=1.38.0,<1.39.0)"] +mgn = ["types-boto3-mgn (>=1.38.0,<1.39.0)"] +migration-hub-refactor-spaces = ["types-boto3-migration-hub-refactor-spaces (>=1.38.0,<1.39.0)"] +migrationhub-config = ["types-boto3-migrationhub-config (>=1.38.0,<1.39.0)"] +migrationhuborchestrator = ["types-boto3-migrationhuborchestrator (>=1.38.0,<1.39.0)"] +migrationhubstrategy = ["types-boto3-migrationhubstrategy (>=1.38.0,<1.39.0)"] +mpa = ["types-boto3-mpa (>=1.38.0,<1.39.0)"] +mq = ["types-boto3-mq (>=1.38.0,<1.39.0)"] +mturk = ["types-boto3-mturk (>=1.38.0,<1.39.0)"] +mwaa = ["types-boto3-mwaa (>=1.38.0,<1.39.0)"] +neptune = ["types-boto3-neptune (>=1.38.0,<1.39.0)"] +neptune-graph = ["types-boto3-neptune-graph (>=1.38.0,<1.39.0)"] +neptunedata = ["types-boto3-neptunedata (>=1.38.0,<1.39.0)"] +network-firewall = ["types-boto3-network-firewall (>=1.38.0,<1.39.0)"] +networkflowmonitor = ["types-boto3-networkflowmonitor (>=1.38.0,<1.39.0)"] +networkmanager = ["types-boto3-networkmanager (>=1.38.0,<1.39.0)"] +networkmonitor = ["types-boto3-networkmonitor (>=1.38.0,<1.39.0)"] +notifications = ["types-boto3-notifications (>=1.38.0,<1.39.0)"] +notificationscontacts = ["types-boto3-notificationscontacts (>=1.38.0,<1.39.0)"] +oam = ["types-boto3-oam (>=1.38.0,<1.39.0)"] +observabilityadmin = ["types-boto3-observabilityadmin (>=1.38.0,<1.39.0)"] +omics = ["types-boto3-omics (>=1.38.0,<1.39.0)"] +opensearch = ["types-boto3-opensearch (>=1.38.0,<1.39.0)"] +opensearchserverless = ["types-boto3-opensearchserverless (>=1.38.0,<1.39.0)"] +opsworks = ["types-boto3-opsworks (>=1.38.0,<1.39.0)"] +opsworkscm = ["types-boto3-opsworkscm (>=1.38.0,<1.39.0)"] +organizations = ["types-boto3-organizations (>=1.38.0,<1.39.0)"] +osis = ["types-boto3-osis (>=1.38.0,<1.39.0)"] +outposts = ["types-boto3-outposts (>=1.38.0,<1.39.0)"] +panorama = ["types-boto3-panorama (>=1.38.0,<1.39.0)"] +partnercentral-selling = ["types-boto3-partnercentral-selling (>=1.38.0,<1.39.0)"] +payment-cryptography = ["types-boto3-payment-cryptography (>=1.38.0,<1.39.0)"] +payment-cryptography-data = ["types-boto3-payment-cryptography-data (>=1.38.0,<1.39.0)"] +pca-connector-ad = ["types-boto3-pca-connector-ad (>=1.38.0,<1.39.0)"] +pca-connector-scep = 
["types-boto3-pca-connector-scep (>=1.38.0,<1.39.0)"] +pcs = ["types-boto3-pcs (>=1.38.0,<1.39.0)"] +personalize = ["types-boto3-personalize (>=1.38.0,<1.39.0)"] +personalize-events = ["types-boto3-personalize-events (>=1.38.0,<1.39.0)"] +personalize-runtime = ["types-boto3-personalize-runtime (>=1.38.0,<1.39.0)"] +pi = ["types-boto3-pi (>=1.38.0,<1.39.0)"] +pinpoint = ["types-boto3-pinpoint (>=1.38.0,<1.39.0)"] +pinpoint-email = ["types-boto3-pinpoint-email (>=1.38.0,<1.39.0)"] +pinpoint-sms-voice = ["types-boto3-pinpoint-sms-voice (>=1.38.0,<1.39.0)"] +pinpoint-sms-voice-v2 = ["types-boto3-pinpoint-sms-voice-v2 (>=1.38.0,<1.39.0)"] +pipes = ["types-boto3-pipes (>=1.38.0,<1.39.0)"] +polly = ["types-boto3-polly (>=1.38.0,<1.39.0)"] +pricing = ["types-boto3-pricing (>=1.38.0,<1.39.0)"] +proton = ["types-boto3-proton (>=1.38.0,<1.39.0)"] +qapps = ["types-boto3-qapps (>=1.38.0,<1.39.0)"] +qbusiness = ["types-boto3-qbusiness (>=1.38.0,<1.39.0)"] +qconnect = ["types-boto3-qconnect (>=1.38.0,<1.39.0)"] +qldb = ["types-boto3-qldb (>=1.38.0,<1.39.0)"] +qldb-session = ["types-boto3-qldb-session (>=1.38.0,<1.39.0)"] +quicksight = ["types-boto3-quicksight (>=1.38.0,<1.39.0)"] +ram = ["types-boto3-ram (>=1.38.0,<1.39.0)"] +rbin = ["types-boto3-rbin (>=1.38.0,<1.39.0)"] +rds = ["types-boto3-rds (>=1.38.0,<1.39.0)"] +rds-data = ["types-boto3-rds-data (>=1.38.0,<1.39.0)"] +redshift = ["types-boto3-redshift (>=1.38.0,<1.39.0)"] +redshift-data = ["types-boto3-redshift-data (>=1.38.0,<1.39.0)"] +redshift-serverless = ["types-boto3-redshift-serverless (>=1.38.0,<1.39.0)"] +rekognition = ["types-boto3-rekognition (>=1.38.0,<1.39.0)"] +repostspace = ["types-boto3-repostspace (>=1.38.0,<1.39.0)"] +resiliencehub = ["types-boto3-resiliencehub (>=1.38.0,<1.39.0)"] +resource-explorer-2 = ["types-boto3-resource-explorer-2 (>=1.38.0,<1.39.0)"] +resource-groups = ["types-boto3-resource-groups (>=1.38.0,<1.39.0)"] +resourcegroupstaggingapi = ["types-boto3-resourcegroupstaggingapi (>=1.38.0,<1.39.0)"] +robomaker = ["types-boto3-robomaker (>=1.38.0,<1.39.0)"] +rolesanywhere = ["types-boto3-rolesanywhere (>=1.38.0,<1.39.0)"] +route53 = ["types-boto3-route53 (>=1.38.0,<1.39.0)"] +route53-recovery-cluster = ["types-boto3-route53-recovery-cluster (>=1.38.0,<1.39.0)"] +route53-recovery-control-config = ["types-boto3-route53-recovery-control-config (>=1.38.0,<1.39.0)"] +route53-recovery-readiness = ["types-boto3-route53-recovery-readiness (>=1.38.0,<1.39.0)"] +route53domains = ["types-boto3-route53domains (>=1.38.0,<1.39.0)"] +route53profiles = ["types-boto3-route53profiles (>=1.38.0,<1.39.0)"] +route53resolver = ["types-boto3-route53resolver (>=1.38.0,<1.39.0)"] +rum = ["types-boto3-rum (>=1.38.0,<1.39.0)"] +s3 = ["types-boto3-s3 (>=1.38.0,<1.39.0)"] +s3control = ["types-boto3-s3control (>=1.38.0,<1.39.0)"] +s3outposts = ["types-boto3-s3outposts (>=1.38.0,<1.39.0)"] +s3tables = ["types-boto3-s3tables (>=1.38.0,<1.39.0)"] +sagemaker = ["types-boto3-sagemaker (>=1.38.0,<1.39.0)"] +sagemaker-a2i-runtime = ["types-boto3-sagemaker-a2i-runtime (>=1.38.0,<1.39.0)"] +sagemaker-edge = ["types-boto3-sagemaker-edge (>=1.38.0,<1.39.0)"] +sagemaker-featurestore-runtime = ["types-boto3-sagemaker-featurestore-runtime (>=1.38.0,<1.39.0)"] +sagemaker-geospatial = ["types-boto3-sagemaker-geospatial (>=1.38.0,<1.39.0)"] +sagemaker-metrics = ["types-boto3-sagemaker-metrics (>=1.38.0,<1.39.0)"] +sagemaker-runtime = ["types-boto3-sagemaker-runtime (>=1.38.0,<1.39.0)"] +savingsplans = ["types-boto3-savingsplans (>=1.38.0,<1.39.0)"] +scheduler = 
["types-boto3-scheduler (>=1.38.0,<1.39.0)"] +schemas = ["types-boto3-schemas (>=1.38.0,<1.39.0)"] +sdb = ["types-boto3-sdb (>=1.38.0,<1.39.0)"] +secretsmanager = ["types-boto3-secretsmanager (>=1.38.0,<1.39.0)"] +security-ir = ["types-boto3-security-ir (>=1.38.0,<1.39.0)"] +securityhub = ["types-boto3-securityhub (>=1.38.0,<1.39.0)"] +securitylake = ["types-boto3-securitylake (>=1.38.0,<1.39.0)"] +serverlessrepo = ["types-boto3-serverlessrepo (>=1.38.0,<1.39.0)"] +service-quotas = ["types-boto3-service-quotas (>=1.38.0,<1.39.0)"] +servicecatalog = ["types-boto3-servicecatalog (>=1.38.0,<1.39.0)"] +servicecatalog-appregistry = ["types-boto3-servicecatalog-appregistry (>=1.38.0,<1.39.0)"] +servicediscovery = ["types-boto3-servicediscovery (>=1.38.0,<1.39.0)"] +ses = ["types-boto3-ses (>=1.38.0,<1.39.0)"] +sesv2 = ["types-boto3-sesv2 (>=1.38.0,<1.39.0)"] +shield = ["types-boto3-shield (>=1.38.0,<1.39.0)"] +signer = ["types-boto3-signer (>=1.38.0,<1.39.0)"] +simspaceweaver = ["types-boto3-simspaceweaver (>=1.38.0,<1.39.0)"] +sms = ["types-boto3-sms (>=1.38.0,<1.39.0)"] +snow-device-management = ["types-boto3-snow-device-management (>=1.38.0,<1.39.0)"] +snowball = ["types-boto3-snowball (>=1.38.0,<1.39.0)"] +sns = ["types-boto3-sns (>=1.38.0,<1.39.0)"] +socialmessaging = ["types-boto3-socialmessaging (>=1.38.0,<1.39.0)"] +sqs = ["types-boto3-sqs (>=1.38.0,<1.39.0)"] +ssm = ["types-boto3-ssm (>=1.38.0,<1.39.0)"] +ssm-contacts = ["types-boto3-ssm-contacts (>=1.38.0,<1.39.0)"] +ssm-guiconnect = ["types-boto3-ssm-guiconnect (>=1.38.0,<1.39.0)"] +ssm-incidents = ["types-boto3-ssm-incidents (>=1.38.0,<1.39.0)"] +ssm-quicksetup = ["types-boto3-ssm-quicksetup (>=1.38.0,<1.39.0)"] +ssm-sap = ["types-boto3-ssm-sap (>=1.38.0,<1.39.0)"] +sso = ["types-boto3-sso (>=1.38.0,<1.39.0)"] +sso-admin = ["types-boto3-sso-admin (>=1.38.0,<1.39.0)"] +sso-oidc = ["types-boto3-sso-oidc (>=1.38.0,<1.39.0)"] +stepfunctions = ["types-boto3-stepfunctions (>=1.38.0,<1.39.0)"] +storagegateway = ["types-boto3-storagegateway (>=1.38.0,<1.39.0)"] +sts = ["types-boto3-sts (>=1.38.0,<1.39.0)"] +supplychain = ["types-boto3-supplychain (>=1.38.0,<1.39.0)"] +support = ["types-boto3-support (>=1.38.0,<1.39.0)"] +support-app = ["types-boto3-support-app (>=1.38.0,<1.39.0)"] +swf = ["types-boto3-swf (>=1.38.0,<1.39.0)"] +synthetics = ["types-boto3-synthetics (>=1.38.0,<1.39.0)"] +taxsettings = ["types-boto3-taxsettings (>=1.38.0,<1.39.0)"] +textract = ["types-boto3-textract (>=1.38.0,<1.39.0)"] +timestream-influxdb = ["types-boto3-timestream-influxdb (>=1.38.0,<1.39.0)"] +timestream-query = ["types-boto3-timestream-query (>=1.38.0,<1.39.0)"] +timestream-write = ["types-boto3-timestream-write (>=1.38.0,<1.39.0)"] +tnb = ["types-boto3-tnb (>=1.38.0,<1.39.0)"] +transcribe = ["types-boto3-transcribe (>=1.38.0,<1.39.0)"] +transfer = ["types-boto3-transfer (>=1.38.0,<1.39.0)"] +translate = ["types-boto3-translate (>=1.38.0,<1.39.0)"] +trustedadvisor = ["types-boto3-trustedadvisor (>=1.38.0,<1.39.0)"] +verifiedpermissions = ["types-boto3-verifiedpermissions (>=1.38.0,<1.39.0)"] +voice-id = ["types-boto3-voice-id (>=1.38.0,<1.39.0)"] +vpc-lattice = ["types-boto3-vpc-lattice (>=1.38.0,<1.39.0)"] +waf = ["types-boto3-waf (>=1.38.0,<1.39.0)"] +waf-regional = ["types-boto3-waf-regional (>=1.38.0,<1.39.0)"] +wafv2 = ["types-boto3-wafv2 (>=1.38.0,<1.39.0)"] +wellarchitected = ["types-boto3-wellarchitected (>=1.38.0,<1.39.0)"] +wisdom = ["types-boto3-wisdom (>=1.38.0,<1.39.0)"] +workdocs = ["types-boto3-workdocs (>=1.38.0,<1.39.0)"] +workmail = 
["types-boto3-workmail (>=1.38.0,<1.39.0)"] +workmailmessageflow = ["types-boto3-workmailmessageflow (>=1.38.0,<1.39.0)"] +workspaces = ["types-boto3-workspaces (>=1.38.0,<1.39.0)"] +workspaces-thin-client = ["types-boto3-workspaces-thin-client (>=1.38.0,<1.39.0)"] +workspaces-web = ["types-boto3-workspaces-web (>=1.38.0,<1.39.0)"] +xray = ["types-boto3-xray (>=1.38.0,<1.39.0)"] + +[[package]] +name = "types-s3transfer" +version = "0.13.0" +description = "Type annotations and code completion for s3transfer" +optional = false +python-versions = ">=3.8" +groups = ["test"] +files = [ + {file = "types_s3transfer-0.13.0-py3-none-any.whl", hash = "sha256:79c8375cbf48a64bff7654c02df1ec4b20d74f8c5672fc13e382f593ca5565b3"}, + {file = "types_s3transfer-0.13.0.tar.gz", hash = "sha256:203dadcb9865c2f68fb44bc0440e1dc05b79197ba4a641c0976c26c9af75ef52"}, +] + [[package]] name = "typing-extensions" -version = "4.13.2" +version = "4.12.2" description = "Backported and Experimental Type Hints for Python 3.8+" optional = false python-versions = ">=3.8" groups = ["main", "dev", "test"] files = [ - {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"}, - {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"}, + {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, + {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, ] [[package]] name = "tzdata" -version = "2025.2" +version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" groups = ["dev", "test"] markers = "sys_platform == \"win32\"" files = [ - {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, - {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] [[package]] @@ -1799,91 +2191,82 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "wrapt" -version = "1.17.2" +version = "1.16.0" description = "Module for decorators, wrappers and monkey patching." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.6" groups = ["main", "test"] files = [ - {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"}, - {file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"}, - {file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"}, - {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"}, - {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"}, - {file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"}, - {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"}, - {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"}, - {file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"}, - {file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"}, - {file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"}, - {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"}, - {file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"}, - {file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"}, - {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"}, - {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"}, - {file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"}, - {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"}, - {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"}, - {file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"}, - {file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"}, - {file = "wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"}, - {file = 
"wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"}, - {file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"}, - {file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"}, - {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"}, - {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"}, - {file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"}, - {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"}, - {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"}, - {file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"}, - {file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"}, - {file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"}, - {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"}, - {file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"}, - {file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"}, - {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"}, - {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"}, - {file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"}, - {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"}, - {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"}, - {file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"}, - {file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"}, - {file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"}, - {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = 
"sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"}, - {file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"}, - {file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"}, - {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"}, - {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"}, - {file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"}, - {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"}, - {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"}, - {file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"}, - {file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"}, - {file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"}, - {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"}, - {file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"}, - {file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"}, - {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"}, - {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"}, - {file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"}, - {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"}, - {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"}, - {file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"}, - {file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"}, - {file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"}, - {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"}, - {file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash 
= "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"}, - {file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"}, - {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"}, - {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"}, - {file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"}, - {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"}, - {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"}, - {file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"}, - {file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"}, - {file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"}, - {file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"}, - {file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, + {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, + {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, + {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, + {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, + {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, + {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:43aa59eadec7890d9958748db829df269f0368521ba6dc68cc172d5d03ed8060"}, + {file = "wrapt-1.16.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72554a23c78a8e7aa02abbd699d129eead8b147a23c56e08d08dfc29cfdddca1"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d2efee35b4b0a347e0d99d28e884dfd82797852d62fcd7ebdeee26f3ceb72cf3"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6dcfcffe73710be01d90cae08c3e548d90932d37b39ef83969ae135d36ef3956"}, + {file = "wrapt-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb6e651000a19c96f452c85132811d25e9264d836951022d6e81df2fff38337d"}, + {file = "wrapt-1.16.0-cp311-cp311-win32.whl", hash = "sha256:66027d667efe95cc4fa945af59f92c5a02c6f5bb6012bff9e60542c74c75c362"}, + {file = "wrapt-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:aefbc4cb0a54f91af643660a0a150ce2c090d3652cf4052a5397fb2de549cd89"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5eb404d89131ec9b4f748fa5cfb5346802e5ee8836f57d516576e61f304f3b7b"}, + {file = "wrapt-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9090c9e676d5236a6948330e83cb89969f433b1943a558968f659ead07cb3b36"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94265b00870aa407bd0cbcfd536f17ecde43b94fb8d228560a1e9d3041462d73"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f2058f813d4f2b5e3a9eb2eb3faf8f1d99b81c3e51aeda4b168406443e8ba809"}, + {file = "wrapt-1.16.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98b5e1f498a8ca1858a1cdbffb023bfd954da4e3fa2c0cb5853d40014557248b"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:14d7dc606219cdd7405133c713f2c218d4252f2a469003f8c46bb92d5d095d81"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:49aac49dc4782cb04f58986e81ea0b4768e4ff197b57324dcbd7699c5dfb40b9"}, + {file = "wrapt-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:418abb18146475c310d7a6dc71143d6f7adec5b004ac9ce08dc7a34e2babdc5c"}, + {file = "wrapt-1.16.0-cp312-cp312-win32.whl", hash = "sha256:685f568fa5e627e93f3b52fda002c7ed2fa1800b50ce51f6ed1d572d8ab3e7fc"}, + {file = "wrapt-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:dcdba5c86e368442528f7060039eda390cc4091bfd1dca41e8046af7c910dda8"}, + {file = "wrapt-1.16.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:d462f28826f4657968ae51d2181a074dfe03c200d6131690b7d65d55b0f360f8"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a33a747400b94b6d6b8a165e4480264a64a78c8a4c734b62136062e9a248dd39"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3646eefa23daeba62643a58aac816945cadc0afaf21800a1421eeba5f6cfb9c"}, + {file = "wrapt-1.16.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:3ebf019be5c09d400cf7b024aa52b1f3aeebeff51550d007e92c3c1c4afc2a40"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0d2691979e93d06a95a26257adb7bfd0c93818e89b1406f5a28f36e0d8c1e1fc"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:1acd723ee2a8826f3d53910255643e33673e1d11db84ce5880675954183ec47e"}, + {file = "wrapt-1.16.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc57efac2da352a51cc4658878a68d2b1b67dbe9d33c36cb826ca449d80a8465"}, + {file = "wrapt-1.16.0-cp36-cp36m-win32.whl", hash = "sha256:da4813f751142436b075ed7aa012a8778aa43a99f7b36afe9b742d3ed8bdc95e"}, + {file = "wrapt-1.16.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f6eac2360f2d543cc875a0e5efd413b6cbd483cb3ad7ebf888884a6e0d2e966"}, + {file = "wrapt-1.16.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a0ea261ce52b5952bf669684a251a66df239ec6d441ccb59ec7afa882265d593"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7bd2d7ff69a2cac767fbf7a2b206add2e9a210e57947dd7ce03e25d03d2de292"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9159485323798c8dc530a224bd3ffcf76659319ccc7bbd52e01e73bd0241a0c5"}, + {file = "wrapt-1.16.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a86373cf37cd7764f2201b76496aba58a52e76dedfaa698ef9e9688bfd9e41cf"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:73870c364c11f03ed072dda68ff7aea6d2a3a5c3fe250d917a429c7432e15228"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b935ae30c6e7400022b50f8d359c03ed233d45b725cfdd299462f41ee5ffba6f"}, + {file = "wrapt-1.16.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:db98ad84a55eb09b3c32a96c576476777e87c520a34e2519d3e59c44710c002c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win32.whl", hash = "sha256:9153ed35fc5e4fa3b2fe97bddaa7cbec0ed22412b85bcdaf54aeba92ea37428c"}, + {file = "wrapt-1.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:66dfbaa7cfa3eb707bbfcd46dab2bc6207b005cbc9caa2199bcbc81d95071a00"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1dd50a2696ff89f57bd8847647a1c363b687d3d796dc30d4dd4a9d1689a706f0"}, + {file = "wrapt-1.16.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:44a2754372e32ab315734c6c73b24351d06e77ffff6ae27d2ecf14cf3d229202"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e9723528b9f787dc59168369e42ae1c3b0d3fadb2f1a71de14531d321ee05b0"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbed418ba5c3dce92619656802cc5355cb679e58d0d89b50f116e4a9d5a9603e"}, + {file = "wrapt-1.16.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941988b89b4fd6b41c3f0bfb20e92bd23746579736b7343283297c4c8cbae68f"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6a42cd0cfa8ffc1915aef79cb4284f6383d8a3e9dcca70c445dcfdd639d51267"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ca9b6085e4f866bd584fb135a041bfc32cab916e69f714a7d1d397f8c4891ca"}, + {file = "wrapt-1.16.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5e49454f19ef621089e204f862388d29e6e8d8b162efce05208913dde5b9ad6"}, + {file = "wrapt-1.16.0-cp38-cp38-win32.whl", hash = 
"sha256:c31f72b1b6624c9d863fc095da460802f43a7c6868c5dda140f51da24fd47d7b"}, + {file = "wrapt-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:490b0ee15c1a55be9c1bd8609b8cecd60e325f0575fc98f50058eae366e01f41"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9b201ae332c3637a42f02d1045e1d0cccfdc41f1f2f801dafbaa7e9b4797bfc2"}, + {file = "wrapt-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2076fad65c6736184e77d7d4729b63a6d1ae0b70da4868adeec40989858eb3fb"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5cd603b575ebceca7da5a3a251e69561bec509e0b46e4993e1cac402b7247b8"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b47cfad9e9bbbed2339081f4e346c93ecd7ab504299403320bf85f7f85c7d46c"}, + {file = "wrapt-1.16.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8212564d49c50eb4565e502814f694e240c55551a5f1bc841d4fcaabb0a9b8a"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5f15814a33e42b04e3de432e573aa557f9f0f56458745c2074952f564c50e664"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db2e408d983b0e61e238cf579c09ef7020560441906ca990fe8412153e3b291f"}, + {file = "wrapt-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:edfad1d29c73f9b863ebe7082ae9321374ccb10879eeabc84ba3b69f2579d537"}, + {file = "wrapt-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed867c42c268f876097248e05b6117a65bcd1e63b779e916fe2e33cd6fd0d3c3"}, + {file = "wrapt-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:eb1b046be06b0fce7249f1d025cd359b4b80fc1c3e24ad9eca33e0dcdb2e4a35"}, + {file = "wrapt-1.16.0-py3-none-any.whl", hash = "sha256:6906c4100a8fcbf2fa735f6059214bb13b97f75b1a61777fcf6432121ef12ef1"}, + {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] [[package]] @@ -1909,4 +2292,4 @@ type = ["pytest-mypy"] [metadata] lock-version = "2.1" python-versions = "^3.8.1" -content-hash = "7a960df210d794ce6823f024a114c7313215d1c8486b8a723cce104e1c0d6c06" +content-hash = "4d6dc3f0080c62b28f78487a4cbefa724da3af5e9fd100a6abd376bcae3baadf" diff --git a/pyproject.toml b/pyproject.toml index 15173c82..fd831fe0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,6 +46,7 @@ mysql-connector-python = "9.0.0" [tool.poetry.group.test.dependencies] boto3 = "^1.34.111" +types-boto3 = "^1.34.111" coverage = "^7.5.1" debugpy = "^1.8.1" pydevd-pycharm = "^233.13763.5" @@ -55,6 +56,7 @@ pytest-html = "^4.1.1" pytest-html-merger = ">=0.0.10,<0.1.1" toxiproxy-python = "^0.1.1" parameterized = "^0.9.0" +tabulate = "^0.9.0" psycopg = "^3.1.19" psycopg-binary = "^3.1.19" mysql-connector-python = "9.0.0" diff --git a/tests/integration/container/conftest.py b/tests/integration/container/conftest.py index eefabcaf..c438561f 100644 --- a/tests/integration/container/conftest.py +++ b/tests/integration/container/conftest.py @@ -84,9 +84,9 @@ def pytest_runtest_setup(item): ProxyHelper.enable_all_connectivity() deployment = request.get_database_engine_deployment() - if DatabaseEngineDeployment.AURORA == deployment or DatabaseEngineDeployment.RDS_MULTI_AZ == deployment: + if DatabaseEngineDeployment.AURORA == deployment or DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER == deployment: rds_utility = RdsTestUtility(info.get_region(), info.get_rds_endpoint()) - 
-            rds_utility.wait_until_cluster_has_desired_status(info.get_cluster_name(), "available")
+            rds_utility.wait_until_cluster_has_desired_status(info.get_db_name(), "available")

             # Need to ensure that cluster details through API matches topology fetched through SQL
             # Wait up to 5min
diff --git a/tests/integration/container/test_aurora_failover.py b/tests/integration/container/test_aurora_failover.py
index 1a303e33..71524bee 100644
--- a/tests/integration/container/test_aurora_failover.py
+++ b/tests/integration/container/test_aurora_failover.py
@@ -41,8 +41,10 @@

 @enable_on_num_instances(min_instances=2)
-@enable_on_deployments([DatabaseEngineDeployment.AURORA, DatabaseEngineDeployment.RDS_MULTI_AZ])
-@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, TestEnvironmentFeatures.PERFORMANCE])
+@enable_on_deployments([DatabaseEngineDeployment.AURORA, DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER])
+@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY,
+                      TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT,
+                      TestEnvironmentFeatures.PERFORMANCE])
 class TestAuroraFailover:
     IDLE_CONNECTIONS_NUM: int = 5
     logger = Logger(__name__)
diff --git a/tests/integration/container/test_basic_connectivity.py b/tests/integration/container/test_basic_connectivity.py
index ef03614e..6745eac6 100644
--- a/tests/integration/container/test_basic_connectivity.py
+++ b/tests/integration/container/test_basic_connectivity.py
@@ -36,7 +36,9 @@
 from .utils.test_environment_features import TestEnvironmentFeatures


-@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, TestEnvironmentFeatures.PERFORMANCE])
+@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY,
+                      TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT,
+                      TestEnvironmentFeatures.PERFORMANCE])
 class TestBasicConnectivity:

     @pytest.fixture(scope='class')
@@ -125,7 +127,7 @@ def test_proxied_wrapper_connection_failed(
         assert True

     @enable_on_num_instances(min_instances=2)
-    @enable_on_deployments([DatabaseEngineDeployment.AURORA, DatabaseEngineDeployment.RDS_MULTI_AZ])
+    @enable_on_deployments([DatabaseEngineDeployment.AURORA, DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER])
     @enable_on_features([TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED])
     def test_wrapper_connection_reader_cluster_with_efm_enabled(self, test_driver: TestDriver, conn_utils):
         target_driver_connect = DriverHelper.get_connect_func(test_driver)
diff --git a/tests/integration/container/test_basic_functionality.py b/tests/integration/container/test_basic_functionality.py
index 34f66c62..005fcb6c 100644
--- a/tests/integration/container/test_basic_functionality.py
+++ b/tests/integration/container/test_basic_functionality.py
@@ -46,7 +46,9 @@
 from .utils.test_environment_features import TestEnvironmentFeatures


-@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, TestEnvironmentFeatures.PERFORMANCE])
+@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY,
+                      TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT,
+                      TestEnvironmentFeatures.PERFORMANCE])
 class TestBasicFunctionality:

     @pytest.fixture(scope='class')
diff --git a/tests/integration/container/test_blue_green_deployment.py b/tests/integration/container/test_blue_green_deployment.py
new file mode 100644
index 00000000..4a26da18
--- /dev/null
+++ b/tests/integration/container/test_blue_green_deployment.py
@@ -0,0 +1,1323 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License").
+# You may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any, Deque, Dict, List, Optional, Tuple
+
+import mysql.connector
+import psycopg
+from mysql.connector import CMySQLConnection, MySQLConnection
+
+from aws_advanced_python_wrapper.mysql_driver_dialect import MySQLDriverDialect
+from aws_advanced_python_wrapper.pg_driver_dialect import PgDriverDialect
+
+if TYPE_CHECKING:
+    from aws_advanced_python_wrapper.pep249 import Connection
+    from .utils.connection_utils import ConnectionUtils
+    from .utils.test_driver import TestDriver
+
+import math
+import socket
+from collections import deque
+from dataclasses import dataclass, field
+from threading import Event, Thread
+from time import perf_counter_ns, sleep
+
+import pytest
+from tabulate import tabulate  # type: ignore
+
+from aws_advanced_python_wrapper import AwsWrapperConnection
+from aws_advanced_python_wrapper.blue_green_plugin import (BlueGreenPlugin,
+                                                           BlueGreenRole)
+from aws_advanced_python_wrapper.database_dialect import DialectCode
+from aws_advanced_python_wrapper.driver_info import DriverInfo
+from aws_advanced_python_wrapper.utils.atomic import AtomicInt
+from aws_advanced_python_wrapper.utils.concurrent import (ConcurrentDict,
+                                                          CountDownLatch)
+from aws_advanced_python_wrapper.utils.log import Logger
+from aws_advanced_python_wrapper.utils.properties import (Properties,
+                                                          WrapperProperties)
+from aws_advanced_python_wrapper.utils.rdsutils import RdsUtils
+from .utils.conditions import enable_on_deployments, enable_on_features
+from .utils.database_engine import DatabaseEngine
+from .utils.database_engine_deployment import DatabaseEngineDeployment
+from .utils.driver_helper import DriverHelper
+from .utils.rds_test_utility import RdsTestUtility
+from .utils.test_environment import TestEnvironment
+from .utils.test_environment_features import TestEnvironmentFeatures
+
+
+@enable_on_deployments([DatabaseEngineDeployment.AURORA, DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE])
+@enable_on_features([TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT])
+class TestBlueGreenDeployment:
+    logger = Logger(__name__)
+
+    INCLUDE_CLUSTER_ENDPOINTS = False
+    INCLUDE_WRITER_AND_READER_ONLY = False
+    TEST_CLUSTER_ID = "test-cluster-id"
+    MYSQL_BG_STATUS_QUERY = \
+        ("SELECT id, SUBSTRING_INDEX(endpoint, '.', 1) as hostId, endpoint, port, role, status, version "
+         "FROM mysql.rds_topology")
+    PG_AURORA_BG_STATUS_QUERY = \
+        ("SELECT id, 
SPLIT_PART(endpoint, '.', 1) as hostId, endpoint, port, role, status, version " + "FROM get_blue_green_fast_switchover_metadata('aws_jdbc_driver')") + PG_RDS_BG_STATUS_QUERY = \ + (f"SELECT id, SPLIT_PART(endpoint, '.', 1) as hostId, endpoint, port, role, status, version " + f"FROM rds_tools.show_topology('aws_jdbc_driver-{DriverInfo.DRIVER_VERSION}')") + results: ConcurrentDict[str, BlueGreenResults] = ConcurrentDict() + unhandled_exceptions: Deque[Exception] = deque() + mysql_dialect = MySQLDriverDialect(Properties()) + pg_dialect = PgDriverDialect(Properties()) + + @pytest.fixture(scope='class') + def test_utility(self): + return RdsTestUtility.get_utility() + + @pytest.fixture(scope='class') + def rds_utils(self): + return RdsUtils() + + def test_switchover(self, conn_utils, test_utility, rds_utils, test_environment: TestEnvironment, test_driver): + self.results.clear() + self.unhandled_exceptions.clear() + + iam_enabled = TestEnvironmentFeatures.IAM in test_environment.get_features() + start_time_ns = perf_counter_ns() + stop = Event() + start_latch = CountDownLatch() + finish_latch = CountDownLatch() + thread_count = 0 + thread_finish_count = 0 + threads: List[Thread] = [] + + env = TestEnvironment.get_current() + info = env.get_info() + db_name = conn_utils.dbname + test_instance = env.get_writer() + topology_instances: List[str] = self.get_bg_endpoints( + test_environment, test_utility, rds_utils, info.get_bg_deployment_id()) + topology_instances_str = '\n'.join(topology_instances) + self.logger.debug(f"topology_instances: \n{topology_instances_str}") + + for host in topology_instances: + host_id = host[0:host.index(".")] + assert host_id + + bg_results = BlueGreenResults() + self.results.put(host_id, bg_results) + + if rds_utils.is_not_green_or_old_instance(host): + threads.append(Thread( + target=self.direct_topology_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.direct_blue_connectivity_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.direct_blue_idle_connectivity_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.wrapper_blue_idle_connectivity_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.wrapper_blue_executing_connectivity_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.wrapper_blue_new_connection_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.blue_dns_monitor, + args=(host_id, host, start_latch, stop, finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + if 
rds_utils.is_green_instance(host): + threads.append(Thread( + target=self.direct_topology_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.wrapper_green_connectivity_monitor, + args=(test_driver, conn_utils, host_id, host, test_instance.get_port(), db_name, start_latch, stop, + finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.green_dns_monitor, + args=(host_id, host, start_latch, stop, finish_latch, bg_results))) + thread_count += 1 + thread_finish_count += 1 + + if iam_enabled: + rds_client = test_utility.get_rds_client() + + threads.append(Thread( + target=self.green_iam_connectivity_monitor, + args=(test_driver, conn_utils, rds_client, host_id, "BlueHostToken", + rds_utils.remove_green_instance_prefix(host), host, test_instance.get_port(), + db_name, start_latch, stop, finish_latch, bg_results, + bg_results.green_direct_iam_ip_with_blue_node_connect_times, False, True))) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.green_iam_connectivity_monitor, + args=(test_driver, conn_utils, rds_client, host_id, "GreenHostToken", host, host, + test_instance.get_port(), db_name, start_latch, stop, finish_latch, + bg_results, bg_results.green_direct_iam_ip_with_green_node_connect_times, True, False) + )) + thread_count += 1 + thread_finish_count += 1 + + threads.append(Thread( + target=self.bg_switchover_trigger, + args=(test_utility, info.get_bg_deployment_id(), start_latch, finish_latch, self.results))) + thread_count += 1 + thread_finish_count += 1 + + start_latch.set_count(thread_count) + finish_latch.set_count(thread_finish_count) + + for result in self.results.values(): + result.start_time_ns.set(start_time_ns) + + for thread in threads: + thread.start() + + self.logger.debug("All threads started.") + + finish_latch.wait_sec(6 * 60) + self.logger.debug("All threads completed.") + + sleep(12 * 60) + + self.logger.debug("Stopping all threads...") + stop.set() + + for thread in threads: + thread.join(timeout=30) + if thread.is_alive(): + self.logger.debug("Timed out waiting for a thread to stop running...") + + self.logger.debug("Done waiting for threads to stop.") + + for host_id, result in self.results.items(): + assert result.bg_trigger_time_ns.get() > 0, \ + f"bg_trigger_time for {host_id} was {result.bg_trigger_time_ns.get()}" + + self.logger.debug("Test is over.") + self.print_metrics(rds_utils) + + if len(self.unhandled_exceptions) > 0: + self.log_unhandled_exceptions() + pytest.fail("There were unhandled exceptions.") + + self.assert_test() + + self.logger.debug("Completed") + + def get_bg_endpoints( + self, + test_env: TestEnvironment, + test_utility: RdsTestUtility, + rds_utils: RdsUtils, + bg_id: str) -> List[str]: + bg_deployment = test_utility.get_blue_green_deployment(bg_id) + if bg_deployment is None: + pytest.fail(f"Blue/Green deployment with ID '{bg_id}' not found.") + + if test_env.get_deployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE: + blue_instance = test_utility.get_rds_instance_info_by_arn(bg_deployment["Source"]) + if blue_instance is None: + pytest.fail("Blue instance not found.") + + green_instance = test_utility.get_rds_instance_info_by_arn(bg_deployment["Target"]) + if green_instance is None: + pytest.fail("Green instance not found.") + + return 
[blue_instance["Endpoint"]["Address"], green_instance["Endpoint"]["Address"]]
+
+        elif test_env.get_deployment() == DatabaseEngineDeployment.AURORA:
+            endpoints = []
+            blue_cluster = test_utility.get_cluster_by_arn(bg_deployment["Source"])
+            if blue_cluster is None:
+                pytest.fail("Blue cluster not found.")
+
+            if self.INCLUDE_CLUSTER_ENDPOINTS:
+                endpoints.append(test_env.get_database_info().get_cluster_endpoint())
+
+            instances = test_env.get_instances()
+            if self.INCLUDE_WRITER_AND_READER_ONLY:
+                endpoints.append(instances[0].get_host())
+                if len(instances) > 1:
+                    endpoints.append(instances[1].get_host())
+            else:
+                endpoints.extend([instance_info.get_host() for instance_info in instances])
+
+            green_cluster = test_utility.get_cluster_by_arn(bg_deployment["Target"])
+            if green_cluster is None:
+                pytest.fail("Green cluster not found.")
+
+            if self.INCLUDE_CLUSTER_ENDPOINTS:
+                endpoints.append(green_cluster["Endpoint"])
+
+            instance_ids = test_utility.get_instance_ids(green_cluster["Endpoint"])
+            if len(instance_ids) < 1:
+                pytest.fail("Cannot find green cluster instances.")
+
+            instance_pattern = rds_utils.get_rds_instance_host_pattern(green_cluster["Endpoint"])
+            if self.INCLUDE_WRITER_AND_READER_ONLY:
+                endpoints.append(instance_pattern.replace("?", instance_ids[0]))
+                if len(instance_ids) > 1:
+                    endpoints.append(instance_pattern.replace("?", instance_ids[1]))
+            else:
+                endpoints.extend([instance_pattern.replace("?", instance_id) for instance_id in instance_ids])
+
+            return endpoints
+        else:
+            pytest.fail(f"Unsupported blue/green engine deployment: {test_env.get_deployment()}")
+
+    # Monitor BG status changes
+    # Can terminate for itself
+    def direct_topology_monitor(
+            self,
+            test_driver: TestDriver,
+            conn_utils: ConnectionUtils,
+            host_id: str,
+            host: str,
+            port: int,
+            db: str,
+            start_latch: CountDownLatch,
+            stop: Event,
+            finish_latch: CountDownLatch,
+            results: BlueGreenResults):
+        conn = None
+        test_env = TestEnvironment.get_current()
+        engine = test_env.get_engine()
+
+        query = None
+        if engine == DatabaseEngine.MYSQL:
+            query = self.MYSQL_BG_STATUS_QUERY
+        elif engine == DatabaseEngine.PG:
+            db_deployment = test_env.get_deployment()
+            if db_deployment == DatabaseEngineDeployment.AURORA:
+                query = self.PG_AURORA_BG_STATUS_QUERY
+            elif db_deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE:
+                query = self.PG_RDS_BG_STATUS_QUERY
+            else:
+                pytest.fail(f"Unsupported blue/green database engine deployment: {db_deployment}")
+        else:
+            pytest.fail(f"Unsupported database engine: {engine}")
+
+        try:
+            conn = self.get_direct_connection_with_retry(
+                test_driver,
+                **conn_utils.get_connect_params(host=host, port=port, dbname=db))
+            self.logger.debug(f"[DirectTopology @ {host_id}] Connection opened.")
+
+            sleep(1)
+
+            # Notify that this thread is ready for work
+            start_latch.count_down()
+
+            # Wait until other threads are ready to start the test
+            start_latch.wait_sec(5 * 60)
+            self.logger.debug(f"[DirectTopology @ {host_id}] Starting BG status monitoring.")
+
+            end_time_ns = perf_counter_ns() + 15 * 60 * 1_000_000_000  # 15 minutes
+            while not stop.is_set() and perf_counter_ns() < end_time_ns:
+                if conn is None:
+                    conn = self.get_direct_connection_with_retry(
+                        test_driver, **conn_utils.get_connect_params(host=host, port=port, dbname=db))
+                    self.logger.debug(f"[DirectTopology @ {host_id}] Connection re-opened.")
+
+                try:
+                    with conn.cursor() as cursor:
+                        cursor.execute(query)
+                        for record in cursor:
+                            # columns: id, hostid, endpoint, port, role, status, version
+                            role = record[4]
+                            status = record[5]
+                            version = record[6]
+                            is_green = BlueGreenRole.parse_role(role, version) == BlueGreenRole.TARGET
+
+                            def _log_and_return_time(_) -> int:
+                                self.logger.debug(f"[DirectTopology @ {host_id}] Status changed to: {status}.")
+                                return perf_counter_ns()
+
+                            if is_green:
+                                results.green_status_time.compute_if_absent(status, _log_and_return_time)
+                            else:
+                                results.blue_status_time.compute_if_absent(status, _log_and_return_time)
+
+                    sleep(0.1)
+                except Exception as e:
+                    self.logger.debug(f"[DirectTopology @ {host_id}] Thread exception: {e}.")
+                    self.close_connection(conn)
+                    conn = None
+        except Exception as e:
+            self.logger.debug(f"[DirectTopology @ {host_id}] Thread unhandled exception: {e}.")
+            self.unhandled_exceptions.append(e)
+        finally:
+            self.close_connection(conn)
+            finish_latch.count_down()
+            self.logger.debug(f"[DirectTopology @ {host_id}] Thread is completed.")
+
+    def get_telemetry_params(self) -> Dict[str, Any]:
+        params: Dict[str, Any] = {}
+        features = TestEnvironment.get_current().get_features()
+        if TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED in features \
+                or TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED in features:
+            params[WrapperProperties.ENABLE_TELEMETRY.name] = True
+            params[WrapperProperties.TELEMETRY_SUBMIT_TOPLEVEL.name] = True
+        if TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED in features:
+            params[WrapperProperties.TELEMETRY_TRACES_BACKEND.name] = "XRAY"
+        if TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED in features:
+            params[WrapperProperties.TELEMETRY_METRICS_BACKEND.name] = "OTLP"
+
+        return params
+
+    def get_direct_connection_with_retry(self, test_driver: TestDriver, **connect_params) -> Connection:
+        conn = None
+        connect_count = 0
+        target_driver_connect = DriverHelper.get_connect_func(test_driver)
+        while conn is None and connect_count < 10:
+            try:
+                conn = target_driver_connect(**connect_params)
+            except Exception:
+                # ignore, try to connect again
+                pass
+
+            connect_count += 1
+
+        if conn is None:
+            pytest.fail(f"Cannot connect to {connect_params.get('host')}")
+
+        return conn
+
+    def close_connection(self, conn: Optional[Connection]):
+        try:
+            if conn is not None and not self.is_closed(conn):
+                conn.close()
+        except Exception:
+            # do nothing
+            pass
+
+    def is_closed(self, conn: Connection) -> bool:
+        if isinstance(conn, psycopg.Connection):
+            return self.pg_dialect.is_closed(conn)
+        elif isinstance(conn, CMySQLConnection) or isinstance(conn, MySQLConnection):
+            return self.mysql_dialect.is_closed(conn)
+        elif isinstance(conn, AwsWrapperConnection):
+            return conn.is_closed
+        else:
+            pytest.fail(
+                f"Unable to determine if the connection was closed because it was of an unexpected type: {conn}")
+
+    # Blue node
+    # Checking: connectivity, SELECT 1
+    # Can terminate for itself
+    def direct_blue_connectivity_monitor(
+            self,
+            test_driver: TestDriver,
+            conn_utils: ConnectionUtils,
+            host_id: str,
+            host: str,
+            port: int,
+            db: str,
+            start_latch: CountDownLatch,
+            stop: Event,
+            finish_latch: CountDownLatch,
+            results: BlueGreenResults):
+        conn = None
+        try:
+            conn = self.get_direct_connection_with_retry(
+                test_driver,
+                **conn_utils.get_connect_params(host=host, port=port, dbname=db))
+            self.logger.debug(f"[DirectBlueConnectivity @ {host_id}] Connection opened.")
+
+            sleep(1)
+
+            # Notify that this thread is ready for work
+            start_latch.count_down()
+
+            # Wait until other threads are ready to start the test
+            start_latch.wait_sec(5 * 60)
+            self.logger.debug(f"[DirectBlueConnectivity @ {host_id}] Starting connectivity monitoring.")
+
+            while not stop.is_set():
+                try:
+                    with conn.cursor() as cursor:
+                        cursor.execute("SELECT 1")
+                        cursor.fetchall()
+                    sleep(1)
+                except Exception as e:
+                    self.logger.debug(f"[DirectBlueConnectivity @ {host_id}] Thread exception: {e}")
+                    results.direct_blue_lost_connection_time_ns.set(perf_counter_ns())
+                    break
+        except Exception as e:
+            self.logger.debug(f"[DirectBlueConnectivity @ {host_id}] Thread unhandled exception: {e}")
+            self.unhandled_exceptions.append(e)
+        finally:
+            self.close_connection(conn)
+            finish_latch.count_down()
+            self.logger.debug(f"[DirectBlueConnectivity @ {host_id}] Thread is completed.")
+
+    # Blue node
+    # Checking: connectivity, is_closed
+    # Can terminate for itself
+    def direct_blue_idle_connectivity_monitor(
+            self,
+            test_driver: TestDriver,
+            conn_utils: ConnectionUtils,
+            host_id: str,
+            host: str,
+            port: int,
+            db: str,
+            start_latch: CountDownLatch,
+            stop: Event,
+            finish_latch: CountDownLatch,
+            results: BlueGreenResults):
+        conn = None
+        try:
+            conn = self.get_direct_connection_with_retry(
+                test_driver,
+                **conn_utils.get_connect_params(host=host, port=port, dbname=db))
+            self.logger.debug(f"[DirectBlueIdleConnectivity @ {host_id}] Connection opened.")
+
+            sleep(1)
+
+            # Notify that this thread is ready for work
+            start_latch.count_down()
+
+            # Wait until other threads are ready to start the test
+            start_latch.wait_sec(5 * 60)
+            self.logger.debug(f"[DirectBlueIdleConnectivity @ {host_id}] Starting connectivity monitoring.")
+
+            while not stop.is_set():
+                try:
+                    if self.is_closed(conn):
+                        results.direct_blue_idle_lost_connection_time_ns.set(perf_counter_ns())
+                        break
+
+                    sleep(1)
+                except Exception as e:
+                    self.logger.debug(f"[DirectBlueIdleConnectivity @ {host_id}] Thread exception: {e}")
+                    results.direct_blue_idle_lost_connection_time_ns.set(perf_counter_ns())
+                    break
+        except Exception as e:
+            self.logger.debug(f"[DirectBlueIdleConnectivity @ {host_id}] Thread unhandled exception: {e}")
+            self.unhandled_exceptions.append(e)
+        finally:
+            self.close_connection(conn)
+            finish_latch.count_down()
+            self.logger.debug(f"[DirectBlueIdleConnectivity @ {host_id}] Thread is completed.")
+
+    # Blue node
+    # Check: connectivity, is_closed
+    # Can terminate for itself
+    def wrapper_blue_idle_connectivity_monitor(
+            self,
+            test_driver: TestDriver,
+            conn_utils: ConnectionUtils,
+            host_id: str,
+            host: str,
+            port: int,
+            db: str,
+            start_latch: CountDownLatch,
+            stop: Event,
+            finish_latch: CountDownLatch,
+            results: BlueGreenResults):
+        conn = None
+        try:
+            connect_params = self.get_wrapper_connect_params(conn_utils, host, port, db)
+            conn = self.get_wrapper_connection_with_retry(test_driver, **connect_params)
+            self.logger.debug(f"[WrapperBlueIdleConnectivity @ {host_id}] Connection opened.")
+
+            sleep(1)
+
+            # Notify that this thread is ready for work
+            start_latch.count_down()
+
+            # Wait until other threads are ready to start the test
+            start_latch.wait_sec(5 * 60)
+            self.logger.debug(f"[WrapperBlueIdleConnectivity @ {host_id}] Starting connectivity monitoring.")
+
+            while not stop.is_set():
+                try:
+                    if self.is_closed(conn):
+                        results.wrapper_blue_idle_lost_connection_time_ns.set(perf_counter_ns())
+                        break
+
+                    sleep(1)
+                except Exception as e:
+                    self.logger.debug(f"[WrapperBlueIdleConnectivity @ {host_id}] Thread exception: {e}")
+                    results.wrapper_blue_idle_lost_connection_time_ns.set(perf_counter_ns())
+                    break
+        except Exception as e:
+            self.logger.debug(f"[WrapperBlueIdleConnectivity @ {host_id}] Thread unhandled 
exception: {e}") + self.unhandled_exceptions.append(e) + finally: + self.close_connection(conn) + finish_latch.count_down() + self.logger.debug(f"[WrapperBlueIdleConnectivity @ {host_id}] Thread is completed.") + + def get_wrapper_connect_params(self, conn_utils: ConnectionUtils, host: str, port: int, db: str) -> Dict[str, Any]: + params = conn_utils.get_connect_params(host=host, port=port, dbname=db) + params = {**params, **self.get_telemetry_params()} + params[WrapperProperties.CLUSTER_ID.name] = self.TEST_CLUSTER_ID + test_env = TestEnvironment.get_current() + engine = test_env.get_engine() + db_deployment = test_env.get_deployment() + + if db_deployment == DatabaseEngineDeployment.AURORA: + if engine == DatabaseEngine.MYSQL: + params[WrapperProperties.DIALECT.name] = DialectCode.AURORA_MYSQL + elif engine == DatabaseEngine.PG: + params[WrapperProperties.DIALECT.name] = DialectCode.AURORA_PG + elif db_deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE: + if engine == DatabaseEngine.MYSQL: + params[WrapperProperties.DIALECT.name] = DialectCode.RDS_MYSQL + elif engine == DatabaseEngine.PG: + params[WrapperProperties.DIALECT.name] = DialectCode.RDS_PG + + if TestEnvironmentFeatures.IAM in test_env.get_features(): + params[WrapperProperties.PLUGINS.name] = "bg,iam" + params[WrapperProperties.USER.name] = test_env.get_info().get_iam_user_name() + params[WrapperProperties.IAM_REGION.name] = test_env.get_info().get_region() + else: + params[WrapperProperties.PLUGINS.name] = "bg" + + if engine == DatabaseEngine.MYSQL: + params["use_pure"] = False + + return params + + def get_wrapper_connection_with_retry(self, test_driver: TestDriver, **connect_params) -> AwsWrapperConnection: + conn = None + connect_count = 0 + target_driver_connect = DriverHelper.get_connect_func(test_driver) + while conn is None and connect_count < 10: + try: + conn = AwsWrapperConnection.connect(target_driver_connect, **connect_params) + except Exception: + # ignore, try to connect again + pass + + connect_count += 1 + + if conn is None: + pytest.fail(f"Cannot connect to {connect_params.get('host')}") + + return conn + + # Blue node + # Check: connectivity, SELECT sleep(5) + # Expect: long execution time (longer than 5s) during active phase of switchover + # Can terminate for itself + def wrapper_blue_executing_connectivity_monitor( + self, + test_driver: TestDriver, + conn_utils: ConnectionUtils, + host_id: str, + host: str, + port: int, + db: str, + start_latch: CountDownLatch, + stop: Event, + finish_latch: CountDownLatch, + results: BlueGreenResults): + conn = None + query = None + test_env = TestEnvironment.get_current() + engine = test_env.get_engine() + if engine == DatabaseEngine.MYSQL: + query = "SELECT sleep(5)" + elif engine == DatabaseEngine.PG: + query = "SELECT pg_sleep(5)" + else: + pytest.fail(f"Unsupported database engine: {engine}") + + try: + target_driver_connect = DriverHelper.get_connect_func(test_driver) + connect_params = self.get_wrapper_connect_params(conn_utils, host, port, db) + conn = AwsWrapperConnection.connect(target_driver_connect, **connect_params) + bg_plugin: Optional[BlueGreenPlugin] = conn._unwrap(BlueGreenPlugin) + assert bg_plugin is not None, f"Unable to find blue/green plugin in wrapper connection for {host}." 
+ self.logger.debug(f"[WrapperBlueExecute @ {host_id}] Connection opened.") + + sleep(1) + + # Notify that this thread is ready for work + start_latch.count_down() + + # Wait until other threads are ready to start the test + start_latch.wait_sec(5 * 60) + self.logger.debug(f"[WrapperBlueExecute @ {host_id}] Starting connectivity monitoring.") + + while not stop.is_set(): + start_time_ns = perf_counter_ns() + try: + with conn.cursor() as cursor: + cursor.execute(query) + cursor.fetchall() + end_time_ns = perf_counter_ns() + results.blue_wrapper_execute_times.append( + TimeHolder(start_time_ns, end_time_ns, bg_plugin.get_hold_time_ns())) + except Exception as e: + results.blue_wrapper_execute_times.append( + TimeHolder(start_time_ns, perf_counter_ns(), bg_plugin.get_hold_time_ns(), str(e))) + if self.is_closed(conn): + break + + sleep(1) + except Exception as e: + self.logger.debug(f"[WrapperBlueExecute @ {host_id}] Thread unhandled exception: {e}") + self.unhandled_exceptions.append(e) + finally: + self.close_connection(conn) + finish_latch.count_down() + self.logger.debug(f"[WrapperBlueExecute @ {host_id}] Thread is completed.") + + # Blue node + # Check: connectivity, opening a new connection + # Expect: longer opening connection time during active phase of switchover + # Need a stop signal to terminate + def wrapper_blue_new_connection_monitor( + self, + test_driver: TestDriver, + conn_utils: ConnectionUtils, + host_id: str, + host: str, + port: int, + db: str, + start_latch: CountDownLatch, + stop: Event, + finish_latch: CountDownLatch, + results: BlueGreenResults): + conn = None + try: + target_driver_connect = DriverHelper.get_connect_func(test_driver) + connect_params = self.get_wrapper_connect_params(conn_utils, host, port, db) + + sleep(1) + + # Notify that this thread is ready for work + start_latch.count_down() + + # Wait until other threads are ready to start the test + start_latch.wait_sec(5 * 60) + self.logger.debug(f"[WrapperBlueNewConnection @ {host_id}] Starting connectivity monitoring.") + + while not stop.is_set(): + start_time_ns = perf_counter_ns() + + try: + conn = AwsWrapperConnection.connect(target_driver_connect, **connect_params) + end_time_ns = perf_counter_ns() + bg_plugin: Optional[BlueGreenPlugin] = conn._unwrap(BlueGreenPlugin) + assert bg_plugin is not None, f"Unable to find blue/green plugin in wrapper connection for {host}." + + results.blue_wrapper_connect_times.append( + TimeHolder(start_time_ns, end_time_ns, bg_plugin.get_hold_time_ns())) + except Exception as e: + if self.is_timeout_exception(e): + self.logger.debug(f"[WrapperBlueNewConnection @ {host_id}] Thread timeout exception: {e}") + else: + self.logger.debug(f"[WrapperBlueNewConnection @ {host_id}] Thread exception: {e}") + + end_time_ns = perf_counter_ns() + if conn is not None: + bg_plugin = conn._unwrap(BlueGreenPlugin) + assert bg_plugin is not None, f"Unable to find blue/green plugin in wrapper connection for {host}." 
+ results.blue_wrapper_connect_times.append( + TimeHolder(start_time_ns, end_time_ns, bg_plugin.get_hold_time_ns(), str(e))) + else: + results.blue_wrapper_connect_times.append( + TimeHolder(start_time_ns, end_time_ns, error=str(e))) + + self.close_connection(conn) + conn = None + sleep(1) + + except Exception as e: + self.logger.debug(f"[WrapperBlueNewConnection @ {host_id}] Thread unhandled exception: {e}") + self.unhandled_exceptions.append(e) + finally: + self.close_connection(conn) + finish_latch.count_down() + self.logger.debug(f"[WrapperBlueNewConnection @ {host_id}] Thread is completed.") + + def is_timeout_exception(self, exception: Exception) -> bool: + error_message = str(exception).lower() + timeout_keywords = [ + "timeout", "timed out", "statement timeout", + "query execution was interrupted", "canceling statement due to", + "connection timed out", "lost connection", "terminated" + ] + + # Check for timeout keywords in message + if any(keyword in error_message for keyword in timeout_keywords): + return True + + # MySQL-specific checks + if isinstance(exception, mysql.connector.Error): + # MySQL timeout error codes + timeout_error_codes = [1205, 2013, 2006] # Lock timeout, lost connection, server gone away + if hasattr(exception, 'errno') and exception.errno in timeout_error_codes: + return True + + # PostgreSQL-specific checks + if isinstance(exception, psycopg.Error): + # PostgreSQL timeout usually contains specific text + if "canceling statement due to statement timeout" in error_message: + return True + + return False + + # Blue DNS + # Check time of IP address change + # Can terminate for itself + def blue_dns_monitor( + self, + host_id: str, + host: str, + start_latch: CountDownLatch, + stop: Event, + finish_latch: CountDownLatch, + results: BlueGreenResults): + try: + # Notify that this thread is ready for work + start_latch.count_down() + + # Wait until other threads are ready to start the test + start_latch.wait_sec(5 * 60) + + original_ip = socket.gethostbyname(host) + self.logger.debug(f"[BlueDNS @ {host_id}] {host} -> {original_ip}") + + while not stop.is_set(): + sleep(1) + + try: + current_ip = socket.gethostbyname(host) + if current_ip != original_ip: + results.dns_blue_changed_time_ns.set(perf_counter_ns()) + self.logger.debug(f"[BlueDNS @ {host_id}] {host} -> {current_ip}") + break + except socket.gaierror as e: + self.logger.debug(f"[BlueDNS @ {host_id}] Error: {e}") + results.dns_blue_error = str(e) + results.dns_blue_changed_time_ns.set(perf_counter_ns()) + break + + except Exception as e: + self.logger.debug(f"[BlueDNS @ {host_id}] Thread unhandled exception: {e}") + self.unhandled_exceptions.append(e) + finally: + finish_latch.count_down() + self.logger.debug(f"[BlueDNS @ {host_id}] Thread is completed.") + + # Green node + # Check: connectivity, SELECT 1 + # Expect: no interruption, execute takes longer time during BG switchover + # Can terminate for itself + def wrapper_green_connectivity_monitor( + self, + test_driver: TestDriver, + conn_utils: ConnectionUtils, + host_id: str, + host: str, + port: int, + db: str, + start_latch: CountDownLatch, + stop: Event, + finish_latch: CountDownLatch, + results: BlueGreenResults): + conn = None + try: + connect_params = self.get_wrapper_connect_params(conn_utils, host, port, db) + conn = self.get_wrapper_connection_with_retry(test_driver, **connect_params) + self.logger.debug(f"[WrapperGreenConnectivity @ {host_id}] Connection opened.") + + bg_plugin: Optional[BlueGreenPlugin] = conn._unwrap(BlueGreenPlugin) + 
assert bg_plugin is not None, f"Unable to find blue/green plugin in wrapper connection for {host}." + + sleep(1) + + # Notify that this thread is ready for work + start_latch.count_down() + + # Wait until other threads are ready to start the test + start_latch.wait_sec(5 * 60) + self.logger.debug(f"[WrapperGreenConnectivity @ {host_id}] Starting connectivity monitoring.") + + start_time_ns = perf_counter_ns() + while not stop.is_set(): + try: + with conn.cursor() as cursor: + start_time_ns = perf_counter_ns() + cursor.execute("SELECT 1") + cursor.fetchall() + end_time_ns = perf_counter_ns() + results.green_wrapper_execute_times.append( + TimeHolder(start_time_ns, end_time_ns, bg_plugin.get_hold_time_ns())) + sleep(1) + except Exception as e: + if self.is_timeout_exception(e): + self.logger.debug(f"[WrapperGreenConnectivity @ {host_id}] Thread timeout exception: {e}") + results.green_wrapper_execute_times.append( + TimeHolder(start_time_ns, perf_counter_ns(), bg_plugin.get_hold_time_ns(), str(e))) + if self.is_closed(conn): + results.wrapper_green_lost_connection_time_ns.set(perf_counter_ns()) + break + else: + self.logger.debug(f"[WrapperGreenConnectivity @ {host_id}] Thread exception: {e}") + results.wrapper_green_lost_connection_time_ns.set(perf_counter_ns()) + break + except Exception as e: + self.logger.debug(f"[WrapperGreenConnectivity @ {host_id}] Thread unhandled exception: {e}") + self.unhandled_exceptions.append(e) + finally: + self.close_connection(conn) + finish_latch.count_down() + self.logger.debug(f"[WrapperGreenConnectivity @ {host_id}] Thread is completed.") + + # Green node + # Check: DNS record presence + # Expect: DNS record is deleted during/after switchover + # Can terminate by itself + def green_dns_monitor( + self, + host_id: str, + host: str, + start_latch: CountDownLatch, + stop: Event, + finish_latch: CountDownLatch, + results: BlueGreenResults): + try: + # Notify that this thread is ready for work + start_latch.count_down() + + # Wait until other threads are ready to start the test + start_latch.wait_sec(5 * 60) + + ip = socket.gethostbyname(host) + self.logger.debug(f"[GreenDNS @ {host_id}] {host} -> {ip}") + + while not stop.is_set(): + sleep(1) + + try: + socket.gethostbyname(host) + except socket.gaierror: + results.dns_green_removed_time_ns.set(perf_counter_ns()) + break + + except Exception as e: + self.logger.debug(f"[GreenDNS @ {host_id}] Thread unhandled exception: {e}") + self.unhandled_exceptions.append(e) + finally: + finish_latch.count_down() + self.logger.debug(f"[GreenDNS @ {host_id}] Thread is completed.") + + # Green node + # Check: connectivity (opening a new connection) with IAM when using node IP address + # Expect: lose connectivity after green node changes its name (green prefix to no prefix) + # Can terminate for itself + def green_iam_connectivity_monitor( + self, + test_driver, + conn_utils: ConnectionUtils, + rds_client, + host_id: str, + thread_prefix: str, + iam_token_host: str, + connect_host: str, + port: int, + db: str, + start_latch: CountDownLatch, + stop: Event, + finish_latch: CountDownLatch, + results: BlueGreenResults, + result_queue: Deque[TimeHolder], + notify_on_first_error: bool, + exit_on_first_success: bool): + conn = None + try: + test_env = TestEnvironment.get_current() + iam_user = test_env.get_info().get_iam_user_name() + green_ip = socket.gethostbyname(connect_host) + connect_params = conn_utils.get_connect_params(host=green_ip, port=port, user=iam_user, dbname=db) + 
connect_params[WrapperProperties.CONNECT_TIMEOUT_SEC.name] = 10
+            if test_env.get_engine() == DatabaseEngine.MYSQL:
+                # Required to connect with IAM using the regular mysql driver
+                connect_params["auth_plugin"] = "mysql_clear_password"
+                connect_params["use_pure"] = False
+
+            sleep(1)
+
+            # Notify that this thread is ready for work
+            start_latch.count_down()
+
+            # Wait until other threads are ready to start the test
+            start_latch.wait_sec(5 * 60)
+            self.logger.debug(
+                f"[DirectGreenIamIp{thread_prefix} @ {host_id}] Starting connectivity monitoring {iam_token_host}")
+
+            while not stop.is_set():
+                token = rds_client.generate_db_auth_token(DBHostname=iam_token_host, Port=port, DBUsername=iam_user)
+                connect_params[WrapperProperties.PASSWORD.name] = token
+
+                start_ns = perf_counter_ns()
+                try:
+                    target_driver_conn = DriverHelper.get_connect_func(test_driver)
+                    conn = target_driver_conn(**connect_params)
+                    end_ns = perf_counter_ns()
+                    result_queue.append(TimeHolder(start_ns, end_ns))
+
+                    if exit_on_first_success:
+                        results.green_node_changed_name_time_ns.compare_and_set(0, perf_counter_ns())
+                        self.logger.debug(
+                            f"[DirectGreenIamIp{thread_prefix} @ {host_id}] Successfully connected. Exiting thread...")
+                        return
+                except Exception as e:
+                    if self.is_timeout_exception(e):
+                        self.logger.debug(f"[DirectGreenIamIp{thread_prefix} @ {host_id}] Thread timeout exception: {e}")
+                        result_queue.append(TimeHolder(start_ns, perf_counter_ns(), error=str(e)))
+                    else:
+                        self.logger.debug(f"[DirectGreenIamIp{thread_prefix} @ {host_id}] Thread exception: {e}")
+                        result_queue.append(TimeHolder(start_ns, perf_counter_ns(), error=str(e)))
+                        # TODO: is 'Access Denied' the error message in Python as well as JDBC?
+                        if notify_on_first_error and "access denied" in str(e).lower():
+                            results.green_node_changed_name_time_ns.compare_and_set(0, perf_counter_ns())
+                            self.logger.debug(
+                                f"[DirectGreenIamIp{thread_prefix} @ {host_id}] "
+                                f"Encountered first 'Access denied' exception. Exiting thread...")
+                            return
+
+                self.close_connection(conn)
+                conn = None
+                sleep(1)
+
+        except Exception as e:
+            self.logger.debug(f"[DirectGreenIamIp{thread_prefix} @ {host_id}] Thread unhandled exception: {e}")
+            self.unhandled_exceptions.append(e)
+        finally:
+            self.close_connection(conn)
+            finish_latch.count_down()
+            self.logger.debug(f"[DirectGreenIamIp{thread_prefix} @ {host_id}] Thread is completed.")
+
+    # Trigger BG switchover using RDS API
+    # Can terminate for itself
+    def bg_switchover_trigger(
+            self,
+            test_utility: RdsTestUtility,
+            bg_id: str,
+            start_latch: CountDownLatch,
+            finish_latch: CountDownLatch,
+            results: Dict[str, BlueGreenResults]):
+        try:
+            start_latch.count_down()
+
+            # Wait until other threads are ready to start the test
+            start_latch.wait_sec(5 * 60)
+
+            sync_time_ns = perf_counter_ns()
+            for result in results.values():
+                result.threads_sync_time.set(sync_time_ns)
+
+            sleep(30)
+            test_utility.switchover_blue_green_deployment(bg_id)
+
+            bg_trigger_time_ns = perf_counter_ns()
+            for result in results.values():
+                result.bg_trigger_time_ns.set(bg_trigger_time_ns)
+        except Exception as e:
+            self.logger.debug(f"[Switchover] Thread unhandled exception: {e}")
+            self.unhandled_exceptions.append(e)
+        finally:
+            finish_latch.count_down()
+            self.logger.debug("[Switchover] Thread is completed.")
+
+    def print_metrics(self, rds_utils: RdsUtils):
+        bg_trigger_time_ns = next((result.bg_trigger_time_ns.get() for result in self.results.values()), None)
+        assert bg_trigger_time_ns is not None, "Cannot get bg_trigger_time"
+
+        table = []
+        headers = [
+            "Instance/endpoint",
+            "Start time",
+            "Threads sync",
+            "direct Blue conn dropped (idle)",
+            "direct Blue conn dropped (SELECT 1)",
+            "wrapper Blue conn dropped (idle)",
+            "wrapper Green conn dropped (SELECT 1)",
+            "Blue DNS updated",
+            "Green DNS removed",
+            "Green node changed name"
+        ]
+
+        def entry_green_comparator(result_entry: Tuple[str, BlueGreenResults]):
+            return 1 if rds_utils.is_green_instance(result_entry[0] + ".") else 0
+
+        def entry_name_comparator(result_entry: Tuple[str, BlueGreenResults]):
+            return rds_utils.remove_green_instance_prefix(result_entry[0]).lower()
+
+        sorted_entries: List[Tuple[str, BlueGreenResults]] = sorted(
+            self.results.items(),
+            key=lambda result_entry: (
+                entry_green_comparator(result_entry),
+                entry_name_comparator(result_entry)
+            )
+        )
+
+        if not sorted_entries:
+            table.append(["No entries"])
+
+        for entry in sorted_entries:
+            results = entry[1]
+            start_time_ms = (results.start_time_ns.get() - bg_trigger_time_ns) // 1_000_000
+            threads_sync_time_ms = (results.threads_sync_time.get() - bg_trigger_time_ns) // 1_000_000
+            direct_blue_idle_lost_connection_time_ms = (
+                self.get_formatted_time_ns_to_ms(results.direct_blue_idle_lost_connection_time_ns, bg_trigger_time_ns))
+            direct_blue_lost_connection_time_ms = (
+                self.get_formatted_time_ns_to_ms(results.direct_blue_lost_connection_time_ns, bg_trigger_time_ns))
+            wrapper_blue_idle_lost_connection_time_ms = (
+                self.get_formatted_time_ns_to_ms(results.wrapper_blue_idle_lost_connection_time_ns, bg_trigger_time_ns))
+            wrapper_green_lost_connection_time_ms = (
+                self.get_formatted_time_ns_to_ms(results.wrapper_green_lost_connection_time_ns, bg_trigger_time_ns))
+            dns_blue_changed_time_ms = (
+                self.get_formatted_time_ns_to_ms(results.dns_blue_changed_time_ns, bg_trigger_time_ns))
+            dns_green_removed_time_ms = (
+                self.get_formatted_time_ns_to_ms(results.dns_green_removed_time_ns, bg_trigger_time_ns))
+            green_node_changed_name_time_ms = (
+                self.get_formatted_time_ns_to_ms(results.green_node_changed_name_time_ns, bg_trigger_time_ns))
+
+            table.append([
+                entry[0],
+                start_time_ms,
+                threads_sync_time_ms,
+                direct_blue_idle_lost_connection_time_ms,
+                direct_blue_lost_connection_time_ms,
+                wrapper_blue_idle_lost_connection_time_ms,
+                wrapper_green_lost_connection_time_ms,
+                dns_blue_changed_time_ms,
+                dns_green_removed_time_ms,
+                green_node_changed_name_time_ms])
+
+        self.logger.debug(f"\n{tabulate(table, headers=headers)}")
+
+        for entry in sorted_entries:
+            if not entry[1].blue_status_time and not entry[1].green_status_time:
+                continue
+            self.print_node_status_times(entry[0], entry[1], bg_trigger_time_ns)
+
+        for entry in sorted_entries:
+            if not entry[1].blue_wrapper_connect_times:
+                continue
+            self.print_duration_times(
+                entry[0], "Wrapper connection time (ms) to Blue",
+                entry[1].blue_wrapper_connect_times, bg_trigger_time_ns)
+
+        for entry in sorted_entries:
+            if not entry[1].green_direct_iam_ip_with_green_node_connect_times:
+                continue
+            self.print_duration_times(
+                entry[0], "Wrapper IAM (green token) connection time (ms) to Green",
+                entry[1].green_direct_iam_ip_with_green_node_connect_times, bg_trigger_time_ns)
+
+        for entry in sorted_entries:
+            if not entry[1].blue_wrapper_execute_times:
+                continue
+            self.print_duration_times(
+                entry[0], "Wrapper execution time (ms) to Blue",
+                entry[1].blue_wrapper_execute_times, bg_trigger_time_ns)
+
+        for entry in sorted_entries:
+            if not entry[1].green_wrapper_execute_times:
+                continue
+            self.print_duration_times(
+                entry[0], "Wrapper execution time (ms) to Green",
+                entry[1].green_wrapper_execute_times, bg_trigger_time_ns)
+
+    def get_formatted_time_ns_to_ms(self, atomic_end_time_ns: AtomicInt, time_zero_ns: int) -> str:
+        return "-" if atomic_end_time_ns.get() == 0 else f"{(atomic_end_time_ns.get() - time_zero_ns) // 1_000_000} ms"
+
+    def print_node_status_times(self, node: str, results: BlueGreenResults, time_zero_ns: int):
+        # Merge into a fresh map so the per-node status results are not mutated while printing.
+        status_map: ConcurrentDict[str, int] = ConcurrentDict()
+        status_map.put_all(results.blue_status_time)
+        status_map.put_all(results.green_status_time)
+        table = []
+        headers = ["Status", "SOURCE", "TARGET"]
+        sorted_status_names = [k for k, v in sorted(status_map.items(), key=lambda x: x[1])]
+        for status in sorted_status_names:
+            blue_status_time_ns = results.blue_status_time.get(status)
+            if blue_status_time_ns:
+                source_time_ms_str = f"{(blue_status_time_ns - time_zero_ns) // 1_000_000} ms"
+            else:
+                source_time_ms_str = ""
+
+            green_status_time_ns = results.green_status_time.get(status)
+            if green_status_time_ns:
+                target_time_ms_str = f"{(green_status_time_ns - time_zero_ns) // 1_000_000} ms"
+            else:
+                target_time_ms_str = ""
+
+            table.append([status, source_time_ms_str, target_time_ms_str])
+
+        self.logger.debug(f"\n{node}:\n{tabulate(table, headers=headers)}")
+
+    def print_duration_times(self, node: str, title: str, times: Deque[TimeHolder], time_zero_ns: int):
+        table = []
+        headers = ["Connect at (ms)", "Connect time/duration (ms)", "Error"]
+        p99_ns = self.get_percentile([time.end_time_ns - time.start_time_ns for time in times], 99.0)
+        p99_ms = p99_ns // 1_000_000
+        table.append(["p99", p99_ms, ""])
+        first_connect = times[0]
+        table.append([
+            (first_connect.start_time_ns - time_zero_ns) // 1_000_000,
+            (first_connect.end_time_ns - first_connect.start_time_ns) // 1_000_000,
+            self.get_formatted_error(first_connect.error)
+        ])
+
+        for time_holder in times:
+            duration_ms = (time_holder.end_time_ns - time_holder.start_time_ns) // 1_000_000
+            if duration_ms > p99_ms:
+
table.append([ + (time_holder.start_time_ns - time_zero_ns) // 1_000_000, + (time_holder.end_time_ns - time_holder.start_time_ns) // 1_000_000, + self.get_formatted_error(time_holder.error) + ]) + + last_connect = times[-1] + table.append([ + (last_connect.start_time_ns - time_zero_ns) // 1_000_000, + (last_connect.end_time_ns - last_connect.start_time_ns) // 1_000_000, + self.get_formatted_error(last_connect.error) + ]) + + self.logger.debug(f"\n{node}: {title}\n{tabulate(table, headers=headers)}") + + def get_formatted_error(self, error: Optional[str]) -> str: + return "" if error is None else error[0:min(len(error), 100)].replace("\n", " ") + "..." + + def get_percentile(self, input_data: List[int], percentile: float): + if not input_data: + return 0 + + sorted_list = sorted(input_data) + rank = 1 if percentile == 0 else math.ceil(percentile / 100.0 * len(input_data)) + return sorted_list[rank - 1] + + def log_unhandled_exceptions(self): + for exception in self.unhandled_exceptions: + self.logger.debug(f"Unhandled exception: {exception}") + + def assert_test(self): + bg_trigger_time_ns = next((result.bg_trigger_time_ns.get() for result in self.results.values()), None) + assert bg_trigger_time_ns is not None, "Cannot get bg_trigger_time" + + max_green_node_changed_name_time_ms = max( + (0 if result.green_node_changed_name_time_ns.get() == 0 + else (result.green_node_changed_name_time_ns.get() - bg_trigger_time_ns) // 1_000_000 + for result in self.results.values()), + default=0 + ) + self.logger.debug(f"max_green_node_changed_name_time: {max_green_node_changed_name_time_ms} ms") + + switchover_complete_time_ms = max( + (0 if x == 0 + else (x - bg_trigger_time_ns) // 1_000_000 + for result in self.results.values() + if result.green_status_time + for x in [result.green_status_time.get("SWITCHOVER_COMPLETED", 0)]), + default=0 + ) + self.logger.debug(f"switchover_complete_time: {switchover_complete_time_ms} ms") + + # Assertions + assert switchover_complete_time_ms != 0, "BG switchover hasn't completed." + assert switchover_complete_time_ms >= max_green_node_changed_name_time_ms, \ + "Green node changed name after SWITCHOVER_COMPLETED." 
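+
+# TimeHolder captures one measured operation: start/end timestamps in ns, the BG plugin's hold time,
+# and an error message if the operation failed. BlueGreenResults aggregates the measurements
+# collected by the monitoring threads above.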
+ + +@dataclass +class TimeHolder: + start_time_ns: int + end_time_ns: int + hold_ns: int = 0 + error: Optional[str] = None + + +@dataclass +class BlueGreenResults: + start_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + threads_sync_time: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + bg_trigger_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + direct_blue_lost_connection_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + direct_blue_idle_lost_connection_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + wrapper_blue_idle_lost_connection_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + wrapper_green_lost_connection_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + dns_blue_changed_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + dns_blue_error: Optional[str] = None + dns_green_removed_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + green_node_changed_name_time_ns: AtomicInt = field(default_factory=lambda: AtomicInt(0)) + blue_status_time: ConcurrentDict[str, int] = field(default_factory=ConcurrentDict) + green_status_time: ConcurrentDict[str, int] = field(default_factory=ConcurrentDict) + blue_wrapper_connect_times: Deque[TimeHolder] = field(default_factory=deque) + blue_wrapper_execute_times: Deque[TimeHolder] = field(default_factory=deque) + green_wrapper_execute_times: Deque[TimeHolder] = field(default_factory=deque) + green_direct_iam_ip_with_blue_node_connect_times: Deque[TimeHolder] = field(default_factory=deque) + green_direct_iam_ip_with_green_node_connect_times: Deque[TimeHolder] = field(default_factory=deque) diff --git a/tests/integration/container/test_custom_endpoint.py b/tests/integration/container/test_custom_endpoint.py index ee33bcfa..57162437 100644 --- a/tests/integration/container/test_custom_endpoint.py +++ b/tests/integration/container/test_custom_endpoint.py @@ -45,7 +45,9 @@ @enable_on_num_instances(min_instances=3) @enable_on_deployments([DatabaseEngineDeployment.AURORA]) -@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, TestEnvironmentFeatures.PERFORMANCE]) +@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, + TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT, + TestEnvironmentFeatures.PERFORMANCE]) class TestCustomEndpoint: logger: ClassVar[Logger] = Logger(__name__) endpoint_id: ClassVar[str] = f"test-endpoint-1-{uuid4()}" diff --git a/tests/integration/container/test_iam_authentication.py b/tests/integration/container/test_iam_authentication.py index 0e4e2e01..4bc6ee3d 100644 --- a/tests/integration/container/test_iam_authentication.py +++ b/tests/integration/container/test_iam_authentication.py @@ -39,7 +39,9 @@ @enable_on_features([TestEnvironmentFeatures.IAM]) -@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, TestEnvironmentFeatures.PERFORMANCE]) +@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, + TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT, + TestEnvironmentFeatures.PERFORMANCE]) class TestAwsIamAuthentication: @pytest.fixture(scope='class') diff --git a/tests/integration/container/test_read_write_splitting.py b/tests/integration/container/test_read_write_splitting.py index 86ad049a..de75badb 100644 --- a/tests/integration/container/test_read_write_splitting.py +++ b/tests/integration/container/test_read_write_splitting.py @@ -42,8 +42,12 @@ @enable_on_num_instances(min_instances=2) 
-@enable_on_deployments([DatabaseEngineDeployment.AURORA, DatabaseEngineDeployment.RDS_MULTI_AZ]) -@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, TestEnvironmentFeatures.PERFORMANCE]) +@enable_on_deployments([DatabaseEngineDeployment.AURORA, + DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER, + DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE]) +@disable_on_features([TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY, + TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT, + TestEnvironmentFeatures.PERFORMANCE]) class TestReadWriteSplitting: @pytest.fixture(scope='class') def rds_utils(self): diff --git a/tests/integration/container/utils/database_engine_deployment.py b/tests/integration/container/utils/database_engine_deployment.py index 3f542745..c58817a3 100644 --- a/tests/integration/container/utils/database_engine_deployment.py +++ b/tests/integration/container/utils/database_engine_deployment.py @@ -18,5 +18,6 @@ class DatabaseEngineDeployment(str, Enum): DOCKER = "DOCKER" RDS = "RDS" - RDS_MULTI_AZ = "RDS_MULTI_AZ" + RDS_MULTI_AZ_CLUSTER = "RDS_MULTI_AZ_CLUSTER" + RDS_MULTI_AZ_INSTANCE = "RDS_MULTI_AZ_INSTANCE" AURORA = "AURORA" diff --git a/tests/integration/container/utils/rds_test_utility.py b/tests/integration/container/utils/rds_test_utility.py index 27d48aab..16fa13fc 100644 --- a/tests/integration/container/utils/rds_test_utility.py +++ b/tests/integration/container/utils/rds_test_utility.py @@ -17,6 +17,8 @@ from contextlib import closing from typing import TYPE_CHECKING, Any, Dict, List, Optional, cast +import botocore.exceptions + if TYPE_CHECKING: from aws_advanced_python_wrapper.pep249 import Connection from .test_database_info import TestDatabaseInfo @@ -28,7 +30,6 @@ import boto3 import pytest -from botocore.config import Config from aws_advanced_python_wrapper.driver_info import DriverInfo from aws_advanced_python_wrapper.errors import UnsupportedOperationError @@ -48,8 +49,15 @@ class RdsTestUtility: _client: Any def __init__(self, region: str, endpoint: Optional[str] = None): - config = Config(region_name=region, endpoint_url=endpoint) if endpoint else Config(region_name=region) - self._client = boto3.client('rds', config=config) + if endpoint: + self._client = boto3.client(service_name='rds', region_name=region, endpoint_url=endpoint) + else: + self._client = boto3.client(service_name='rds', region_name=region) + + @staticmethod + def get_utility() -> RdsTestUtility: + test_info = TestEnvironment.get_current().get_info() + return RdsTestUtility(test_info.get_region(), test_info.get_rds_endpoint()) def get_db_instance(self, instance_id: str) -> Optional[Dict[str, Any]]: filters = [{'Name': "db-instance-id", 'Values': [f"{instance_id}"]}] @@ -60,6 +68,14 @@ def get_db_instance(self, instance_id: str) -> Optional[Dict[str, Any]]: return None return instances[0] + def get_rds_client(self): + test_info = TestEnvironment.get_current().get_info() + endpoint = test_info.get_rds_endpoint() + if endpoint: + return boto3.client(service_name='rds', region_name=test_info.get_region(), endpoint_url=endpoint) + else: + return boto3.client(service_name='rds', region_name=test_info.get_region()) + def does_db_instance_exist(self, instance_id: str) -> bool: try: instance = self.get_db_instance(instance_id) @@ -78,7 +94,7 @@ def create_db_instance(self, instance_id: str) -> TestInstanceInfo: self.delete_db_instance(instance_id) self._client.create_db_instance( - DBClusterIdentifier=environment.get_info().get_cluster_name(), + 
DBClusterIdentifier=environment.get_info().get_db_name(), DBInstanceIdentifier=instance_id, DBInstanceClass="db.r5.large", Engine=self.get_aurora_engine_name(environment.get_engine()), @@ -133,12 +149,12 @@ def failover_cluster_and_wait_until_writer_changed( cluster_id: Optional[str] = None, target_id: Optional[str] = None) -> None: deployment = TestEnvironment.get_current().get_deployment() - if DatabaseEngineDeployment.RDS_MULTI_AZ == deployment and target_id is not None: + if DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER == deployment and target_id is not None: raise Exception(Messages.get_formatted("RdsTestUtility.FailoverToTargetNotSupported", target_id, deployment)) start = perf_counter_ns() if cluster_id is None: - cluster_id = TestEnvironment.get_current().get_info().get_cluster_name() + cluster_id = TestEnvironment.get_current().get_info().get_db_name() if initial_writer_id is None: initial_writer_id = self.get_cluster_writer_instance_id(cluster_id) @@ -170,7 +186,7 @@ def failover_cluster_and_wait_until_writer_changed( def failover_cluster(self, cluster_id: Optional[str] = None, target_id: Optional[str] = None) -> None: if cluster_id is None: - cluster_id = TestEnvironment.get_current().get_info().get_cluster_name() + cluster_id = TestEnvironment.get_current().get_info().get_db_name() self.wait_until_cluster_has_desired_status(cluster_id, "available") @@ -227,7 +243,7 @@ def query_instance_id( if DatabaseEngineDeployment.AURORA == database_deployment: return self._query_aurora_instance_id(conn, database_engine) - elif DatabaseEngineDeployment.RDS_MULTI_AZ == database_deployment: + elif DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER == database_deployment: return self._query_multi_az_instance_id(conn, database_engine) else: raise RuntimeError(Messages.get_formatted( @@ -268,7 +284,7 @@ def _query_multi_az_instance_id(self, conn: Connection, engine: DatabaseEngine): def is_db_instance_writer(self, instance_id: str, cluster_id: Optional[str] = None) -> bool: if cluster_id is None: - cluster_id = TestEnvironment.get_current().get_info().get_cluster_name() + cluster_id = TestEnvironment.get_current().get_info().get_db_name() cluster_info = self.get_db_cluster(cluster_id) members = cluster_info.get("DBClusterMembers") for m in members: @@ -278,7 +294,7 @@ def is_db_instance_writer(self, instance_id: str, cluster_id: Optional[str] = No def get_cluster_writer_instance_id(self, cluster_id: Optional[str] = None) -> str: if cluster_id is None: - cluster_id = TestEnvironment.get_current().get_info().get_cluster_name() + cluster_id = TestEnvironment.get_current().get_info().get_db_name() cluster_info = self.get_db_cluster(cluster_id) members = cluster_info.get("DBClusterMembers") for m in members: @@ -286,23 +302,23 @@ def get_cluster_writer_instance_id(self, cluster_id: Optional[str] = None) -> st return cast('str', m.get("DBInstanceIdentifier")) raise Exception(Messages.get_formatted("RdsTestUtility.WriterInstanceNotFound", cluster_id)) - def get_instance_ids(self) -> List[str]: + def get_instance_ids(self, host: Optional[str] = None) -> List[str]: test_environment: TestEnvironment = TestEnvironment.get_current() deployment: DatabaseEngineDeployment = test_environment.get_deployment() if DatabaseEngineDeployment.AURORA == deployment: - return self._get_aurora_instance_ids() - elif DatabaseEngineDeployment.RDS_MULTI_AZ == deployment: - return self._get_multi_az_instance_ids() + return self._get_aurora_instance_ids(host) + elif DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER == deployment: + 
return self._get_multi_az_instance_ids(host) else: raise RuntimeError("RdsTestUtility.MethodNotSupportedForDeployment", "get_instance_ids", deployment) - def _get_aurora_instance_ids(self) -> List[str]: + def _get_aurora_instance_ids(self, host: Optional[str] = None) -> List[str]: test_environment: TestEnvironment = TestEnvironment.get_current() engine: DatabaseEngine = test_environment.get_engine() instance_info: TestInstanceInfo = test_environment.get_writer() sql = self._get_aurora_topology_sql(engine) - with self._open_connection(instance_info) as conn, conn.cursor() as cursor: + with self._open_connection(instance_info, host) as conn, conn.cursor() as cursor: cursor.execute(sql) records = cursor.fetchall() @@ -312,7 +328,7 @@ def _get_aurora_instance_ids(self) -> List[str]: return result - def _get_multi_az_instance_ids(self) -> List[str]: + def _get_multi_az_instance_ids(self, host: Optional[str] = None) -> List[str]: test_environment: TestEnvironment = TestEnvironment.get_current() engine: DatabaseEngine = test_environment.get_engine() cluster_endpoint_instance_info: TestInstanceInfo = TestInstanceInfo({ @@ -322,7 +338,7 @@ def _get_multi_az_instance_ids(self) -> List[str]: self.logger.debug("Testing._get_multi_az_instance_ids_connecting", cluster_endpoint_instance_info.get_host()) - conn = self._open_connection(cluster_endpoint_instance_info) + conn = self._open_connection(cluster_endpoint_instance_info, host) cursor = conn.cursor() get_writer_id_query = self._get_multi_az_writer_sql(engine) cursor.execute(get_writer_id_query) @@ -350,7 +366,7 @@ def _get_multi_az_instance_ids(self) -> List[str]: return result - def _open_connection(self, instance_info: TestInstanceInfo) -> Any: + def _open_connection(self, instance_info: TestInstanceInfo, host: Optional[str] = None) -> Any: env: TestEnvironment = TestEnvironment.get_current() database_engine: DatabaseEngine = env.get_engine() @@ -358,12 +374,12 @@ def _open_connection(self, instance_info: TestInstanceInfo) -> Any: target_driver_connect = DriverHelper.get_connect_func(test_driver) + host = host if host is not None else instance_info.get_host() user = env.get_database_info().get_username() password = env.get_database_info().get_password() db = env.get_database_info().get_default_db_name() - conn_params = DriverHelper.get_connect_params( - instance_info.get_host(), instance_info.get_port(), user, password, db, test_driver) + conn_params = DriverHelper.get_connect_params(host, instance_info.get_port(), user, password, db, test_driver) conn = target_driver_connect(**conn_params, connect_timeout=10) return conn @@ -449,3 +465,47 @@ def get_aurora_engine_name(engine: DatabaseEngine): return "aurora-mysql" raise RuntimeError(Messages.get_formatted("RdsTestUtility.InvalidDatabaseEngine", engine.value)) + + def get_cluster_by_arn(self, cluster_arn: str) -> Optional[Any]: + response = self._client.describe_db_clusters(Filters=[{'Name': 'db-cluster-id', 'Values': [cluster_arn]}]) + clusters = response["DBClusters"] + if len(clusters) < 1: + return None + + return clusters[0] + + def get_rds_instance_info_by_arn(self, instance_arn: str) -> Optional[Any]: + response = self._client.describe_db_instances(Filters=[{'Name': 'db-instance-id', 'Values': [instance_arn]}]) + instances = response["DBInstances"] + if len(instances) < 1: + return None + + return instances[0] + + def get_blue_green_deployment(self, bg_id: str) -> Optional[Any]: + try: + response: Any = self._client.describe_blue_green_deployments(BlueGreenDeploymentIdentifier=bg_id) + 
deployments = response.get("BlueGreenDeployments") + if len(deployments) < 1: + return None + + return deployments[0] + except self._client.exceptions.BlueGreenDeploymentNotFoundFault: + return None + + def switchover_blue_green_deployment(self, bg_id: str): + try: + self._client.switchover_blue_green_deployment(BlueGreenDeploymentIdentifier=bg_id) + self.logger.debug("switchover_blue_green_deployment request is sent.") + except botocore.exceptions.ClientError as e: + error_info = e.response['Error'] + self.logger.debug( + f"switchover_blue_green_deployment error: code={error_info['Code']}, message={error_info['Message']}") + + if error_info['Message']: + error_message = error_info['Message'] + else: + error_message = (f"The switchover_blue_green_deployment request for the blue/green deployment with " + f"ID '{bg_id}' failed for an unspecified reason") + + raise Exception(error_message) diff --git a/tests/integration/container/utils/test_environment.py b/tests/integration/container/utils/test_environment.py index c1c9d749..e547dba2 100644 --- a/tests/integration/container/utils/test_environment.py +++ b/tests/integration/container/utils/test_environment.py @@ -203,7 +203,7 @@ def get_writer(self) -> TestInstanceInfo: return self.get_instances()[0] def get_cluster_name(self) -> str: - return self.get_info().get_cluster_name() + return self.get_info().get_db_name() def get_proxy_database_info(self) -> TestProxyDatabaseInfo: return self.get_info().get_proxy_database_info() diff --git a/tests/integration/container/utils/test_environment_features.py b/tests/integration/container/utils/test_environment_features.py index dfbb7fd9..ec42d197 100644 --- a/tests/integration/container/utils/test_environment_features.py +++ b/tests/integration/container/utils/test_environment_features.py @@ -26,6 +26,7 @@ class TestEnvironmentFeatures(Enum): AWS_CREDENTIALS_ENABLED = "AWS_CREDENTIALS_ENABLED" PERFORMANCE = "PERFORMANCE" RUN_AUTOSCALING_TESTS_ONLY = "RUN_AUTOSCALING_TESTS_ONLY" + BLUE_GREEN_DEPLOYMENT = "BLUE_GREEN_DEPLOYMENT" SKIP_MYSQL_DRIVER_TESTS = "SKIP_MYSQL_DRIVER_TESTS" SKIP_PG_DRIVER_TESTS = "SKIP_PG_DRIVER_TESTS" TELEMETRY_TRACES_ENABLED = "TELEMETRY_TRACES_ENABLED" diff --git a/tests/integration/container/utils/test_environment_info.py b/tests/integration/container/utils/test_environment_info.py index d1f5966c..14afdbcb 100644 --- a/tests/integration/container/utils/test_environment_info.py +++ b/tests/integration/container/utils/test_environment_info.py @@ -30,8 +30,11 @@ class TestEnvironmentInfo: _aws_session_token: str _region: str _rds_endpoint: str - _cluster_name: str + _db_name: str _iam_user_name: str + _bg_deployment_id: str + _cluster_parameter_group: str + _random_base: str _database_info: TestDatabaseInfo _proxy_database_info: TestProxyDatabaseInfo _traces_telemetry_info: TestTelemetryInfo @@ -50,8 +53,11 @@ def __init__(self, test_info: Dict[str, Any]) -> None: self._aws_session_token = typing.cast('str', test_info.get("awsSessionToken")) self._region = typing.cast('str', test_info.get("region")) self._rds_endpoint = typing.cast('str', test_info.get("rdsEndpoint")) - self._cluster_name = typing.cast('str', test_info.get("clusterName")) + self._db_name = typing.cast('str', test_info.get("rdsDbName")) self._iam_user_name = typing.cast('str', test_info.get("iamUsername")) + self._bg_deployment_id = typing.cast('str', test_info.get("blueGreenDeploymentId")) + self._cluster_parameter_group = typing.cast('str', test_info.get("clusterParameterGroupName")) + self._random_base =
typing.cast('str', test_info.get("randomBase")) database_info_dict: Dict[str, Any] = typing.cast('Dict[str, Any]', test_info.get("databaseInfo")) if database_info_dict is not None: @@ -95,12 +101,21 @@ def get_region(self) -> str: def get_rds_endpoint(self) -> str: return self._rds_endpoint - def get_cluster_name(self) -> str: - return self._cluster_name + def get_db_name(self) -> str: + return self._db_name def get_iam_user_name(self) -> str: return self._iam_user_name + def get_bg_deployment_id(self) -> str: + return self._bg_deployment_id + + def get_cluster_parameter_group(self) -> str: + return self._cluster_parameter_group + + def get_random_base(self) -> str: + return self._random_base + def get_traces_telemetry_info(self) -> TestTelemetryInfo: return self._traces_telemetry_info diff --git a/tests/integration/host/build.gradle.kts b/tests/integration/host/build.gradle.kts index 7351d814..b746eb64 100644 --- a/tests/integration/host/build.gradle.kts +++ b/tests/integration/host/build.gradle.kts @@ -69,7 +69,9 @@ tasks.register("test-python-3.11-mysql") { doFirst { systemProperty("exclude-performance", "true") systemProperty("exclude-python-38", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") } @@ -81,7 +83,9 @@ tasks.register("test-python-3.8-mysql") { doFirst { systemProperty("exclude-performance", "true") systemProperty("exclude-python-311", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") } @@ -93,7 +97,9 @@ tasks.register("test-python-3.11-pg") { doFirst { systemProperty("exclude-performance", "true") systemProperty("exclude-python-38", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") systemProperty("exclude-mariadb-driver", "true") @@ -107,7 +113,9 @@ tasks.register("test-python-3.8-pg") { doFirst { systemProperty("exclude-performance", "true") systemProperty("exclude-python-311", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") systemProperty("exclude-mariadb-driver", "true") @@ -120,7 +128,9 @@ tasks.register("test-docker") { filter.includeTestsMatching("integration.host.TestRunner.runTests") doFirst { systemProperty("exclude-aurora", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-python-38", "true") } @@ -131,7 +141,9 @@ tasks.register("test-aurora") { filter.includeTestsMatching("integration.host.TestRunner.runTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", 
"true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-python-38", "true") } @@ -142,7 +154,9 @@ tasks.register("test-pg-aurora") { filter.includeTestsMatching("integration.host.TestRunner.runTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") @@ -156,7 +170,9 @@ tasks.register("test-mysql-aurora") { filter.includeTestsMatching("integration.host.TestRunner.runTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") @@ -171,6 +187,7 @@ tasks.register("test-multi-az") { systemProperty("exclude-performance", "true") systemProperty("exclude-aurora", "true") systemProperty("exclude-python-38", "true") + systemProperty("exclude-bg", "true") } } @@ -185,6 +202,7 @@ tasks.register("test-pg-multi-az") { systemProperty("exclude-mysql-engine", "true") systemProperty("exclude-mariadb-driver", "true") systemProperty("exclude-mariadb-engine", "true") + systemProperty("exclude-bg", "true") } } @@ -197,6 +215,7 @@ tasks.register("test-mysql-multi-az") { systemProperty("exclude-aurora", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-bg", "true") } } @@ -209,6 +228,7 @@ tasks.register("test-autoscaling") { systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-bg", "true") } } @@ -217,7 +237,9 @@ tasks.register("test-pg-aurora-performance") { filter.includeTestsMatching("integration.host.TestRunner.runTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-iam", "true") systemProperty("exclude-secrets-manager", "true") systemProperty("exclude-mysql-driver", "true") @@ -232,7 +254,9 @@ tasks.register("test-mysql-aurora-performance") { filter.includeTestsMatching("integration.host.TestRunner.runTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-iam", "true") systemProperty("exclude-secrets-manager", "true") systemProperty("exclude-pg-driver", "true") @@ -240,6 +264,88 @@ tasks.register("test-mysql-aurora-performance") { } } +tasks.register("test-bgd-mysql-instance") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + 
systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-aurora", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-2", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") + } +} + +tasks.register("test-bgd-mysql-aurora") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-1", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") + + } +} + +tasks.register("test-bgd-pg-instance") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-aurora", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-2", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") + } +} + +tasks.register("test-bgd-pg-aurora") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-1", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") + + } +} + // Debug tasks.register("debug-all-environments") { @@ -248,6 +354,7 @@ tasks.register("debug-all-environments") { doFirst { systemProperty("exclude-performance", "true") systemProperty("exclude-python-38", "true") + systemProperty("exclude-bg", "true") } } @@ -256,7 +363,9 @@ tasks.register("debug-docker") { filter.includeTestsMatching("integration.host.TestRunner.debugTests") doFirst { systemProperty("exclude-aurora", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") 
systemProperty("exclude-python-38", "true") } @@ -267,7 +376,9 @@ tasks.register("debug-aurora") { filter.includeTestsMatching("integration.host.TestRunner.debugTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-python-38", "true") } @@ -278,7 +389,9 @@ tasks.register("debug-pg-aurora") { filter.includeTestsMatching("integration.host.TestRunner.debugTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") @@ -290,7 +403,9 @@ tasks.register("debug-mysql-aurora") { filter.includeTestsMatching("integration.host.TestRunner.debugTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") @@ -303,7 +418,9 @@ tasks.register("debug-autoscaling") { doFirst { systemProperty("test-autoscaling", "true") systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") @@ -315,7 +432,9 @@ tasks.register("debug-pg-aurora-performance") { filter.includeTestsMatching("integration.host.TestRunner.debugTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-iam", "true") systemProperty("exclude-secrets-manager", "true") systemProperty("exclude-mysql-driver", "true") @@ -330,7 +449,9 @@ tasks.register("debug-mysql-aurora-performance") { filter.includeTestsMatching("integration.host.TestRunner.debugTests") doFirst { systemProperty("exclude-docker", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-iam", "true") systemProperty("exclude-secrets-manager", "true") systemProperty("exclude-pg-driver", "true") @@ -346,6 +467,7 @@ tasks.register("debug-multi-az") { systemProperty("exclude-aurora", "true") systemProperty("exclude-performance", "true") systemProperty("exclude-python-38", "true") + systemProperty("exclude-bg", "true") } } @@ -358,6 +480,7 @@ tasks.register("debug-pg-multi-az") { systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-bg", "true") } } @@ -370,5 +493,86 @@ 
tasks.register("debug-mysql-multi-az") { systemProperty("exclude-performance", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-bg", "true") + } +} + +tasks.register("debug-bgd-pg-aurora") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.debugTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-1", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") + } +} + +tasks.register("debug-bgd-mysql-aurora") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.debugTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-1", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") + } +} + +tasks.register("debug-bgd-mysql-instance") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.debugTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-aurora", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-2", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") + } +} + +tasks.register("debug-bgd-pg-instance") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.debugTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-python-38", "true") + systemProperty("exclude-aurora", "true") + systemProperty("exclude-failover", "true") + systemProperty("exclude-secrets-manager", "true") + systemProperty("exclude-instances-2", "true") + systemProperty("exclude-instances-3", "true") + systemProperty("exclude-instances-5", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("test-bg-only", "true") } } diff --git a/tests/integration/host/src/test/java/integration/DatabaseEngineDeployment.java b/tests/integration/host/src/test/java/integration/DatabaseEngineDeployment.java index 0126e0f2..d7273e30 100644 --- 
a/tests/integration/host/src/test/java/integration/DatabaseEngineDeployment.java +++ b/tests/integration/host/src/test/java/integration/DatabaseEngineDeployment.java @@ -19,6 +19,7 @@ public enum DatabaseEngineDeployment { DOCKER, RDS, - RDS_MULTI_AZ, + RDS_MULTI_AZ_CLUSTER, + RDS_MULTI_AZ_INSTANCE, AURORA } diff --git a/tests/integration/host/src/test/java/integration/DriverHelper.java b/tests/integration/host/src/test/java/integration/DriverHelper.java index a06d0687..b4c4f679 100644 --- a/tests/integration/host/src/test/java/integration/DriverHelper.java +++ b/tests/integration/host/src/test/java/integration/DriverHelper.java @@ -16,18 +16,10 @@ package integration; -import com.mysql.cj.conf.PropertyKey; import java.sql.Connection; -import java.sql.Driver; import java.sql.DriverManager; import java.sql.SQLException; -import java.util.Collections; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.TimeUnit; -import java.util.logging.Level; import java.util.logging.Logger; -import org.postgresql.PGProperty; import org.testcontainers.shaded.org.apache.commons.lang3.NotImplementedException; public class DriverHelper { @@ -45,51 +37,6 @@ public static String getDriverProtocol(DatabaseEngine databaseEngine) { } } - public static Connection getDriverConnection(TestEnvironmentInfo info) throws SQLException { - final String url = - String.format( - "%s%s:%d/%s", - DriverHelper.getDriverProtocol(info.getRequest().getDatabaseEngine()), - info.getDatabaseInfo().getClusterEndpoint(), - info.getDatabaseInfo().getClusterEndpointPort(), - info.getDatabaseInfo().getDefaultDbName()); - return DriverManager.getConnection(url, info.getDatabaseInfo().getUsername(), info.getDatabaseInfo().getPassword()); - } - - public static String getDriverProtocol(DatabaseEngine databaseEngine, TestDriver testDriver) { - switch (testDriver) { - case MYSQL: - return "jdbc:mysql://"; - case PG: - return "jdbc:postgresql://"; - default: - throw new NotImplementedException(testDriver.toString()); - } - } - - public static void registerDriver(DatabaseEngine engine) { - try { - Class.forName(DriverHelper.getDriverClassname(engine)); - } catch (ClassNotFoundException e) { - throw new RuntimeException( - "Driver not found: " - + DriverHelper.getDriverClassname(engine), - e); - } - } - - public static String getWrapperDriverProtocol( - DatabaseEngine databaseEngine, TestDriver testDriver) { - switch (testDriver) { - case MYSQL: - return "jdbc:aws-wrapper:mysql://"; - case PG: - return "jdbc:aws-wrapper:postgresql://"; - default: - throw new NotImplementedException(testDriver.toString()); - } - } - public static String getDriverClassname(DatabaseEngine databaseEngine) { switch (databaseEngine) { case MYSQL: @@ -112,95 +59,40 @@ public static String getDriverClassname(TestDriver testDriver) { } } - public static String getHostnameSql(DatabaseEngine databaseEngine) { - switch (databaseEngine) { - case MYSQL: - return "SELECT @@hostname"; - case PG: - return "SELECT inet_server_addr()"; - default: - throw new NotImplementedException(databaseEngine.toString()); - } - } - - public static void setConnectTimeout( - TestDriver testDriver, Properties props, long timeout, TimeUnit timeUnit) { - switch (testDriver) { - case MYSQL: - props.setProperty( - PropertyKey.connectTimeout.getKeyName(), String.valueOf(timeUnit.toMillis(timeout))); - break; - case PG: - props.setProperty( - PGProperty.CONNECT_TIMEOUT.getName(), String.valueOf(timeUnit.toSeconds(timeout))); - break; - default: - throw new 
NotImplementedException(testDriver.toString()); - } - } - - public static void setSocketTimeout( - TestDriver testDriver, Properties props, long timeout, TimeUnit timeUnit) { - switch (testDriver) { - case MYSQL: - props.setProperty( - PropertyKey.socketTimeout.getKeyName(), String.valueOf(timeUnit.toMillis(timeout))); - break; - case PG: - props.setProperty( - PGProperty.SOCKET_TIMEOUT.getName(), String.valueOf(timeUnit.toSeconds(timeout))); - break; - default: - throw new NotImplementedException(testDriver.toString()); - } - } - - public static void setTcpKeepAlive(TestDriver testDriver, Properties props, boolean enabled) { - switch (testDriver) { - case MYSQL: - props.setProperty(PropertyKey.tcpKeepAlive.getKeyName(), String.valueOf(enabled)); - break; - case PG: - props.setProperty(PGProperty.TCP_KEEP_ALIVE.getName(), String.valueOf(enabled)); - break; - default: - throw new NotImplementedException(testDriver.toString()); - } - } - - public static void setMonitoringConnectTimeout( - TestDriver testDriver, Properties props, long timeout, TimeUnit timeUnit) { - switch (testDriver) { - case MYSQL: - props.setProperty( - "monitoring-" + PropertyKey.connectTimeout.getKeyName(), - String.valueOf(timeUnit.toMillis(timeout))); - break; - case PG: - props.setProperty( - "monitoring-" + PGProperty.CONNECT_TIMEOUT.getName(), - String.valueOf(timeUnit.toSeconds(timeout))); - break; - default: - throw new NotImplementedException(testDriver.toString()); + public static void registerDriver(DatabaseEngine engine) { + try { + Class.forName(DriverHelper.getDriverClassname(engine)); + } catch (ClassNotFoundException e) { + throw new RuntimeException( + "Driver not found: " + + DriverHelper.getDriverClassname(engine), + e); } } - public static void setMonitoringSocketTimeout( - TestDriver testDriver, Properties props, long timeout, TimeUnit timeUnit) { - switch (testDriver) { - case MYSQL: - props.setProperty( - "monitoring-" + PropertyKey.socketTimeout.getKeyName(), - String.valueOf(timeUnit.toMillis(timeout))); + public static Connection getDriverConnection(TestEnvironmentInfo info) throws SQLException { + String url; + switch (info.getRequest().getDatabaseEngineDeployment()) { + case AURORA: + case RDS_MULTI_AZ_CLUSTER: + url = String.format( + "%s%s:%d/%s", + DriverHelper.getDriverProtocol(info.getRequest().getDatabaseEngine()), + info.getDatabaseInfo().getClusterEndpoint(), + info.getDatabaseInfo().getClusterEndpointPort(), + info.getDatabaseInfo().getDefaultDbName()); break; - case PG: - props.setProperty( - "monitoring-" + PGProperty.SOCKET_TIMEOUT.getName(), - String.valueOf(timeUnit.toSeconds(timeout))); + case RDS_MULTI_AZ_INSTANCE: + url = String.format( + "%s%s:%d/%s", + DriverHelper.getDriverProtocol(info.getRequest().getDatabaseEngine()), + info.getDatabaseInfo().getInstances().get(0).getHost(), + info.getDatabaseInfo().getInstances().get(0).getPort(), + info.getDatabaseInfo().getDefaultDbName()); break; default: - throw new NotImplementedException(testDriver.toString()); + throw new UnsupportedOperationException(info.getRequest().getDatabaseEngineDeployment().toString()); } + return DriverManager.getConnection(url, info.getDatabaseInfo().getUsername(), info.getDatabaseInfo().getPassword()); } } diff --git a/tests/integration/host/src/test/java/integration/TestEnvironmentFeatures.java b/tests/integration/host/src/test/java/integration/TestEnvironmentFeatures.java index 6cf8514a..a80defb9 100644 --- a/tests/integration/host/src/test/java/integration/TestEnvironmentFeatures.java +++ 
b/tests/integration/host/src/test/java/integration/TestEnvironmentFeatures.java @@ -24,9 +24,10 @@ public enum TestEnvironmentFeatures { NETWORK_OUTAGES_ENABLED, AWS_CREDENTIALS_ENABLED, PERFORMANCE, - RUN_AUTOSCALING_TESTS_ONLY, SKIP_MYSQL_DRIVER_TESTS, SKIP_PG_DRIVER_TESTS, + RUN_AUTOSCALING_TESTS_ONLY, TELEMETRY_TRACES_ENABLED, - TELEMETRY_METRICS_ENABLED + TELEMETRY_METRICS_ENABLED, + BLUE_GREEN_DEPLOYMENT } diff --git a/tests/integration/host/src/test/java/integration/TestEnvironmentInfo.java b/tests/integration/host/src/test/java/integration/TestEnvironmentInfo.java index 81d05483..6d1a1ee1 100644 --- a/tests/integration/host/src/test/java/integration/TestEnvironmentInfo.java +++ b/tests/integration/host/src/test/java/integration/TestEnvironmentInfo.java @@ -26,7 +26,7 @@ public class TestEnvironmentInfo { private String region; private String rdsEndpoint; - private String clusterName; + private String rdsDbName; private String iamUsername; private TestDatabaseInfo databaseInfo; @@ -36,6 +36,13 @@ public class TestEnvironmentInfo { private TestTelemetryInfo tracesTelemetryInfo; private TestTelemetryInfo metricsTelemetryInfo; + private String blueGreenDeploymentId; + + private String clusterParameterGroupName = null; + + // Random alphanumeric combination that is used to form a test cluster name or an instance name. + private String randomBase = null; + public TestDatabaseInfo getDatabaseInfo() { return this.databaseInfo; } @@ -84,8 +91,8 @@ public String getRdsEndpoint() { return this.rdsEndpoint; } - public String getClusterName() { - return this.clusterName; + public String getRdsDbName() { + return this.rdsDbName; } public String getIamUsername() { @@ -104,8 +111,8 @@ public void setRdsEndpoint(String rdsEndpoint) { this.rdsEndpoint = rdsEndpoint; } - public void setClusterName(String clusterName) { - this.clusterName = clusterName; + public void setRdsDbName(String rdsDbName) { + this.rdsDbName = rdsDbName; } public void setDatabaseInfo(TestDatabaseInfo databaseInfo) { @@ -147,4 +154,28 @@ public void setAwsSessionToken(String awsSessionToken) { public void setIamUsername(String iamUsername) { this.iamUsername = iamUsername; } + + public String getBlueGreenDeploymentId() { + return this.blueGreenDeploymentId; + } + + public void setBlueGreenDeploymentId(final String blueGreenDeploymentId) { + this.blueGreenDeploymentId = blueGreenDeploymentId; + } + + public String getClusterParameterGroupName() { + return this.clusterParameterGroupName; + } + + public void setClusterParameterGroupName(String clusterParameterGroupName) { + this.clusterParameterGroupName = clusterParameterGroupName; + } + + public String getRandomBase() { + return this.randomBase; + } + + public void setRandomBase(String randomBase) { + this.randomBase = randomBase; + } } diff --git a/tests/integration/host/src/test/java/integration/TestInstanceInfo.java b/tests/integration/host/src/test/java/integration/TestInstanceInfo.java index 256006e0..250d1932 100644 --- a/tests/integration/host/src/test/java/integration/TestInstanceInfo.java +++ b/tests/integration/host/src/test/java/integration/TestInstanceInfo.java @@ -49,10 +49,6 @@ public int getPort() { } public String getUrl() { - String url = host + ":" + port; - if (!url.endsWith("/")) { - url += "/"; - } - return url; + return host + ":" + port + "/"; } } diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironment.java b/tests/integration/host/src/test/java/integration/host/TestEnvironment.java index 7cabd4b2..06a4fa30 100644
--- a/tests/integration/host/src/test/java/integration/host/TestEnvironment.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironment.java @@ -16,8 +16,6 @@ package integration.host; -import static org.junit.jupiter.api.Assertions.assertEquals; - import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import eu.rekawek.toxiproxy.ToxiproxyClient; @@ -34,24 +32,28 @@ import integration.host.TestEnvironmentProvider.EnvPreCreateInfo; import integration.util.AuroraTestUtility; import integration.util.ContainerHelper; +import integration.util.StringUtils; import java.io.IOException; -import java.net.URISyntaxException; import java.net.UnknownHostException; import java.sql.Connection; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.List; +import java.util.Random; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Logger; import org.testcontainers.containers.GenericContainer; import org.testcontainers.containers.Network; import org.testcontainers.containers.ToxiproxyContainer; import org.testcontainers.shaded.org.apache.commons.lang3.NotImplementedException; -import integration.util.StringUtils; +import software.amazon.awssdk.services.rds.model.BlueGreenDeployment; import software.amazon.awssdk.services.rds.model.DBCluster; +import software.amazon.awssdk.services.rds.model.DBInstance; public class TestEnvironment implements AutoCloseable { @@ -69,7 +71,9 @@ public class TestEnvironment implements AutoCloseable { protected static final int PROXY_PORT = 8666; private static final TestEnvironmentConfiguration config = new TestEnvironmentConfiguration(); - private static final boolean USE_OTLP_CONTAINER_FOR_TRACES = false; + private static final boolean USE_OTLP_CONTAINER_FOR_TRACES = true; + + private static final AtomicInteger ipAddressUsageRefCount = new AtomicInteger(0); private final TestEnvironmentInfo info = new TestEnvironmentInfo(); // only this info is passed to test container @@ -78,9 +82,9 @@ public class TestEnvironment implements AutoCloseable { // test container. 
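+  // Requested number of instances; validated in createDbCluster (1-15 for Aurora, with 5 as the + // fallback; exactly 3 for RDS Multi-AZ clusters).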
private int numOfInstances; - private boolean reuseAuroraDbCluster; - private String auroraClusterName; // "cluster-mysql" - private String auroraClusterDomain; // "XYZ.us-west-2.rds.amazonaws.com" + private boolean reuseDb; + private String rdsDbName; // "cluster-mysql", "instance-name", "rds-multi-az-cluster-name" + private String rdsDbDomain; // "XYZ.us-west-2.rds.amazonaws.com" private String rdsEndpoint; // "https://rds-int.amazon.com" private String awsAccessKeyId; @@ -103,13 +107,15 @@ private TestEnvironment(TestEnvironmentRequest request) { this.info.setRequest(request); } - public static TestEnvironment build(TestEnvironmentRequest request) throws IOException, URISyntaxException { + public static TestEnvironment build(TestEnvironmentRequest request) throws IOException { + LOGGER.finest("Building test env: " + request.getEnvPreCreateIndex()); preCreateEnvironment(request.getEnvPreCreateIndex()); TestEnvironment env; - switch (request.getDatabaseEngineDeployment()) { + DatabaseEngineDeployment deployment = request.getDatabaseEngineDeployment(); + switch (deployment) { case DOCKER: env = new TestEnvironment(request); initDatabaseParams(env); @@ -124,11 +130,21 @@ public static TestEnvironment build(TestEnvironmentRequest request) throws IOExc TestEnvironmentFeatures.FAILOVER_SUPPORTED.toString()); } + if (request.getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)) { + throw new UnsupportedOperationException( + TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT.toString()); + } + break; case AURORA: - case RDS_MULTI_AZ: + case RDS_MULTI_AZ_CLUSTER: + case RDS_MULTI_AZ_INSTANCE: + env = createAuroraOrMultiAzEnvironment(request); - authorizeIP(env); + + if (request.getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)) { + createBlueGreenDeployment(env); + } break; @@ -156,7 +172,27 @@ public static TestEnvironment build(TestEnvironmentRequest request) throws IOExc return env; } - private static TestEnvironment createAuroraOrMultiAzEnvironment(TestEnvironmentRequest request) throws URISyntaxException { + private static void authorizeRunnerIpAddress(TestEnvironment env) { + DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + if (deployment == DatabaseEngineDeployment.AURORA + || deployment == DatabaseEngineDeployment.RDS + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + // These environments require creating an external database cluster that should be publicly available. + // Corresponding AWS Security Groups should be configured and the test task runner IP address + // should be whitelisted.
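+      // The ipAddressUsageRefCount guard below authorizes the runner IP only once per process, even + // when several test environments are built in the same run; deAuthorizeIP releases the grant + // once the count drops back to zero.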
+ + if (env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)) { + if (ipAddressUsageRefCount.incrementAndGet() == 1) { + authorizeIP(env); + } else { + LOGGER.finest("IP usage count: " + ipAddressUsageRefCount.get()); + } + } + } + + private static TestEnvironment createAuroraOrMultiAzEnvironment(TestEnvironmentRequest request) { EnvPreCreateInfo preCreateInfo = TestEnvironmentProvider.preCreateInfos.get(request.getEnvPreCreateIndex()); @@ -189,8 +225,16 @@ private static TestEnvironment createAuroraOrMultiAzEnvironmentR } if (result instanceof TestEnvironment) { TestEnvironment resultTestEnvironment = (TestEnvironment) result; - LOGGER.finer(() -> String.format("Use pre-created DB cluster: %s.cluster-%s", - resultTestEnvironment.auroraClusterName, resultTestEnvironment.auroraClusterDomain)); + final DatabaseEngineDeployment deployment = + resultTestEnvironment.info.getRequest().getDatabaseEngineDeployment(); + if (deployment == DatabaseEngineDeployment.AURORA + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + LOGGER.finer(() -> String.format("Use pre-created DB cluster: %s.cluster-%s", + resultTestEnvironment.rdsDbName, resultTestEnvironment.rdsDbDomain)); + } else { + LOGGER.finer(() -> String.format("Use pre-created DB: %s.%s", + resultTestEnvironment.rdsDbName, resultTestEnvironment.rdsDbDomain)); + } return resultTestEnvironment; } @@ -199,19 +243,148 @@ } else { TestEnvironment env = new TestEnvironment(request); + initRandomBase(env); initDatabaseParams(env); - createDbCluster(env); + initAwsCredentials(env); + + switch (request.getDatabaseEngineDeployment()) { + case RDS_MULTI_AZ_INSTANCE: + initEnv(env); + authorizeRunnerIpAddress(env); + createMultiAzInstance(env); + configureIamAccess(env); + break; + case RDS_MULTI_AZ_CLUSTER: + initEnv(env); + authorizeRunnerIpAddress(env); + createDbCluster(env); + configureIamAccess(env); + break; + case AURORA: + initEnv(env); + authorizeRunnerIpAddress(env); + + if (!env.reuseDb + && env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)) { + createCustomClusterParameterGroup(env); + } + createDbCluster(env); + configureIamAccess(env); + break; + default: + throw new NotImplementedException(request.getDatabaseEngineDeployment().toString()); + } + + return env; + } + + } + + private static void createBlueGreenDeployment(TestEnvironment env) { - if (request.getFeatures().contains(TestEnvironmentFeatures.IAM)) { - if (request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ) { - throw new RuntimeException("IAM isn't supported by " + DatabaseEngineDeployment.RDS_MULTI_AZ); + if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.AURORA) { + DBCluster clusterInfo = env.auroraUtil.getClusterInfo(env.rdsDbName); + if (env.reuseDb) { + BlueGreenDeployment bgDeployment = env.auroraUtil.getBlueGreenDeploymentBySource(clusterInfo.dbClusterArn()); + if (bgDeployment != null) { + env.info.setBlueGreenDeploymentId(bgDeployment.blueGreenDeploymentIdentifier()); + waitForBlueGreenClustersHaveRightState(env, bgDeployment); + return; } - } } - return env; + // otherwise, create a new BG deployment + final String blueGreenId = env.auroraUtil.createBlueGreenDeployment( + env.rdsDbName, clusterInfo.dbClusterArn()); + env.info.setBlueGreenDeploymentId(blueGreenId); + + BlueGreenDeployment
bgDeployment = env.auroraUtil.getBlueGreenDeployment(blueGreenId); + if (bgDeployment != null) { + waitForBlueGreenClustersHaveRightState(env, bgDeployment); + } + + } else if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE) { + DBInstance instanceInfo = env.auroraUtil.getRdsInstanceInfo(env.rdsDbName); + if (env.reuseDb) { + BlueGreenDeployment bgDeployment = env.auroraUtil.getBlueGreenDeploymentBySource(instanceInfo.dbInstanceArn()); + if (bgDeployment != null) { + env.info.setBlueGreenDeploymentId(bgDeployment.blueGreenDeploymentIdentifier()); + waitForBlueGreenInstancesHaveRightState(env, bgDeployment); + return; + } + } + + // otherwise, create a new BG deployment + final String blueGreenId = env.auroraUtil.createBlueGreenDeployment( + env.rdsDbName, instanceInfo.dbInstanceArn()); + env.info.setBlueGreenDeploymentId(blueGreenId); + + BlueGreenDeployment bgDeployment = env.auroraUtil.getBlueGreenDeployment(blueGreenId); + if (bgDeployment != null) { + waitForBlueGreenInstancesHaveRightState(env, bgDeployment); + } + + } else { + LOGGER.warning("BG Deployments are supported for RDS MultiAz Instances and Aurora clusters only." + + " Proceeding without creating a BG Deployment."); + } + } + private static void waitForBlueGreenClustersHaveRightState(TestEnvironment env, BlueGreenDeployment bgDeployment) { + + DBCluster blueClusterInfo = env.auroraUtil.getClusterByArn(bgDeployment.source()); + if (blueClusterInfo != null) { + try { + env.auroraUtil.waitUntilClusterHasRightState(blueClusterInfo.dbClusterIdentifier()); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + + DBCluster greenClusterInfo = env.auroraUtil.getClusterByArn(bgDeployment.target()); + if (greenClusterInfo != null) { + try { + env.auroraUtil.waitUntilClusterHasRightState(greenClusterInfo.dbClusterIdentifier()); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + } + + private static void waitForBlueGreenInstancesHaveRightState(TestEnvironment env, BlueGreenDeployment bgDeployment) { + + DBInstance blueInstanceInfo = env.auroraUtil.getRdsInstanceInfoByArn(bgDeployment.source()); + if (blueInstanceInfo != null) { + try { + env.auroraUtil.waitUntilInstanceHasRightState( + blueInstanceInfo.dbInstanceIdentifier(), "available"); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + + DBInstance greenInstanceInfo = env.auroraUtil.getRdsInstanceInfoByArn(bgDeployment.target()); + if (greenInstanceInfo != null) { + try { + env.auroraUtil.waitUntilInstanceHasRightState( + greenInstanceInfo.dbInstanceIdentifier(), "available"); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + } + + private static void createCustomClusterParameterGroup(TestEnvironment env) { + String groupName = String.format("test-cpg-%s", env.info.getRandomBase()); + String engine = getDbEngine(env.info.getRequest()); + String engineVersion = getDbEngineVersion(engine, env); + env.auroraUtil.createCustomClusterParameterGroup( + groupName, engine, engineVersion, env.info.getRequest().getDatabaseEngine()); + env.info.setClusterParameterGroupName(groupName); } private static void createDatabaseContainers(TestEnvironment env) { @@ -284,7 +457,7 @@ } } - private static void
createDbCluster(TestEnvironment env) throws URISyntaxException { + private static void createDbCluster(TestEnvironment env) { switch (env.info.getRequest().getDatabaseInstances()) { case SINGLE_INSTANCE: @@ -296,11 +469,21 @@ private static void createDbCluster(TestEnvironment env) throws URISyntaxExcepti initAwsCredentials(env); env.numOfInstances = env.info.getRequest().getNumOfInstances(); - if (env.numOfInstances < 1 || env.numOfInstances > 15) { - LOGGER.warning( - env.numOfInstances + " instances were requested but the requested number must be " - + "between 1 and 15. 5 instances will be used as a default."); - env.numOfInstances = 5; + if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.AURORA) { + if (env.numOfInstances < 1 || env.numOfInstances > 15) { + LOGGER.warning( + env.numOfInstances + " instances were requested but the requested number must be " + + "between 1 and 15. 5 instances will be used as a default."); + env.numOfInstances = 5; + } + } + if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + if (env.numOfInstances != 3) { + LOGGER.warning( + env.numOfInstances + " instances were requested but the requested number must be 3. " + + "3 instances will be used as a default."); + env.numOfInstances = 3; + } } createDbCluster(env, env.numOfInstances); @@ -310,18 +493,173 @@ private static void createDbCluster(TestEnvironment env) throws URISyntaxExcepti } } - private static void createDbCluster(TestEnvironment env, int numOfInstances) throws URISyntaxException { + private static void createDbCluster(TestEnvironment env, int numOfInstances) { + + if (env.reuseDb) { + if (StringUtils.isNullOrEmpty(env.rdsDbDomain)) { + throw new RuntimeException("Environment variable RDS_DB_DOMAIN is required."); + } + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + throw new RuntimeException("Environment variable RDS_DB_NAME is required."); + } + + if (!env.auroraUtil.doesClusterExist(env.rdsDbName)) { + throw new RuntimeException( + "It's requested to reuse existing DB cluster but it doesn't exist: " + + env.rdsDbName + + ".cluster-" + + env.rdsDbDomain); + } + LOGGER.finer( + "Reuse existing cluster " + env.rdsDbName + ".cluster-" + env.rdsDbDomain); + + DBCluster clusterInfo = env.auroraUtil.getClusterInfo(env.rdsDbName); + + DatabaseEngine existingClusterDatabaseEngine = env.auroraUtil.getClusterEngine(clusterInfo); + if (existingClusterDatabaseEngine != env.info.getRequest().getDatabaseEngine()) { + throw new RuntimeException( + "Existing cluster is " + + existingClusterDatabaseEngine + + " cluster. " + + env.info.getRequest().getDatabaseEngine() + + " is expected."); + } + + env.info.setDatabaseEngine(clusterInfo.engine()); + env.info.setDatabaseEngineVersion(clusterInfo.engineVersion()); + } else { + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + int remainingTries = 5; + boolean clusterExists = false; + while (remainingTries-- > 0) { + env.rdsDbName = getRandomName(env); + if (env.auroraUtil.doesClusterExist(env.rdsDbName)) { + clusterExists = true; + env.info.setRandomBase(null); + initRandomBase(env); + LOGGER.finest("Cluster " + env.rdsDbName + " already exists. 
Pick up another name."); + } else { + clusterExists = false; + LOGGER.finer("Cluster to create: " + env.rdsDbName); + break; + } + } + if (clusterExists) { + throw new RuntimeException("Can't pick up a cluster name."); + } + } + + try { + String engine = getDbEngine(env.info.getRequest()); + String engineVersion = getDbEngineVersion(engine, env); + if (StringUtils.isNullOrEmpty(engineVersion)) { + throw new RuntimeException("Failed to get engine version."); + } + String instanceClass = env.auroraUtil.getDbInstanceClass(env.info.getRequest()); + + LOGGER.finer("Using " + engine + " " + engineVersion); + + env.auroraUtil.createCluster( + env.info.getDatabaseInfo().getUsername(), + env.info.getDatabaseInfo().getPassword(), + env.info.getDatabaseInfo().getDefaultDbName(), + env.rdsDbName, + env.info.getRequest().getDatabaseEngineDeployment(), + env.info.getRegion(), + engine, + instanceClass, + engineVersion, + env.info.getClusterParameterGroupName(), + numOfInstances); + + List dbInstances = env.auroraUtil.getDBInstances(env.rdsDbName); + if (dbInstances.isEmpty()) { + throw new RuntimeException("Failed to get instance information for cluster " + env.rdsDbName); + } + + final String instanceEndpoint = dbInstances.get(0).endpoint().address(); + env.rdsDbDomain = instanceEndpoint.substring(instanceEndpoint.indexOf(".") + 1); + env.info.setDatabaseEngine(engine); + env.info.setDatabaseEngineVersion(engineVersion); + LOGGER.finer( + "Created a new cluster " + env.rdsDbName + ".cluster-" + env.rdsDbDomain); + } catch (Exception e) { + + LOGGER.finer("Error creating a cluster " + env.rdsDbName + ". " + e.getMessage()); + + // remove cluster and instances + LOGGER.finer("Deleting cluster " + env.rdsDbName); + env.auroraUtil.deleteCluster(env.rdsDbName, env.info.getRequest().getDatabaseEngineDeployment(), false); + LOGGER.finer("Deleted cluster " + env.rdsDbName); + + throw new RuntimeException(e); + } + } + + env.info.setRdsDbName(env.rdsDbName); + + int port = getPort(env.info.getRequest()); + + env.info + .getDatabaseInfo() + .setClusterEndpoint(env.rdsDbName + ".cluster-" + env.rdsDbDomain, port); + env.info + .getDatabaseInfo() + .setClusterReadOnlyEndpoint( + env.rdsDbName + ".cluster-ro-" + env.rdsDbDomain, port); + env.info.getDatabaseInfo().setInstanceEndpointSuffix(env.rdsDbDomain, port); + + List instances = env.auroraUtil.getTestInstancesInfo(env.rdsDbName); + env.info.getDatabaseInfo().getInstances().clear(); + env.info.getDatabaseInfo().getInstances().addAll(instances); + + // Make sure the cluster is available and accessible. 
+      try { + env.auroraUtil.waitUntilClusterHasRightState(env.rdsDbName); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + + // Create an 'rds_tools' extension for RDS PG + final DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + final DatabaseEngine engine = env.info.getRequest().getDatabaseEngine(); + if ((DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER.equals(deployment) + || DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE.equals(deployment)) + && DatabaseEngine.PG.equals(engine)) { + DriverHelper.registerDriver(engine); + + try (Connection conn = DriverHelper.getDriverConnection(env.info); + Statement stmt = conn.createStatement()) { + stmt.execute("CREATE EXTENSION IF NOT EXISTS rds_tools"); + } catch (SQLException e) { + throw new RuntimeException("An exception occurred while creating the rds_tools extension.", e); + } + } + } + private static void initEnv(TestEnvironment env) { env.info.setRegion( !StringUtils.isNullOrEmpty(config.rdsDbRegion) ? config.rdsDbRegion : "us-east-2"); - env.reuseAuroraDbCluster = config.reuseRdsCluster; - env.auroraClusterName = config.rdsClusterName; // "cluster-mysql" - env.auroraClusterDomain = config.rdsClusterDomain; // "XYZ.us-west-2.rds.amazonaws.com" - env.rdsEndpoint = config.rdsEndpoint; // "https://rds-int.amazon.com" - env.info.setRdsEndpoint(env.rdsEndpoint); + env.reuseDb = config.reuseRdsDb; + env.rdsDbName = config.rdsDbName; // "cluster-mysql" + env.rdsDbDomain = config.rdsDbDomain; // "XYZ.us-west-2.rds.amazonaws.com" + env.rdsEndpoint = config.rdsEndpoint; // "https://rds-int.amazon.com" + env.info.setRdsEndpoint(env.rdsEndpoint); + + env.auroraUtil = + new AuroraTestUtility( + env.info.getRegion(), + env.rdsEndpoint, + env.awsAccessKeyId, + env.awsSecretAccessKey, + env.awsSessionToken); + } + + private static void createMultiAzInstance(TestEnvironment env) { env.auroraUtil = new AuroraTestUtility( @@ -333,101 +671,99 @@ private static void createDbCluster(TestEnvironment env, int numOfInstances) thr ArrayList<TestInstanceInfo> instances = new ArrayList<>(); - if (env.reuseAuroraDbCluster) { - if (StringUtils.isNullOrEmpty(env.auroraClusterDomain)) { - throw new RuntimeException("Environment variable AURORA_CLUSTER_DOMAIN is required."); + if (env.reuseDb) { + if (StringUtils.isNullOrEmpty(env.rdsDbDomain)) { + throw new RuntimeException("Environment variable RDS_DB_DOMAIN is required."); + } + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + throw new RuntimeException("Environment variable RDS_DB_NAME is required."); } - if (!env.auroraUtil.doesClusterExist(env.auroraClusterName)) { + if (!env.auroraUtil.doesInstanceExist(env.rdsDbName)) { throw new RuntimeException( - "It's requested to reuse existing DB cluster but it doesn't exist: " - + env.auroraClusterName + "It's requested to reuse existing RDS instance but it doesn't exist: " + + env.rdsDbName + "." - + env.auroraClusterDomain); + + env.rdsDbDomain); } LOGGER.finer( - "Reuse existing cluster " + env.auroraClusterName + ".cluster-" + env.auroraClusterDomain); + "Reuse existing RDS Instance " + env.rdsDbName + "."
+ env.rdsDbDomain); - DBCluster clusterInfo = env.auroraUtil.getClusterInfo(env.auroraClusterName); + DBInstance instanceInfo = env.auroraUtil.getRdsInstanceInfo(env.rdsDbName); - DatabaseEngine existingClusterDatabaseEngine = env.auroraUtil.getClusterEngine(clusterInfo); - if (existingClusterDatabaseEngine != env.info.getRequest().getDatabaseEngine()) { + DatabaseEngine existingRdsInstanceDatabaseEngine = env.auroraUtil.getRdsInstanceEngine(instanceInfo); + if (existingRdsInstanceDatabaseEngine != env.info.getRequest().getDatabaseEngine()) { throw new RuntimeException( - "Existing cluster is " - + existingClusterDatabaseEngine - + " cluster. " + "Existing RDS Instance is " + + existingRdsInstanceDatabaseEngine + + " instance. " + env.info.getRequest().getDatabaseEngine() + " is expected."); } - env.info.setDatabaseEngine(clusterInfo.engine()); - env.info.setDatabaseEngineVersion(clusterInfo.engineVersion()); - instances.addAll(env.auroraUtil.getClusterInstanceIds(env.auroraClusterName)); + env.info.setDatabaseEngine(instanceInfo.engine()); + env.info.setDatabaseEngineVersion(instanceInfo.engineVersion()); + instances.add(new TestInstanceInfo( + instanceInfo.dbInstanceIdentifier(), + instanceInfo.endpoint().address(), + instanceInfo.endpoint().port())); } else { - if (StringUtils.isNullOrEmpty(env.auroraClusterName)) { - env.auroraClusterName = getRandomName(env.info.getRequest()); - LOGGER.finer("Cluster to create: " + env.auroraClusterName); + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + env.rdsDbName = getRandomName(env); + LOGGER.finer("RDS Instance to create: " + env.rdsDbName); } try { String engine = getDbEngine(env.info.getRequest()); - String engineVersion = getDbEngineVersion(env); + String engineVersion = getDbEngineVersion(engine, env); if (StringUtils.isNullOrEmpty(engineVersion)) { throw new RuntimeException("Failed to get engine version."); } - String instanceClass = getDbInstanceClass(env.info.getRequest()); + String instanceClass = env.auroraUtil.getDbInstanceClass(env.info.getRequest()); + + LOGGER.finer("Using " + engine + " " + engineVersion); - env.auroraClusterDomain = - env.auroraUtil.createCluster( + env.rdsDbDomain = + env.auroraUtil.createMultiAzInstance( env.info.getDatabaseInfo().getUsername(), env.info.getDatabaseInfo().getPassword(), env.info.getDatabaseInfo().getDefaultDbName(), - env.auroraClusterName, + env.rdsDbName, env.info.getRequest().getDatabaseEngineDeployment(), engine, instanceClass, engineVersion, - numOfInstances, instances); + env.info.setDatabaseEngine(engine); env.info.setDatabaseEngineVersion(engineVersion); LOGGER.finer( - "Created a new cluster " + env.auroraClusterName + ".cluster-" + env.auroraClusterDomain); + "Created a new RDS Instance " + env.rdsDbName + "." + env.rdsDbDomain); } catch (Exception e) { - LOGGER.finer("Error creating a cluster " + env.auroraClusterName + ". " + e.getMessage()); + LOGGER.finer("Error creating an RDS Instance " + env.rdsDbName + ".
" + e); - // remove cluster and instances - LOGGER.finer("Deleting cluster " + env.auroraClusterName); - env.auroraUtil.deleteCluster(env.auroraClusterName); - LOGGER.finer("Deleted cluster " + env.auroraClusterName); + // remove RDS instance + LOGGER.finer("Deleting RDS Instance " + env.rdsDbName); + env.auroraUtil.deleteMultiAzInstance(env.rdsDbName, false); + LOGGER.finer("Deleted RDS Instance " + env.rdsDbName); throw new RuntimeException(e); } } - env.info.setClusterName(env.auroraClusterName); - int port = getPort(env.info.getRequest()); - - env.info - .getDatabaseInfo() - .setClusterEndpoint(env.auroraClusterName + ".cluster-" + env.auroraClusterDomain, port); - env.info - .getDatabaseInfo() - .setClusterReadOnlyEndpoint( - env.auroraClusterName + ".cluster-ro-" + env.auroraClusterDomain, port); - env.info.getDatabaseInfo().setInstanceEndpointSuffix(env.auroraClusterDomain, port); + env.info.getDatabaseInfo().setInstanceEndpointSuffix(env.rdsDbDomain, port); env.info.getDatabaseInfo().getInstances().clear(); env.info.getDatabaseInfo().getInstances().addAll(instances); - authorizeIP(env); + final DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + final DatabaseEngine engine = env.info.getRequest().getDatabaseEngine(); - DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); - DatabaseEngine engine = env.info.getRequest().getDatabaseEngine(); - if (DatabaseEngineDeployment.RDS_MULTI_AZ.equals(deployment) && DatabaseEngine.PG.equals(engine)) { + // Create 'rds_tools' extension for RDS Instance. + if (DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE.equals(deployment) && DatabaseEngine.PG.equals(engine)) { DriverHelper.registerDriver(engine); try (Connection conn = DriverHelper.getDriverConnection(env.info); @@ -442,29 +778,78 @@ private static void createDbCluster(TestEnvironment env, int numOfInstances) thr private static void authorizeIP(TestEnvironment env) { try { env.runnerIP = env.auroraUtil.getPublicIPAddress(); + LOGGER.finest("Test runner IP: " + env.runnerIP); } catch (UnknownHostException e) { throw new RuntimeException(e); } env.auroraUtil.ec2AuthorizeIP(env.runnerIP); + LOGGER.finest(String.format("Test runner IP %s authorized. Usage count: %d", + env.runnerIP, ipAddressUsageRefCount.get())); } - private static String getRandomName(TestEnvironmentRequest request) { - switch (request.getDatabaseEngine()) { + private static void deAuthorizeIP(TestEnvironment env) { + if (ipAddressUsageRefCount.decrementAndGet() == 0) { + if (env.runnerIP == null) { + try { + env.runnerIP = env.auroraUtil.getPublicIPAddress(); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + + if (!env.reuseDb) { + env.auroraUtil.ec2DeauthorizesIP(env.runnerIP); + LOGGER.finest(String.format("Test runner IP %s de-authorized. 
Usage count: %d", + env.runnerIP, ipAddressUsageRefCount.get())); + } else { + LOGGER.finest("The IP address usage count hit 0, but the REUSE_RDS_DB was set to true, so IP " + + "de-authorization was skipped."); + } + } else { + LOGGER.finest("IP usage count: " + ipAddressUsageRefCount.get()); + } + } + + private static void initRandomBase(TestEnvironment env) { + String randomBase = env.info.getRandomBase(); + if (StringUtils.isNullOrEmpty(randomBase)) { + env.info.setRandomBase(generateRandom(10)); + } + } + + private static String getRandomName(TestEnvironment env) { + + switch (env.info.getRequest().getDatabaseEngine()) { case MYSQL: - return "test-mysql-" + System.nanoTime(); + return "test-mysql-" + env.info.getRandomBase(); case PG: - return "test-pg-" + System.nanoTime(); + return "test-pg-" + env.info.getRandomBase(); default: - return String.valueOf(System.nanoTime()); + return env.info.getRandomBase(); } } + private static String generateRandom(int length) { + String alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"; + + int n = alphabet.length(); + StringBuilder result = new StringBuilder(); + Random r = new Random(); + + for (int i = 0; i < length; i++) { + result.append(alphabet.charAt(r.nextInt(n))); + } + + return result.toString(); + } + private static String getDbEngine(TestEnvironmentRequest request) { switch (request.getDatabaseEngineDeployment()) { case AURORA: return getAuroraDbEngine(request); case RDS: - case RDS_MULTI_AZ: + case RDS_MULTI_AZ_CLUSTER: + case RDS_MULTI_AZ_INSTANCE: return getRdsEngine(request); default: throw new NotImplementedException(request.getDatabaseEngineDeployment().toString()); @@ -493,49 +878,33 @@ private static String getRdsEngine(TestEnvironmentRequest request) { } } - private static String getDbEngineVersion(TestEnvironment env) { - final TestEnvironmentRequest request = env.info.getRequest(); - switch (request.getDatabaseEngineDeployment()) { - case AURORA: - return getAuroraDbEngineVersion(env); - case RDS: - case RDS_MULTI_AZ: - return getRdsEngineVersion(request); - default: - throw new NotImplementedException(request.getDatabaseEngineDeployment().toString()); - } - } - - private static String getAuroraDbEngineVersion(TestEnvironment env) { - String engineName; + private static String getDbEngineVersion(String engineName, TestEnvironment env) { String systemPropertyVersion; TestEnvironmentRequest request = env.info.getRequest(); switch (request.getDatabaseEngine()) { case MYSQL: - engineName = "aurora-mysql"; - systemPropertyVersion = config.auroraMySqlDbEngineVersion; + systemPropertyVersion = config.mysqlVersion; break; case PG: - engineName = "aurora-postgresql"; - systemPropertyVersion = config.auroraPgDbEngineVersion; + systemPropertyVersion = config.pgVersion; break; default: throw new NotImplementedException(request.getDatabaseEngine().toString()); } - return findAuroraDbEngineVersion(env, engineName, systemPropertyVersion); + return findEngineVersion(env, engineName, systemPropertyVersion); } - private static String findAuroraDbEngineVersion( - TestEnvironment env, - String engineName, - String systemPropertyVersion) { + private static String findEngineVersion( + TestEnvironment env, + String engineName, + String systemPropertyVersion) { if (StringUtils.isNullOrEmpty(systemPropertyVersion)) { - return env.auroraUtil.getLTSVersion(engineName); + return env.auroraUtil.getDefaultVersion(engineName); } switch (systemPropertyVersion.toLowerCase()) { - case "lts": - return env.auroraUtil.getLTSVersion(engineName); + case 
"default": + return env.auroraUtil.getDefaultVersion(engineName); case "latest": return env.auroraUtil.getLatestVersion(engineName); default: @@ -543,29 +912,6 @@ private static String findAuroraDbEngineVersion( } } - private static String getRdsEngineVersion(TestEnvironmentRequest request) { - switch (request.getDatabaseEngine()) { - case MYSQL: - return "8.0.33"; - case PG: - return "15.4"; - default: - throw new NotImplementedException(request.getDatabaseEngine().toString()); - } - } - - private static String getDbInstanceClass(TestEnvironmentRequest request) { - switch (request.getDatabaseEngineDeployment()) { - case AURORA: - return "db.r6g.large"; - case RDS: - case RDS_MULTI_AZ: - return "db.m5d.large"; - default: - throw new NotImplementedException(request.getDatabaseEngine().toString()); - } - } - private static int getPort(TestEnvironmentRequest request) { switch (request.getDatabaseEngine()) { case MYSQL: @@ -579,9 +925,10 @@ private static int getPort(TestEnvironmentRequest request) { private static void initDatabaseParams(TestEnvironment env) { final String dbName = - !StringUtils.isNullOrEmpty(config.dbName) - ? config.dbName - : "test_database"; + config.dbName == null + ? "test_database" + : config.dbName.trim(); + final String dbUsername = !StringUtils.isNullOrEmpty(config.dbUsername) ? config.dbUsername @@ -805,17 +1152,18 @@ private static String getContainerBaseImageName(TestEnvironmentRequest request) private static void configureIamAccess(TestEnvironment env) { - if (env.info.getRequest().getDatabaseEngineDeployment() != DatabaseEngineDeployment.AURORA) { - throw new UnsupportedOperationException( - env.info.getRequest().getDatabaseEngineDeployment().toString()); + if (!env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.IAM)) { + return; } + final DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + env.info.setIamUsername( !StringUtils.isNullOrEmpty(config.iamUser) ? 
config.iamUser : "jane_doe"); - if (!env.reuseAuroraDbCluster) { + if (!env.reuseDb) { try { Class.forName(DriverHelper.getDriverClassname(env.info.getRequest().getDatabaseEngine())); } catch (ClassNotFoundException e) { @@ -825,22 +1173,42 @@ private static void configureIamAccess(TestEnvironment env) { e); } - final String url = - String.format( + String url; + switch (deployment) { + case AURORA: + case RDS_MULTI_AZ_CLUSTER: + url = String.format( "%s%s:%d/%s", DriverHelper.getDriverProtocol(env.info.getRequest().getDatabaseEngine()), env.info.getDatabaseInfo().getClusterEndpoint(), env.info.getDatabaseInfo().getClusterEndpointPort(), env.info.getDatabaseInfo().getDefaultDbName()); + break; + case RDS_MULTI_AZ_INSTANCE: + url = String.format( + "%s%s:%d/%s", + DriverHelper.getDriverProtocol(env.info.getRequest().getDatabaseEngine()), + env.info.getDatabaseInfo().getInstances().get(0).getHost(), + env.info.getDatabaseInfo().getInstances().get(0).getPort(), + env.info.getDatabaseInfo().getDefaultDbName()); + break; + default: + throw new UnsupportedOperationException(deployment.toString()); + } try { + final boolean useRdsTools = env.info.getRequest().getFeatures() + .contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT) + && env.info.getRequest().getDatabaseEngine() == DatabaseEngine.PG + && env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE; env.auroraUtil.addAuroraAwsIamUser( env.info.getRequest().getDatabaseEngine(), url, env.info.getDatabaseInfo().getUsername(), env.info.getDatabaseInfo().getPassword(), env.info.getIamUsername(), - env.info.getDatabaseInfo().getDefaultDbName()); + env.info.getDatabaseInfo().getDefaultDbName(), + useRdsTools); } catch (SQLException e) { throw new RuntimeException("Error configuring IAM access.", e); @@ -882,21 +1250,14 @@ public void debugTests(String taskName) throws IOException, InterruptedException @Override public void close() throws Exception { - if (this.databaseContainers != null) { - for (GenericContainer container : this.databaseContainers) { - try { - container.stop(); - } catch (Exception ex) { - // ignore - } + for (GenericContainer container : this.databaseContainers) { + try { + container.stop(); + } catch (Exception ex) { + // ignore } - this.databaseContainers.clear(); - } - - if (this.testContainer != null) { - this.testContainer.stop(); - this.testContainer = null; } + this.databaseContainers.clear(); if (this.telemetryXRayContainer != null) { this.telemetryXRayContainer.stop(); @@ -908,6 +1269,11 @@ public void close() throws Exception { this.telemetryOtlpContainer = null; } + if (this.testContainer != null) { + this.testContainer.stop(); + this.testContainer = null; + } + if (this.proxyContainers != null) { for (ToxiproxyContainer proxyContainer : this.proxyContainers) { proxyContainer.stop(); @@ -917,25 +1283,155 @@ public void close() throws Exception { switch (this.info.getRequest().getDatabaseEngineDeployment()) { case AURORA: - case RDS_MULTI_AZ: - deleteDbCluster(); + if (this.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT) + && !StringUtils.isNullOrEmpty(this.info.getBlueGreenDeploymentId())) { + deleteBlueGreenDeployment(); + deleteDbCluster(true); + deleteCustomClusterParameterGroup(this.info.getClusterParameterGroupName()); + } else { + deleteDbCluster(false); + } + deAuthorizeIP(this); + break; + case RDS_MULTI_AZ_CLUSTER: + deleteDbCluster(false); + deAuthorizeIP(this); + break; + case RDS_MULTI_AZ_INSTANCE: + if 
(this.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT) + && !StringUtils.isNullOrEmpty(this.info.getBlueGreenDeploymentId())) { + deleteBlueGreenDeployment(); + } + deleteMultiAzInstance(); + deAuthorizeIP(this); break; case RDS: - throw new NotImplementedException(this.info.getRequest().getDatabaseEngineDeployment().toString()); - default: + // not in use at the moment + break; + case DOCKER: + // no external resources to dispose // do nothing + break; + default: + throw new NotImplementedException(this.info.getRequest().getDatabaseEngineDeployment().toString()); + } + } + + private void deleteDbCluster(boolean waitForCompletion) { + if (!this.reuseDb) { + LOGGER.finest("Deleting cluster " + this.rdsDbName + ".cluster-" + this.rdsDbDomain); + auroraUtil.deleteCluster( + this.rdsDbName, this.info.getRequest().getDatabaseEngineDeployment(), waitForCompletion); + LOGGER.finest("Deleted cluster " + this.rdsDbName + ".cluster-" + this.rdsDbDomain); } } - private void deleteDbCluster() { - if (!this.reuseAuroraDbCluster && !StringUtils.isNullOrEmpty(this.runnerIP)) { - auroraUtil.ec2DeauthorizesIP(runnerIP); + private void deleteMultiAzInstance() { + if (!this.reuseDb) { + LOGGER.finest("Deleting MultiAz Instance " + this.rdsDbName + "." + this.rdsDbDomain); + auroraUtil.deleteMultiAzInstance(this.rdsDbName, false); + LOGGER.finest("Deleted MultiAz Instance " + this.rdsDbName + "." + this.rdsDbDomain); } + } + + private void deleteBlueGreenDeployment() throws InterruptedException { + + BlueGreenDeployment blueGreenDeployment; + + switch (this.info.getRequest().getDatabaseEngineDeployment()) { + case AURORA: + if (this.reuseDb) { + break; + } - if (!this.reuseAuroraDbCluster) { - LOGGER.finest("Deleting cluster " + this.auroraClusterName + ".cluster-" + this.auroraClusterDomain); - auroraUtil.deleteCluster(this.auroraClusterName); - LOGGER.finest("Deleted cluster " + this.auroraClusterName + ".cluster-" + this.auroraClusterDomain); + blueGreenDeployment = auroraUtil.getBlueGreenDeployment(this.info.getBlueGreenDeploymentId()); + + if (blueGreenDeployment == null) { + return; + } + + auroraUtil.deleteBlueGreenDeployment(this.info.getBlueGreenDeploymentId(), true); + + // Remove extra DB cluster + + // For BGD in AVAILABLE status: source = blue, target = green + // For BGD in SWITCHOVER_COMPLETED: source = old1, target = blue + LOGGER.finest("BG source: " + blueGreenDeployment.source()); + LOGGER.finest("BG target: " + blueGreenDeployment.target()); + + if ("SWITCHOVER_COMPLETED".equals(blueGreenDeployment.status())) { + // Delete old1 cluster + DBCluster old1ClusterInfo = auroraUtil.getClusterByArn(blueGreenDeployment.source()); + if (old1ClusterInfo != null) { + auroraUtil.waitUntilClusterHasRightState(old1ClusterInfo.dbClusterIdentifier(), "available"); + LOGGER.finest("Deleting Aurora cluster " + old1ClusterInfo.dbClusterIdentifier()); + auroraUtil.deleteCluster( + old1ClusterInfo.dbClusterIdentifier(), + this.info.getRequest().getDatabaseEngineDeployment(), + true); + LOGGER.finest("Deleted Aurora cluster " + old1ClusterInfo.dbClusterIdentifier()); + } + } else { + // Delete green cluster + DBCluster greenClusterInfo = auroraUtil.getClusterByArn(blueGreenDeployment.target()); + if (greenClusterInfo != null) { + auroraUtil.promoteClusterToStandalone(blueGreenDeployment.target()); + LOGGER.finest("Deleting Aurora cluster " + greenClusterInfo.dbClusterIdentifier()); + auroraUtil.deleteCluster( + greenClusterInfo.dbClusterIdentifier(), + 
this.info.getRequest().getDatabaseEngineDeployment(), + true); + LOGGER.finest("Deleted Aurora cluster " + greenClusterInfo.dbClusterIdentifier()); + } + } + break; + case RDS_MULTI_AZ_INSTANCE: + if (this.reuseDb) { + break; + } + + blueGreenDeployment = auroraUtil.getBlueGreenDeployment(this.info.getBlueGreenDeploymentId()); + + if (blueGreenDeployment == null) { + return; + } + + auroraUtil.deleteBlueGreenDeployment(this.info.getBlueGreenDeploymentId(), true); + + // For BGD in AVAILABLE status: source = blue, target = green + // For BGD in SWITCHOVER_COMPLETED: source = old1, target = blue + LOGGER.finest("BG source: " + blueGreenDeployment.source()); + LOGGER.finest("BG target: " + blueGreenDeployment.target()); + + if ("SWITCHOVER_COMPLETED".equals(blueGreenDeployment.status())) { + // Delete old1 instance + DBInstance old1InstanceInfo = auroraUtil.getRdsInstanceInfoByArn(blueGreenDeployment.source()); + if (old1InstanceInfo != null) { + LOGGER.finest("Deleting MultiAz Instance " + old1InstanceInfo.dbInstanceIdentifier()); + auroraUtil.deleteMultiAzInstance(old1InstanceInfo.dbInstanceIdentifier(), true); + LOGGER.finest("Deleted MultiAz Instance " + old1InstanceInfo.dbInstanceIdentifier()); + } + } else { + // Delete green instance + DBInstance greenInstanceInfo = auroraUtil.getRdsInstanceInfoByArn(blueGreenDeployment.target()); + if (greenInstanceInfo != null) { + auroraUtil.promoteInstanceToStandalone(blueGreenDeployment.target()); + LOGGER.finest("Deleting MultiAz Instance " + greenInstanceInfo.dbInstanceIdentifier()); + auroraUtil.deleteMultiAzInstance(greenInstanceInfo.dbInstanceIdentifier(), true); + LOGGER.finest("Deleted MultiAz Instance " + greenInstanceInfo.dbInstanceIdentifier()); + } + } + break; + default: + throw new RuntimeException("Unsupported " + this.info.getRequest().getDatabaseEngineDeployment()); + } + } + + private void deleteCustomClusterParameterGroup(String groupName) { + try { + this.auroraUtil.deleteCustomClusterParameterGroup(groupName); + } catch (Exception ex) { + LOGGER.finest(String.format("Error deleting cluster parameter group %s. 
%s", groupName, ex)); } } @@ -951,8 +1447,8 @@ private static void preCreateEnvironment(int currentEnvIndex) { if (preCreateInfo.envPreCreateFuture == null && (preCreateInfo.request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.AURORA - || preCreateInfo.request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS - || preCreateInfo.request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ)) { + || preCreateInfo.request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS + || preCreateInfo.request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER)) { // run environment creation in advance int finalIndex = index; @@ -964,13 +1460,35 @@ private static void preCreateEnvironment(int currentEnvIndex) { preCreateInfo.envPreCreateFuture = envPreCreateExecutor.submit(() -> { final long startTime = System.nanoTime(); try { + initRandomBase(env); initDatabaseParams(env); - createDbCluster(env); - if (env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.IAM)) { - if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ) { - throw new RuntimeException("IAM isn't supported by " + DatabaseEngineDeployment.RDS_MULTI_AZ); - } - configureIamAccess(env); + initAwsCredentials(env); + + switch (env.info.getRequest().getDatabaseEngineDeployment()) { + case RDS_MULTI_AZ_INSTANCE: + initEnv(env); + authorizeRunnerIpAddress(env); + createMultiAzInstance(env); + configureIamAccess(env); + break; + case RDS_MULTI_AZ_CLUSTER: + initEnv(env); + authorizeRunnerIpAddress(env); + createDbCluster(env); + configureIamAccess(env); + break; + case AURORA: + initEnv(env); + authorizeRunnerIpAddress(env); + + if (env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)) { + createCustomClusterParameterGroup(env); + } + createDbCluster(env); + configureIamAccess(env); + break; + default: + throw new NotImplementedException(env.info.getRequest().getDatabaseEngineDeployment().toString()); } return env; diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfiguration.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfiguration.java index 6789df0b..f36f8af8 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfiguration.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfiguration.java @@ -24,8 +24,10 @@ public class TestEnvironmentConfiguration { Boolean.parseBoolean(System.getProperty("exclude-docker", "false")); public boolean excludeAurora = Boolean.parseBoolean(System.getProperty("exclude-aurora", "false")); - public boolean excludeMultiAz = - Boolean.parseBoolean(System.getProperty("exclude-multi-az", "false")); + public boolean excludeMultiAzCluster = + Boolean.parseBoolean(System.getProperty("exclude-multi-az-cluster", "false")); + public boolean excludeMultiAzInstance = + Boolean.parseBoolean(System.getProperty("exclude-multi-az-instance", "false")); public boolean excludePerformance = Boolean.parseBoolean(System.getProperty("exclude-performance", "false")); public boolean excludeMysqlEngine = @@ -58,27 +60,34 @@ public class TestEnvironmentConfiguration { Boolean.parseBoolean(System.getProperty("exclude-traces-telemetry", "false")); public boolean excludeMetricsTelemetry = Boolean.parseBoolean(System.getProperty("exclude-metrics-telemetry", "false")); + public boolean excludeBlueGreen = + 
Boolean.parseBoolean(System.getProperty("exclude-bg", "true")); + public boolean testBlueGreenOnly = + Boolean.parseBoolean(System.getProperty("test-bg-only", "false")); public boolean excludePython38 = Boolean.parseBoolean(System.getProperty("exclude-python-38", "false")); public boolean excludePython311 = Boolean.parseBoolean(System.getProperty("exclude-python-311", "false")); - public String testFilter = System.getProperty("FILTER"); + public String testFilter = System.getenv("FILTER"); public String rdsDbRegion = System.getenv("RDS_DB_REGION"); - public boolean reuseRdsCluster = Boolean.parseBoolean(System.getenv("REUSE_RDS_CLUSTER")); - public String rdsClusterName = System.getenv("RDS_CLUSTER_NAME"); // "cluster-mysql" - public String rdsClusterDomain = - System.getenv("RDS_CLUSTER_DOMAIN"); // "XYZ.us-west-2.rds.amazonaws.com" + public boolean reuseRdsDb = Boolean.parseBoolean(System.getenv("REUSE_RDS_DB")); + public String rdsDbName = System.getenv("RDS_DB_NAME"); // "cluster-mysql", "instance-name", "cluster-multi-az-name" + public String rdsDbDomain = + System.getenv("RDS_DB_DOMAIN"); // "XYZ.us-west-2.rds.amazonaws.com" + public String rdsEndpoint = - System.getenv("RDS_ENDPOINT"); // "https://rds-int.amazon.com" + System.getenv("RDS_ENDPOINT"); // "https://rds-int.amazon.com" - // Expected values: "latest", "lts", or engine version, for example, "15.4" - // If left as empty, will use LTS version - public String auroraMySqlDbEngineVersion = System.getenv("AURORA_MYSQL_DB_ENGINE_VERSION"); - public String auroraPgDbEngineVersion = System.getenv("AURORA_PG_ENGINE_VERSION"); + // Expected values: "latest", "default", or engine version, for example, "15.4" + // If left as empty, will use default version + public String mysqlVersion = + System.getenv("MYSQL_VERSION"); + public String pgVersion = + System.getenv("PG_VERSION"); public String dbName = System.getenv("DB_DATABASE_NAME"); public String dbUsername = System.getenv("DB_USERNAME"); diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java index d5cd972d..15011003 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java @@ -65,7 +65,10 @@ public Stream provideTestTemplateInvocationContex // Not in use. 
continue; } - if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ && config.excludeMultiAz) { + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER && config.excludeMultiAzCluster) { + continue; + } + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE && config.excludeMultiAzInstance) { continue; } @@ -96,13 +99,20 @@ public Stream provideTestTemplateInvocationContex if (numOfInstances == 2 && config.excludeInstances2) { continue; } + if (numOfInstances == 3 && config.excludeInstances3) { + continue; + } if (numOfInstances == 5 && config.excludeInstances5) { continue; } - if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ && numOfInstances != 3) { + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER && numOfInstances != 3) { // Multi-AZ clusters support only 3 instances continue; } + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE && numOfInstances != 1) { + // Multi-AZ instance deployments support only 1 instance + continue; + } if (deployment == DatabaseEngineDeployment.AURORA && numOfInstances == 3) { // Aurora supports clusters with 3 instances but running such tests is similar // to running tests on 5-instance cluster. @@ -118,36 +128,55 @@ public Stream provideTestTemplateInvocationContex continue; } - resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - engine, - instances, - instances == DatabaseInstances.SINGLE_INSTANCE ? 1 : numOfInstances, - deployment, - targetPythonVersion, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - engine == DatabaseEngine.PG ? TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED : null, - deployment == DatabaseEngineDeployment.DOCKER - && config.excludeTracesTelemetry - && config.excludeMetricsTelemetry - ? null - : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, - deployment == DatabaseEngineDeployment.DOCKER || config.excludeFailover - ? null - : TestEnvironmentFeatures.FAILOVER_SUPPORTED, - deployment == DatabaseEngineDeployment.DOCKER - || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ - || config.excludeIam - ? null - : TestEnvironmentFeatures.IAM, - config.excludeSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER, - config.excludePerformance ? null : TestEnvironmentFeatures.PERFORMANCE, - config.excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - config.excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - config.testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null, - config.excludeTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED, - config.excludeMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED))); + for (boolean withBlueGreenFeature : Arrays.asList(true, false)) { + if (!withBlueGreenFeature) { + if (config.testBlueGreenOnly) { + continue; + } + } + if (withBlueGreenFeature) { + if (config.excludeBlueGreen && !config.testBlueGreenOnly) { + continue; + } + // Run BlueGreen tests only for MultiAz Instances with 1 node or for Aurora + if (deployment != DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE + && deployment != DatabaseEngineDeployment.AURORA) { + continue; + } + } + + resultContextList.add( + getEnvironment( + new TestEnvironmentRequest( + engine, + instances, + instances == DatabaseInstances.SINGLE_INSTANCE ? 1 : numOfInstances, + deployment, + targetPythonVersion, + TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, + engine == DatabaseEngine.PG ? 
TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED : null, + deployment == DatabaseEngineDeployment.DOCKER + && config.excludeTracesTelemetry + && config.excludeMetricsTelemetry + ? null + : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, + deployment == DatabaseEngineDeployment.DOCKER || config.excludeFailover + ? null + : TestEnvironmentFeatures.FAILOVER_SUPPORTED, + deployment == DatabaseEngineDeployment.DOCKER + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER + || config.excludeIam + ? null + : TestEnvironmentFeatures.IAM, + config.excludeSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER, + config.excludePerformance ? null : TestEnvironmentFeatures.PERFORMANCE, + config.excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, + config.excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, + config.testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null, + config.excludeTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED, + config.excludeMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED, + withBlueGreenFeature ? TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT : null))); + } } } } diff --git a/tests/integration/host/src/test/java/integration/util/AuroraTestUtility.java b/tests/integration/host/src/test/java/integration/util/AuroraTestUtility.java index 919737b8..a04d44a9 100644 --- a/tests/integration/host/src/test/java/integration/util/AuroraTestUtility.java +++ b/tests/integration/host/src/test/java/integration/util/AuroraTestUtility.java @@ -16,10 +16,14 @@ package integration.util; +import static integration.DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE; +import static org.junit.jupiter.api.Assertions.fail; + import integration.DatabaseEngine; import integration.DatabaseEngineDeployment; +import integration.TestEnvironmentFeatures; +import integration.TestEnvironmentRequest; import integration.TestInstanceInfo; - import java.io.BufferedReader; import java.io.InputStreamReader; import java.net.URI; @@ -28,31 +32,45 @@ import java.net.UnknownHostException; import java.sql.Connection; import java.sql.DriverManager; -import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; import java.time.Duration; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; +import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Optional; -import java.util.Random; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import java.util.stream.Collectors; - +import org.checkerframework.checker.nullness.qual.Nullable; +import org.testcontainers.shaded.org.apache.commons.lang3.NotImplementedException; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.waiters.WaiterResponse; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.ec2.Ec2Client; import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsResponse; import 
software.amazon.awssdk.services.ec2.model.Ec2Exception; +import software.amazon.awssdk.services.ec2.model.IpPermission; +import software.amazon.awssdk.services.ec2.model.IpRange; import software.amazon.awssdk.services.rds.RdsClient; import software.amazon.awssdk.services.rds.RdsClientBuilder; +import software.amazon.awssdk.services.rds.model.ApplyMethod; +import software.amazon.awssdk.services.rds.model.BlueGreenDeployment; +import software.amazon.awssdk.services.rds.model.BlueGreenDeploymentNotFoundException; +import software.amazon.awssdk.services.rds.model.CreateBlueGreenDeploymentRequest; +import software.amazon.awssdk.services.rds.model.CreateBlueGreenDeploymentResponse; +import software.amazon.awssdk.services.rds.model.CreateDbClusterParameterGroupRequest; +import software.amazon.awssdk.services.rds.model.CreateDbClusterParameterGroupResponse; import software.amazon.awssdk.services.rds.model.CreateDbClusterRequest; import software.amazon.awssdk.services.rds.model.CreateDbInstanceRequest; import software.amazon.awssdk.services.rds.model.DBCluster; @@ -60,90 +78,95 @@ import software.amazon.awssdk.services.rds.model.DBEngineVersion; import software.amazon.awssdk.services.rds.model.DBInstance; import software.amazon.awssdk.services.rds.model.DbClusterNotFoundException; +import software.amazon.awssdk.services.rds.model.DbInstanceNotFoundException; +import software.amazon.awssdk.services.rds.model.DeleteBlueGreenDeploymentRequest; +import software.amazon.awssdk.services.rds.model.DeleteBlueGreenDeploymentResponse; +import software.amazon.awssdk.services.rds.model.DeleteDbClusterParameterGroupRequest; import software.amazon.awssdk.services.rds.model.DeleteDbClusterResponse; import software.amazon.awssdk.services.rds.model.DeleteDbInstanceRequest; +import software.amazon.awssdk.services.rds.model.DeleteDbInstanceResponse; +import software.amazon.awssdk.services.rds.model.DescribeBlueGreenDeploymentsResponse; import software.amazon.awssdk.services.rds.model.DescribeDbClustersRequest; import software.amazon.awssdk.services.rds.model.DescribeDbClustersResponse; import software.amazon.awssdk.services.rds.model.DescribeDbEngineVersionsRequest; import software.amazon.awssdk.services.rds.model.DescribeDbEngineVersionsResponse; +import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest; import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse; import software.amazon.awssdk.services.rds.model.Filter; +import software.amazon.awssdk.services.rds.model.InvalidDbClusterStateException; +import software.amazon.awssdk.services.rds.model.InvalidDbInstanceStateException; +import software.amazon.awssdk.services.rds.model.ModifyDbClusterParameterGroupRequest; +import software.amazon.awssdk.services.rds.model.ModifyDbClusterParameterGroupResponse; +import software.amazon.awssdk.services.rds.model.Parameter; +import software.amazon.awssdk.services.rds.model.PromoteReadReplicaDbClusterRequest; +import software.amazon.awssdk.services.rds.model.PromoteReadReplicaDbClusterResponse; +import software.amazon.awssdk.services.rds.model.PromoteReadReplicaRequest; +import software.amazon.awssdk.services.rds.model.PromoteReadReplicaResponse; +import software.amazon.awssdk.services.rds.model.RdsException; import software.amazon.awssdk.services.rds.model.Tag; import software.amazon.awssdk.services.rds.waiters.RdsWaiter; /** - * Creates and destroys AWS RDS Clusters and Instances. 
To use this functionality the following environment variables + * must be defined: - AWS_ACCESS_KEY_ID - AWS_SECRET_ACCESS_KEY */ public class AuroraTestUtility { private static final Logger LOGGER = Logger.getLogger(AuroraTestUtility.class.getName()); - - // Default values - private String dbUsername = "my_test_username"; - private String dbPassword = "my_test_password"; - private String dbName = "test"; - private String dbIdentifier = "test-identifier"; - private DatabaseEngineDeployment dbEngineDeployment; - private String dbEngine = "aurora-postgresql"; - private String dbEngineVersion = "13.9"; - private String dbInstanceClass = "db.r5.large"; - private final String storageType = "io1"; - private final int allocatedStorage = 100; - private final int iops = 1000; - private final Region dbRegion; - private final String dbSecGroup = "default"; - private int numOfInstances = 5; - private ArrayList instances = new ArrayList<>(); + private static final String DUPLICATE_IP_ERROR_CODE = "InvalidPermission.Duplicate"; + private static final String DEFAULT_SECURITY_GROUP = "default"; + private static final String DEFAULT_STORAGE_TYPE = "gp3"; + private static final int DEFAULT_IOPS = 64000; + private static final int DEFAULT_ALLOCATED_STORAGE = 400; + private static final int MULTI_AZ_SIZE = 3; private final RdsClient rdsClient; private final Ec2Client ec2Client; - private static final Random rand = new Random(); - - private static final String DUPLICATE_IP_ERROR_CODE = "InvalidPermission.Duplicate"; public AuroraTestUtility( - String region, String rdsEndpoint, String awsAccessKeyId, String awsSecretAccessKey, String awsSessionToken) - throws URISyntaxException { + String region, String rdsEndpoint, String awsAccessKeyId, String awsSecretAccessKey, String awsSessionToken) { this( - getRegionInternal(region), - rdsEndpoint, - StaticCredentialsProvider.create( - StringUtils.isNullOrEmpty(awsSessionToken) - ? AwsBasicCredentials.create(awsAccessKeyId, awsSecretAccessKey) - : AwsSessionCredentials.create(awsAccessKeyId, awsSecretAccessKey, awsSessionToken))); + getRegionInternal(region), + rdsEndpoint, + StaticCredentialsProvider.create( + StringUtils.isNullOrEmpty(awsSessionToken) + ? AwsBasicCredentials.create(awsAccessKeyId, awsSecretAccessKey) + : AwsSessionCredentials.create(awsAccessKeyId, awsSecretAccessKey, awsSessionToken))); } /** - * Initializes an AmazonRDS & AmazonEC2 client. + * Creates a TestUtility instance. As part of the creation, an RdsClient and Ec2Client are initialized. * - * @param region define AWS Regions, refer to * Regions, - * Availability Zones, and Local Zones - * @param credentialsProvider Specific AWS credential provider + * <a + * href="https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts + * .RegionsAndAvailabilityZones.html"> + * Regions, Availability Zones, and Local Zones</a> + * @param credentialsProvider The AWS credential provider to use to initialize the RdsClient and Ec2Client. 
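+ * @throws RuntimeException if the given rdsEndpoint cannot be parsed as a valid URI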
*/ - public AuroraTestUtility(Region region, String rdsEndpoint, AwsCredentialsProvider credentialsProvider) - throws URISyntaxException { - dbRegion = region; + public AuroraTestUtility(Region region, String rdsEndpoint, AwsCredentialsProvider credentialsProvider) { final RdsClientBuilder rdsClientBuilder = RdsClient.builder() - .region(dbRegion) - .credentialsProvider(credentialsProvider); + .region(region) + .credentialsProvider(credentialsProvider); if (!StringUtils.isNullOrEmpty(rdsEndpoint)) { - rdsClientBuilder.endpointOverride(new URI(rdsEndpoint)); + try { + rdsClientBuilder.endpointOverride(new URI(rdsEndpoint)); + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } } rdsClient = rdsClientBuilder.build(); ec2Client = Ec2Client.builder() - .region(dbRegion) - .credentialsProvider(credentialsProvider) - .build(); + .region(region) + .credentialsProvider(credentialsProvider) + .build(); } protected static Region getRegionInternal(String rdsRegion) { Optional regionOptional = - Region.regions().stream().filter(r -> r.id().equalsIgnoreCase(rdsRegion)).findFirst(); + Region.regions().stream().filter(r -> r.id().equalsIgnoreCase(rdsRegion)).findFirst(); if (regionOptional.isPresent()) { return regionOptional.get(); @@ -152,91 +175,186 @@ protected static Region getRegionInternal(String rdsRegion) { } /** - * Creates RDS Cluster/Instances and waits until they are up, and proper IP whitelisting for databases. + * Creates an RDS cluster based on the passed-in details. After the cluster is created, this method waits + * until it is available and creates a database with the + * given name within the cluster. * - * @param username Master username for access to database - * @param password Master password for access to database - * @param dbName Database name - * @param identifier Database cluster identifier - * @param engine Database engine to use, refer to - * https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html - * @param instanceClass instance class, refer to - * https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html + * @param username the master username for access to the database + * @param password the master password for access to the database + * @param dbName the database to create within the cluster + * @param identifier the cluster identifier + * @param deployment the engine deployment to use + * @param region the region that the cluster should be created in + * @param engine the engine to use, refer to + * CreateDbClusterRequest.engine + * @param instanceClass the instance class, refer to + * Supported instance classes * @param version the database engine's version - * @return An endpoint for one of the instances + * @param numInstances the number of instances to create for the cluster * @throws InterruptedException when clusters have not started after 30 minutes */ - public String createCluster( + public void createCluster( String username, String password, String dbName, String identifier, DatabaseEngineDeployment deployment, + String region, String engine, String instanceClass, String version, - int numOfInstances, - ArrayList instances) + @Nullable String clusterParameterGroupName, + int numInstances) throws InterruptedException { - this.dbUsername = username; - this.dbPassword = password; - this.dbName = dbName; - this.dbIdentifier = identifier; - this.dbEngineDeployment = deployment; - this.dbEngine = engine; - this.dbInstanceClass = instanceClass; - 
this.dbEngineVersion = version; - this.numOfInstances = numOfInstances; - this.instances = instances; - - switch (this.dbEngineDeployment) { + + switch (deployment) { case AURORA: - return createAuroraCluster(); - case RDS_MULTI_AZ: - return createMultiAzCluster(); + createAuroraCluster( + username, password, dbName, identifier, region, engine, instanceClass, + version, clusterParameterGroupName, numInstances); + break; + case RDS_MULTI_AZ_CLUSTER: + if (numInstances != MULTI_AZ_SIZE) { + throw new RuntimeException( + "A multi-az cluster with " + numInstances + " instances was requested, but multi-az clusters must have " + + MULTI_AZ_SIZE + " instances."); + } + createMultiAzCluster( + username, password, dbName, identifier, region, engine, instanceClass, version); + break; default: - throw new UnsupportedOperationException(this.dbEngineDeployment.toString()); + throw new UnsupportedOperationException(deployment.toString()); + } + } + + public String createMultiAzInstance( + String username, + String password, + String dbName, + String identifier, + DatabaseEngineDeployment deployment, + String engine, + String instanceClass, + String version, + ArrayList instances) { + + if (deployment != RDS_MULTI_AZ_INSTANCE) { + throw new UnsupportedOperationException(deployment.toString()); + } + + rdsClient.createDBInstance(CreateDbInstanceRequest.builder() + .dbInstanceIdentifier(identifier) + .publiclyAccessible(true) + .dbName(dbName) + .masterUsername(username) + .masterUserPassword(password) + .enableIAMDatabaseAuthentication(true) + .multiAZ(true) + .engine(engine) + .engineVersion(version) + .dbInstanceClass(instanceClass) + .enablePerformanceInsights(false) + .backupRetentionPeriod(1) + .storageEncrypted(true) + .storageType(DEFAULT_STORAGE_TYPE) + .allocatedStorage(DEFAULT_ALLOCATED_STORAGE) + .iops(DEFAULT_IOPS) + .tags(this.getTag()) + .build()); + + // Wait for all instances to be up + final RdsWaiter waiter = rdsClient.waiter(); + WaiterResponse waiterResponse = + waiter.waitUntilDBInstanceAvailable( + (requestBuilder) -> + requestBuilder.filters( + Filter.builder().name("db-instance-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.maxAttempts(240).waitTimeout(Duration.ofMinutes(240))); + + if (waiterResponse.matched().exception().isPresent()) { + deleteMultiAzInstance(identifier, false); + throw new RuntimeException( + "Unable to start AWS RDS Instance after waiting for 240 minutes"); + } + + DescribeDbInstancesResponse dbInstancesResult = waiterResponse.matched().response().orElse(null); + if (dbInstancesResult == null) { + throw new RuntimeException("Unable to get instance details."); + } + + final String endpoint = dbInstancesResult.dbInstances().get(0).endpoint().address(); + final String rdsDomainPrefix = endpoint.substring(endpoint.indexOf('.') + 1); + + for (DBInstance instance : dbInstancesResult.dbInstances()) { + instances.add( + new TestInstanceInfo( + instance.dbInstanceIdentifier(), + instance.endpoint().address(), + instance.endpoint().port())); } + + return rdsDomainPrefix; } /** - * Creates RDS Cluster/Instances and waits until they are up, and proper IP whitelisting for databases. + * Creates an RDS Aurora cluster based on the passed-in details. After the cluster is created, this method waits + * until it is available and creates a database with the + * given name within the cluster. 
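+ * <p>A minimal usage sketch with illustrative values (the identifiers, versions, and credentials below are examples, not values taken from the test suite): + * <pre>{@code + * AuroraTestUtility util = new AuroraTestUtility("us-east-2", "", "myAccessKeyId", "mySecretAccessKey", ""); + * util.createAuroraCluster("user", "password", "test_database", "test-pg-abc123", "us-east-2", + * "aurora-postgresql", "db.r6g.large", "15.4", null, 2); + * }</pre>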
* - * @return An endpoint for one of the instances + * @param username the master username for access to the database + * @param password the master password for access to the database + * @param dbName the database to create within the cluster + * @param identifier the cluster identifier + * @param region the region that the cluster should be created in + * @param engine the engine to use, refer to + * CreateDbClusterRequest.engine + * @param instanceClass the instance class, refer to + * Supported instance classes + * @param version the database engine's version + * @param numInstances the number of instances to create for the cluster * @throws InterruptedException when clusters have not started after 30 minutes */ - public String createAuroraCluster() throws InterruptedException { - // Create Cluster - final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build(); - + public void createAuroraCluster( + String username, + String password, + String dbName, + String identifier, + String region, + String engine, + String instanceClass, + String version, + @Nullable String clusterParameterGroupName, + int numInstances) + throws InterruptedException { final CreateDbClusterRequest dbClusterRequest = CreateDbClusterRequest.builder() - .dbClusterIdentifier(dbIdentifier) + .dbClusterIdentifier(identifier) .databaseName(dbName) - .masterUsername(dbUsername) - .masterUserPassword(dbPassword) - .sourceRegion(dbRegion.id()) + .masterUsername(username) + .masterUserPassword(password) + .sourceRegion(region) .enableIAMDatabaseAuthentication(true) - .engine(dbEngine) - .engineVersion(dbEngineVersion) + .engine(engine) + .engineVersion(version) .storageEncrypted(true) - .tags(testRunnerTag) + .tags(this.getTag()) + .dbClusterParameterGroupName(clusterParameterGroupName) .build(); rdsClient.createDBCluster(dbClusterRequest); // Create Instances - for (int i = 1; i <= numOfInstances; i++) { - final String instanceName = dbIdentifier + "-" + i; + for (int i = 1; i <= numInstances; i++) { + final String instanceName = identifier + "-" + i; rdsClient.createDBInstance( CreateDbInstanceRequest.builder() - .dbClusterIdentifier(dbIdentifier) + .dbClusterIdentifier(identifier) .dbInstanceIdentifier(instanceName) - .dbInstanceClass(dbInstanceClass) - .engine(dbEngine) - .engineVersion(dbEngineVersion) + .dbInstanceClass(instanceClass) + .engine(engine) + .engineVersion(version) .publiclyAccessible(true) - .tags(testRunnerTag) + .tags(this.getTag()) .build()); } @@ -246,109 +364,151 @@ public String createAuroraCluster() throws InterruptedException { waiter.waitUntilDBInstanceAvailable( (requestBuilder) -> requestBuilder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build()), - (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(30))); + Filter.builder().name("db-cluster-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.maxAttempts(480).waitTimeout(Duration.ofMinutes(240))); if (waiterResponse.matched().exception().isPresent()) { - deleteCluster(); + deleteCluster(identifier, DatabaseEngineDeployment.AURORA, false); throw new InterruptedException( "Unable to start AWS RDS Cluster & Instances after waiting for 30 minutes"); } - - final DescribeDbInstancesResponse dbInstancesResult = - rdsClient.describeDBInstances( - (builder) -> - builder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build())); - final String endpoint = dbInstancesResult.dbInstances().get(0).endpoint().address(); - final 
String clusterDomainPrefix = endpoint.substring(endpoint.indexOf('.') + 1); - - for (DBInstance instance : dbInstancesResult.dbInstances()) { - this.instances.add( - new TestInstanceInfo( - instance.dbInstanceIdentifier(), - instance.endpoint().address(), - instance.endpoint().port())); - } - - return clusterDomainPrefix; } /** - * Creates RDS Cluster/Instances and waits until they are up, and proper IP whitelisting for databases. + * Creates an RDS multi-az cluster based on the passed-in details. After the cluster is created, this method waits + * until it is available and creates a database with the + * given name within the cluster. * - * @return An endpoint for one of the instances + * @param username the master username for access to the database + * @param password the master password for access to the database + * @param dbName the database to create within the cluster + * @param identifier the cluster identifier + * @param region the region that the cluster should be created in + * @param engine the engine to use, refer to + * CreateDbClusterRequest.engine + * @param instanceClass the instance class, refer to + * Supported instance classes + * @param version the database engine's version * @throws InterruptedException when clusters have not started after 30 minutes */ - public String createMultiAzCluster() throws InterruptedException { - // Create Cluster - final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build(); + public void createMultiAzCluster(String username, + String password, + String dbName, + String identifier, + String region, + String engine, + String instanceClass, + String version) + throws InterruptedException { CreateDbClusterRequest.Builder clusterBuilder = CreateDbClusterRequest.builder() - .dbClusterIdentifier(dbIdentifier) + .dbClusterIdentifier(identifier) + .publiclyAccessible(true) .databaseName(dbName) - .masterUsername(dbUsername) - .masterUserPassword(dbPassword) - .sourceRegion(dbRegion.id()) - .engine(dbEngine) - .engineVersion(dbEngineVersion) + .masterUsername(username) + .masterUserPassword(password) + .sourceRegion(region) + .engine(engine) + .engineVersion(version) + .enablePerformanceInsights(false) + .backupRetentionPeriod(1) .storageEncrypted(true) - .tags(testRunnerTag); - - clusterBuilder = - clusterBuilder.allocatedStorage(allocatedStorage) - .dbClusterInstanceClass(dbInstanceClass) - .storageType(storageType) - .iops(iops); + .tags(this.getTag()) + .allocatedStorage(DEFAULT_ALLOCATED_STORAGE) + .dbClusterInstanceClass(instanceClass) + .storageType(DEFAULT_STORAGE_TYPE) + .iops(DEFAULT_IOPS); rdsClient.createDBCluster(clusterBuilder.build()); - // For multi-AZ deployments, the cluster instances are created automatically. - - // Wait for all instances to be up + // For multi-AZ deployments, the cluster instances are created automatically. Wait for all instances to be up. 
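+ // The SDK waiter below polls DescribeDBInstances, filtered by db-cluster-id, until every instance reports available or the 30-minute timeout elapses.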
final RdsWaiter waiter = rdsClient.waiter(); WaiterResponse waiterResponse = waiter.waitUntilDBInstanceAvailable( (requestBuilder) -> requestBuilder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build()), + Filter.builder().name("db-cluster-id").values(identifier).build()), (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(30))); if (waiterResponse.matched().exception().isPresent()) { - deleteCluster(); + deleteCluster(identifier, DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER, false); throw new InterruptedException( "Unable to start AWS RDS Cluster & Instances after waiting for 30 minutes"); } + } + public List getDBInstances(String clusterId) { final DescribeDbInstancesResponse dbInstancesResult = rdsClient.describeDBInstances( (builder) -> - builder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build())); - final String endpoint = dbInstancesResult.dbInstances().get(0).endpoint().address(); - final String clusterDomainPrefix = endpoint.substring(endpoint.indexOf('.') + 1); + builder.filters(Filter.builder().name("db-cluster-id").values(clusterId).build())); + return dbInstancesResult.dbInstances(); + } - for (DBInstance instance : dbInstancesResult.dbInstances()) { - this.instances.add( - new TestInstanceInfo( - instance.dbInstanceIdentifier(), - instance.endpoint().address(), - instance.endpoint().port())); + public void createCustomClusterParameterGroup( + String groupName, String engine, String engineVersion, DatabaseEngine databaseEngine) { + CreateDbClusterParameterGroupResponse response = rdsClient.createDBClusterParameterGroup( + CreateDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .description("Test custom cluster parameter group for BGD.") + .dbParameterGroupFamily(this.getAuroraParameterGroupFamily(engine, engineVersion)) + .build()); + + if (!response.sdkHttpResponse().isSuccessful()) { + throw new RuntimeException("Error creating custom cluster parameter group. " + response.sdkHttpResponse()); } - return clusterDomainPrefix; + ModifyDbClusterParameterGroupResponse response2; + switch (databaseEngine) { + case MYSQL: + response2 = rdsClient.modifyDBClusterParameterGroup( + ModifyDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .parameters(Parameter.builder() + .parameterName("binlog_format") + .parameterValue("ROW") + .applyMethod(ApplyMethod.PENDING_REBOOT) + .build()) + .build()); + break; + case PG: + response2 = rdsClient.modifyDBClusterParameterGroup( + ModifyDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .parameters(Parameter.builder() + .parameterName("rds.logical_replication") + .parameterValue("true") + .applyMethod(ApplyMethod.PENDING_REBOOT) + .build()) + .build()); + break; + default: + throw new UnsupportedOperationException(databaseEngine.toString()); + } + + if (!response2.sdkHttpResponse().isSuccessful()) { + throw new RuntimeException("Error updating parameter. " + response2.sdkHttpResponse()); + } + } + + public void deleteCustomClusterParameterGroup(String groupName) { + rdsClient.deleteDBClusterParameterGroup( + DeleteDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .build() + ); } /** - * Gets public IP. + * Gets the public IP address for the current machine. 
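+ * The address is determined by querying https://checkip.amazonaws.com.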
* - * @return public IP of user - * @throws UnknownHostException when checkip host isn't available + * @return the public IP address for the current machine + * @throws UnknownHostException when checkip.amazonaws.com isn't available */ public String getPublicIPAddress() throws UnknownHostException { String ip; try { - URL ipChecker = new URL("http://checkip.amazonaws.com"); + URL ipChecker = new URL("https://checkip.amazonaws.com"); BufferedReader reader = new BufferedReader(new InputStreamReader(ipChecker.openStream())); ip = reader.readLine(); } catch (Exception e) { @@ -358,7 +518,9 @@ public String getPublicIPAddress() throws UnknownHostException { } /** - * Authorizes IP to EC2 Security groups for RDS access. + * Adds the given IP address to the default security group for RDS access. + * + * @param ipAddress the IP address to add to the default security group */ public void ec2AuthorizeIP(String ipAddress) { if (StringUtils.isNullOrEmpty(ipAddress)) { @@ -370,14 +532,18 @@ public void ec2AuthorizeIP(String ipAddress) { } try { + IpRange ipRange = IpRange.builder() + .cidrIp(ipAddress + "/32") + .description("Test run at " + Instant.now()) + .build(); + IpPermission ipPermission = IpPermission.builder() + .ipRanges(ipRange) + .ipProtocol("-1") // All protocols + .fromPort(0) // For all ports + .toPort(65535) + .build(); ec2Client.authorizeSecurityGroupIngress( - (builder) -> - builder - .groupName(dbSecGroup) - .cidrIp(ipAddress + "/32") - .ipProtocol("-1") // All protocols - .fromPort(0) // For all ports - .toPort(65535)); + (builder) -> builder.groupName(DEFAULT_SECURITY_GROUP).ipPermissions(ipPermission)); } catch (Ec2Exception exception) { if (!DUPLICATE_IP_ERROR_CODE.equalsIgnoreCase(exception.awsErrorDetails().errorCode())) { throw exception; @@ -390,7 +556,7 @@ private boolean ipExists(String ipAddress) { ec2Client.describeSecurityGroups( (builder) -> builder - .groupNames(dbSecGroup) + .groupNames(DEFAULT_SECURITY_GROUP) .filters( software.amazon.awssdk.services.ec2.model.Filter.builder() .name("ip-permission.cidr") @@ -401,7 +567,9 @@ private boolean ipExists(String ipAddress) { } /** - * De-authorizes IP from EC2 Security groups. + * Removes the given IP address from the default security group. + * + * @param ipAddress the IP address to remove from the default security group. */ public void ec2DeauthorizesIP(String ipAddress) { if (StringUtils.isNullOrEmpty(ipAddress)) { @@ -411,7 +579,7 @@ public void ec2DeauthorizesIP(String ipAddress) { ec2Client.revokeSecurityGroupIngress( (builder) -> builder - .groupName(dbSecGroup) + .groupName(DEFAULT_SECURITY_GROUP) .cidrIp(ipAddress + "/32") .ipProtocol("-1") // All protocols .fromPort(0) // For all ports @@ -422,46 +590,49 @@ public void ec2DeauthorizesIP(String ipAddress) { } /** - * Destroys all instances and clusters. Removes IP from EC2 whitelist. + * Deletes the specified cluster. * - * @param identifier database identifier to delete + * @param identifier the cluster identifier for the cluster to delete + * @param deployment the engine deployment for the cluster to delete + * @param waitForCompletion if true, waits until the cluster is completely deleted */ - public void deleteCluster(String identifier) { - dbIdentifier = identifier; - deleteCluster(); - } - - /** - * Destroys all instances and clusters. Removes IP from EC2 whitelist. 
- */ - public void deleteCluster() { - - switch (this.dbEngineDeployment) { + public void deleteCluster(String identifier, DatabaseEngineDeployment deployment, boolean waitForCompletion) { switch (deployment) { case AURORA: - this.deleteAuroraCluster(); + this.deleteAuroraCluster(identifier, waitForCompletion); break; - case RDS_MULTI_AZ: - this.deleteMultiAzCluster(); + case RDS_MULTI_AZ_CLUSTER: + this.deleteMultiAzCluster(identifier, waitForCompletion); break; default: - throw new UnsupportedOperationException(this.dbEngineDeployment.toString()); + throw new UnsupportedOperationException(deployment.toString()); } } /** - * Destroys all instances and clusters. + * Deletes the specified Aurora cluster. + * + * @param identifier the cluster identifier for the cluster to delete + * @param waitForCompletion if true, waits until the cluster is completely deleted */ - public void deleteAuroraCluster() { + public void deleteAuroraCluster(String identifier, boolean waitForCompletion) { + DBCluster dbCluster = getDBCluster(identifier); + if (dbCluster == null) { + return; + } + List members = dbCluster.dbClusterMembers(); + // Tear down instances - for (int i = 1; i <= numOfInstances; i++) { + for (DBClusterMember member : members) { try { rdsClient.deleteDBInstance( DeleteDbInstanceRequest.builder() - .dbInstanceIdentifier(dbIdentifier + "-" + i) + .dbInstanceIdentifier(member.dbInstanceIdentifier()) .skipFinalSnapshot(true) .build()); } catch (Exception ex) { - LOGGER.finest("Error deleting instance " + dbIdentifier + "-" + i + ". " + ex.getMessage()); + LOGGER.finest("Error deleting instance '" + + member.dbInstanceIdentifier() + "' of Aurora cluster: " + ex.getMessage()); // Ignore this error and continue with other instances } } @@ -471,7 +642,7 @@ public void deleteAuroraCluster() { while (--remainingAttempts > 0) { try { DeleteDbClusterResponse response = rdsClient.deleteDBCluster( - (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(dbIdentifier))); + (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(identifier))); if (response.sdkHttpResponse().isSuccessful()) { break; } @@ -479,23 +650,44 @@ public void deleteAuroraCluster() { } catch (DbClusterNotFoundException ex) { // ignore + return; + } catch (InvalidDbClusterStateException ex) { + throw new RuntimeException("Error deleting db cluster " + identifier, ex); } catch (Exception ex) { - LOGGER.warning("Error deleting db cluster " + dbIdentifier + ": " + ex); + LOGGER.warning("Error deleting db cluster " + identifier + ": " + ex); + return; + } + } + + if (waitForCompletion) { + final RdsWaiter waiter = rdsClient.waiter(); + WaiterResponse waiterResponse = + waiter.waitUntilDBClusterDeleted( + (requestBuilder) -> + requestBuilder.filters( + Filter.builder().name("db-cluster-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(60))); + + if (waiterResponse.matched().exception().isPresent()) { + throw new RuntimeException( + "Unable to delete AWS Aurora Cluster after waiting for 60 minutes"); } } } /** - * Destroys all instances and clusters. + * Deletes the specified multi-az cluster. 
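+ * If the initial delete request does not succeed, it is retried, up to four attempts in total.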
 /**
- * Destroys all instances and clusters.
+ * Deletes the specified multi-az cluster and removes the current IP address from the default security group.
+ *
+ * @param identifier the cluster identifier for the cluster to delete
+ * @param waitForCompletion if true, wait until the cluster has been completely deleted
  */
-public void deleteMultiAzCluster() {
-  // deleteDBinstance requests are not necessary to delete a multi-az cluster.
+public void deleteMultiAzCluster(String identifier, boolean waitForCompletion) {
   // Tear down cluster
   int remainingAttempts = 5;
   while (--remainingAttempts > 0) {
     try {
       DeleteDbClusterResponse response = rdsClient.deleteDBCluster(
-          (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(dbIdentifier)));
+          (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(identifier)));
       if (response.sdkHttpResponse().isSuccessful()) {
         break;
       }
@@ -503,12 +695,114 @@ public void deleteMultiAzCluster() {

     } catch (DbClusterNotFoundException ex) {
       // ignore
+      return;
+    } catch (Exception ex) {
+      LOGGER.warning("Error deleting db cluster " + identifier + ": " + ex);
+      return;
+    }
+  }
+
+  if (waitForCompletion) {
+    final RdsWaiter waiter = rdsClient.waiter();
+    WaiterResponse<DescribeDbClustersResponse> waiterResponse =
+        waiter.waitUntilDBClusterDeleted(
+            (requestBuilder) ->
+                requestBuilder.filters(
+                    Filter.builder().name("db-cluster-id").values(identifier).build()),
+            (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(60)));
+
+    if (waiterResponse.matched().exception().isPresent()) {
+      throw new RuntimeException(
+          "Unable to delete RDS MultiAz Cluster after waiting for 60 minutes");
+    }
+  }
+}
+
+public void deleteMultiAzInstance(final String identifier, boolean waitForCompletion) {
+  // Tear down MultiAz Instance
+  int remainingAttempts = 5;
+  while (--remainingAttempts > 0) {
+    try {
+      DeleteDbInstanceResponse response = rdsClient.deleteDBInstance(
+          builder -> builder.skipFinalSnapshot(true).dbInstanceIdentifier(identifier).build());
+      if (response.sdkHttpResponse().isSuccessful()) {
+        break;
+      }
+      TimeUnit.SECONDS.sleep(30);
+
+    } catch (InvalidDbInstanceStateException invalidDbInstanceStateException) {
+      // Instance is already being deleted - ignore it.
+      LOGGER.finest("MultiAz Instance " + identifier + " is already being deleted. "
+          + invalidDbInstanceStateException);
+      break;
+    } catch (DbInstanceNotFoundException ex) {
+      // ignore
+      LOGGER.warning("Error deleting db MultiAz Instance " + identifier + ". Instance not found: " + ex);
+      break;
     } catch (Exception ex) {
-      LOGGER.warning("Error deleting db cluster " + dbIdentifier + ": " + ex);
+      LOGGER.warning("Error deleting db MultiAz Instance " + identifier + ": " + ex);
+    }
+  }
+
+  if (waitForCompletion) {
+    final RdsWaiter waiter = rdsClient.waiter();
+    WaiterResponse<DescribeDbInstancesResponse> waiterResponse =
+        waiter.waitUntilDBInstanceDeleted(
+            (requestBuilder) ->
+                requestBuilder.filters(
+                    Filter.builder().name("db-instance-id").values(identifier).build()),
+            (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(60)));
+
+    if (waiterResponse.matched().exception().isPresent()) {
+      throw new RuntimeException(
+          "Unable to delete RDS MultiAz Instance after waiting for 60 minutes");
+    }
+  }
+}
+
+public void promoteClusterToStandalone(String clusterArn) {
+  if (StringUtils.isNullOrEmpty(clusterArn)) {
+    return;
+  }
+
+  DBCluster clusterInfo = getClusterByArn(clusterArn);
+
+  if (clusterInfo == null || StringUtils.isNullOrEmpty(clusterInfo.replicationSourceIdentifier())) {
+    return;
+  }
+
+  PromoteReadReplicaDbClusterResponse response = rdsClient.promoteReadReplicaDBCluster(
+      PromoteReadReplicaDbClusterRequest.builder().dbClusterIdentifier(clusterInfo.dbClusterIdentifier()).build());
+  if (!response.sdkHttpResponse().isSuccessful()) {
+    LOGGER.warning("Error promoting DB cluster to standalone cluster: "
+        + response.sdkHttpResponse().statusCode()
+        + " "
+        + response.sdkHttpResponse().statusText().orElse(""));
+  }
+}
+
+public void promoteInstanceToStandalone(String instanceArn) {
+  if (StringUtils.isNullOrEmpty(instanceArn)) {
+    return;
+  }
+
+  DBInstance instanceInfo = getRdsInstanceInfoByArn(instanceArn);
+
+  if (instanceInfo == null || StringUtils.isNullOrEmpty(instanceInfo.readReplicaSourceDBInstanceIdentifier())) {
+    return;
+  }
+
+  PromoteReadReplicaResponse response = rdsClient.promoteReadReplica(
+      PromoteReadReplicaRequest.builder().dbInstanceIdentifier(instanceInfo.dbInstanceIdentifier()).build());
+  if (!response.sdkHttpResponse().isSuccessful()) {
+    LOGGER.warning("Error promoting DB instance to standalone instance: "
+        + response.sdkHttpResponse().statusCode()
+        + " "
+        + response.sdkHttpResponse().statusText().orElse(""));
+  }
+}
+
 public boolean doesClusterExist(final String clusterId) {
   final DescribeDbClustersRequest request =
       DescribeDbClustersRequest.builder().dbClusterIdentifier(clusterId).build();
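The promote helpers above appear intended for tearing down replication setups: a read replica that still has a replication source is promoted to standalone first, after which it can be deleted like any other cluster or instance. A sketch (ARN and identifier hypothetical):

    // Detach a replica cluster from its source, then delete it.
    auroraUtil.promoteClusterToStandalone(
        "arn:aws:rds:us-east-1:123456789012:cluster:replica-cluster");
    auroraUtil.deleteCluster("replica-cluster", DatabaseEngineDeployment.AURORA, true);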
@@ -520,6 +814,17 @@ public boolean doesClusterExist(final String clusterId) {
   return true;
 }

+public boolean doesInstanceExist(final String instanceId) {
+  final DescribeDbInstancesRequest request =
+      DescribeDbInstancesRequest.builder().dbInstanceIdentifier(instanceId).build();
+  try {
+    DescribeDbInstancesResponse response = rdsClient.describeDBInstances(request);
+    return response.sdkHttpResponse().isSuccessful();
+  } catch (DbInstanceNotFoundException ex) {
+    return false;
+  }
+}
+
 public DBCluster getClusterInfo(final String clusterId) {
   final DescribeDbClustersRequest request =
       DescribeDbClustersRequest.builder().dbClusterIdentifier(clusterId).build();
@@ -531,6 +836,43 @@ public DBCluster getClusterInfo(final String clusterId) {
   return response.dbClusters().get(0);
 }

+public DBCluster getClusterByArn(final String clusterArn) {
+  final DescribeDbClustersRequest request =
+      DescribeDbClustersRequest.builder()
+          .filters(Filter.builder().name("db-cluster-id").values(clusterArn).build())
+          .build();
+  final DescribeDbClustersResponse response = rdsClient.describeDBClusters(request);
+  if (!response.hasDbClusters()) {
+    return null;
+  }
+
+  return response.dbClusters().get(0);
+}
+
+public DBInstance getRdsInstanceInfo(final String instanceId) {
+  final DescribeDbInstancesRequest request =
+      DescribeDbInstancesRequest.builder().dbInstanceIdentifier(instanceId).build();
+  final DescribeDbInstancesResponse response = rdsClient.describeDBInstances(request);
+  if (!response.hasDbInstances()) {
+    throw new RuntimeException("RDS Instance " + instanceId + " not found.");
+  }
+
+  return response.dbInstances().get(0);
+}
+
+public DBInstance getRdsInstanceInfoByArn(final String instanceArn) {
+  final DescribeDbInstancesRequest request =
+      DescribeDbInstancesRequest.builder().filters(
+          Filter.builder().name("db-instance-id").values(instanceArn).build())
+          .build();
+  final DescribeDbInstancesResponse response = rdsClient.describeDBInstances(request);
+  if (!response.hasDbInstances()) {
+    return null;
+  }
+
+  return response.dbInstances().get(0);
+}
+
 public DatabaseEngine getClusterEngine(final DBCluster cluster) {
   switch (cluster.engine()) {
     case "aurora-postgresql":
@@ -544,91 +886,153 @@ public DatabaseEngine getClusterEngine(final DBCluster cluster) {
   }
 }

-public List<TestInstanceInfo> getClusterInstanceIds(final String clusterId) {
-  final DescribeDbInstancesResponse dbInstancesResult =
-      rdsClient.describeDBInstances(
-          (builder) ->
-              builder.filters(Filter.builder().name("db-cluster-id").values(clusterId).build()));
+public String getDbInstanceClass(TestEnvironmentRequest request) {
+  switch (request.getDatabaseEngineDeployment()) {
+    case AURORA:
+      return request.getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)
+          ? "db.r7g.2xlarge"
+          : "db.r5.large";
+    case RDS:
+    case RDS_MULTI_AZ_INSTANCE:
+    case RDS_MULTI_AZ_CLUSTER:
+      return "db.m5d.large";
+    default:
+      throw new NotImplementedException(request.getDatabaseEngineDeployment().toString());
+  }
+}

-  List<TestInstanceInfo> result = new ArrayList<>();
-  for (DBInstance instance : dbInstancesResult.dbInstances()) {
-    result.add(
+public DatabaseEngine getRdsInstanceEngine(final DBInstance instance) {
+  switch (instance.engine()) {
+    case "postgres":
+      return DatabaseEngine.PG;
+    case "mysql":
+      return DatabaseEngine.MYSQL;
+    default:
+      throw new UnsupportedOperationException(instance.engine());
+  }
+}
+
+public String getAuroraParameterGroupFamily(String engine, String engineVersion) {
+  switch (engine) {
+    case "aurora-postgresql":
+      return "aurora-postgresql16";
+    case "aurora-mysql":
+      if (StringUtils.isNullOrEmpty(engineVersion) || engineVersion.contains("8.0")) {
+        return "aurora-mysql8.0";
+      }
+      return "aurora-mysql5.7";
+    default:
+      throw new UnsupportedOperationException(engine);
+  }
+}
+
+public List<TestInstanceInfo> getTestInstancesInfo(final String clusterId) {
+  List<DBInstance> dbInstances = getDBInstances(clusterId);
+  List<TestInstanceInfo> instancesInfo = new ArrayList<>();
+  for (DBInstance dbInstance : dbInstances) {
+    instancesInfo.add(
       new TestInstanceInfo(
-          instance.dbInstanceIdentifier(),
-          instance.endpoint().address(),
-          instance.endpoint().port()));
+          dbInstance.dbInstanceIdentifier(),
+          dbInstance.endpoint().address(),
+          dbInstance.endpoint().port()));
   }
-  return result;
+
+  return instancesInfo;
 }
 public void waitUntilClusterHasRightState(String clusterId) throws InterruptedException {
+  waitUntilClusterHasRightState(clusterId, "available");
+}
+
+public void waitUntilClusterHasRightState(String clusterId, String... allowedStatuses) throws InterruptedException {
   String status = getDBCluster(clusterId).status();
-  while (!"available".equalsIgnoreCase(status)) {
+  LOGGER.finest("Cluster status: " + status + ", waiting for status: " + String.join(", ", allowedStatuses));
+  final Set<String> allowedStatusSet = Arrays.stream(allowedStatuses)
+      .map(String::toLowerCase)
+      .collect(Collectors.toSet());
+  final long waitTillNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(15);
+  while (!allowedStatusSet.contains(status.toLowerCase()) && waitTillNanoTime > System.nanoTime()) {
     TimeUnit.MILLISECONDS.sleep(1000);
-    status = getDBCluster(clusterId).status();
+    String tmpStatus = getDBCluster(clusterId).status();
+    if (!tmpStatus.equalsIgnoreCase(status)) {
+      LOGGER.finest("Cluster status (waiting): " + tmpStatus);
+    }
+    status = tmpStatus;
   }
+  LOGGER.finest("Cluster status (after wait): " + status);
 }

 public DBCluster getDBCluster(String clusterId) {
-  final DescribeDbClustersResponse dbClustersResult =
-      rdsClient.describeDBClusters((builder) -> builder.dbClusterIdentifier(clusterId));
+  DescribeDbClustersResponse dbClustersResult = null;
+  int remainingTries = 5;
+  while (remainingTries-- > 0) {
+    try {
+      dbClustersResult = rdsClient.describeDBClusters((builder) -> builder.dbClusterIdentifier(clusterId));
+      break;
+    } catch (DbClusterNotFoundException ex) {
+      return null;
+    } catch (SdkClientException sdkClientException) {
+      if (remainingTries == 0) {
+        throw sdkClientException;
+      }
+    }
+  }
+
+  if (dbClustersResult == null) {
+    fail("Unable to get DB cluster info for cluster with ID " + clusterId);
+  }
+
   final List<DBCluster> dbClusterList = dbClustersResult.dbClusters();
   return dbClusterList.get(0);
 }
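`getDBCluster` above and `getDBInstance` below share the same retry shape: up to five describe attempts, swallowing transient `SdkClientException`s and rethrowing only on the final try. Factored out as a generic sketch (this helper is not part of the class; it just names the pattern):

    // Generic form of the retry loop used by getDBCluster/getDBInstance.
    private static <T> T withRetries(java.util.function.Supplier<T> call) {
      int remainingTries = 5;
      while (true) {
        try {
          return call.get();
        } catch (SdkClientException ex) {
          if (--remainingTries <= 0) {
            throw ex; // give up after the final attempt
          }
        }
      }
    }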
-public List<String> getAuroraInstanceIds(
-    DatabaseEngine databaseEngine, String connectionUrl, String userName, String password)
-    throws SQLException {
-
-  String retrieveTopologySql;
-  switch (databaseEngine) {
-    case MYSQL:
-      retrieveTopologySql =
-          "SELECT SERVER_ID, SESSION_ID FROM information_schema.replica_host_status "
-              + "ORDER BY IF(SESSION_ID = 'MASTER_SESSION_ID', 0, 1)";
-      break;
-    case PG:
-      retrieveTopologySql =
-          "SELECT SERVER_ID, SESSION_ID FROM aurora_replica_status() "
-              + "ORDER BY CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN 0 ELSE 1 END";
+public DBInstance getDBInstance(String instanceId) {
+  DescribeDbInstancesResponse dbInstanceResult = null;
+  int remainingTries = 5;
+  while (remainingTries-- > 0) {
+    try {
+      dbInstanceResult = rdsClient.describeDBInstances((builder) -> builder.dbInstanceIdentifier(instanceId));
       break;
-    default:
-      throw new UnsupportedOperationException(databaseEngine.toString());
-  }
-
-  ArrayList<String> auroraInstances = new ArrayList<>();
+    } catch (SdkClientException sdkClientException) {
+      if (remainingTries == 0) {
+        throw sdkClientException;
+      }

-  try (final Connection conn = DriverManager.getConnection(connectionUrl, userName, password);
-      final Statement stmt = conn.createStatement();
-      final ResultSet resultSet = stmt.executeQuery(retrieveTopologySql)) {
-    while (resultSet.next()) {
-      // Get Instance endpoints
-      final String hostEndpoint = resultSet.getString("SERVER_ID");
-      auroraInstances.add(hostEndpoint);
+      try {
+        TimeUnit.SECONDS.sleep(30);
+      } catch (InterruptedException ex) {
+        Thread.currentThread().interrupt();
+        throw new RuntimeException(ex);
+      }
     }
   }
-  return auroraInstances;
-}
-
-public Boolean isDBInstanceWriter(String clusterId, String instanceId) {
-  return getMatchedDBClusterMember(clusterId, instanceId).isClusterWriter();
-}
-
-public DBClusterMember getMatchedDBClusterMember(String clusterId, String instanceId) {
-  final List<DBClusterMember> matchedMemberList =
-      getDBClusterMemberList(clusterId).stream()
-          .filter(dbClusterMember -> dbClusterMember.dbInstanceIdentifier().equals(instanceId))
-          .collect(Collectors.toList());
-  if (matchedMemberList.isEmpty()) {
-    throw new RuntimeException(
-        "Cannot find cluster member whose db instance identifier is " + instanceId);
+  if (dbInstanceResult == null) {
+    fail("Unable to get DB instance info for instance with ID " + instanceId);
   }
-  return matchedMemberList.get(0);
+
+  final List<DBInstance> dbInstanceList = dbInstanceResult.dbInstances();
+  return dbInstanceList.get(0);
 }

-public List<DBClusterMember> getDBClusterMemberList(String clusterId) {
-  final DBCluster dbCluster = getDBCluster(clusterId);
-  return dbCluster.dbClusterMembers();
+public void waitUntilInstanceHasRightState(String instanceId, String... allowedStatuses) throws InterruptedException {
+
+  String status = getDBInstance(instanceId).dbInstanceStatus();
+  LOGGER.finest("Instance " + instanceId + " status: " + status
+      + ", waiting for status: " + String.join(", ", allowedStatuses));
+  final Set<String> allowedStatusSet = Arrays.stream(allowedStatuses)
+      .map(String::toLowerCase)
+      .collect(Collectors.toSet());
+  final long waitTillNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(15);
+  while (!allowedStatusSet.contains(status.toLowerCase()) && waitTillNanoTime > System.nanoTime()) {
+    TimeUnit.MILLISECONDS.sleep(1000);
+    String tmpStatus = getDBInstance(instanceId).dbInstanceStatus();
+    if (!tmpStatus.equalsIgnoreCase(status)) {
+      LOGGER.finest("Instance " + instanceId + " status (waiting): " + tmpStatus);
+    }
+    status = tmpStatus;
+  }
+  LOGGER.finest("Instance " + instanceId + " status (after wait): " + status);
 }

 public void addAuroraAwsIamUser(
@@ -637,7 +1041,8 @@ public void addAuroraAwsIamUser(
     String userName,
     String password,
     String dbUser,
-    String databaseName)
+    String databaseName,
+    boolean useRdsTools)
     throws SQLException {

   try (final Connection conn = DriverManager.getConnection(connectionUrl, userName, password);
@@ -648,13 +1053,28 @@ public void addAuroraAwsIamUser(
       stmt.execute("DROP USER IF EXISTS " + dbUser + ";");
       stmt.execute(
           "CREATE USER " + dbUser + " IDENTIFIED WITH AWSAuthenticationPlugin AS 'RDS';");
-      stmt.execute("GRANT ALL PRIVILEGES ON " + databaseName + ".* TO '" + dbUser + "'@'%';");
+      if (!StringUtils.isNullOrEmpty(databaseName)) {
+        stmt.execute("GRANT ALL PRIVILEGES ON " + databaseName + ".* TO '" + dbUser + "'@'%';");
+      } else {
+        stmt.execute("GRANT ALL PRIVILEGES ON `%`.* TO '" + dbUser + "'@'%';");
+      }
+
+      // BG switchover status needs it.
+      stmt.execute("GRANT SELECT ON mysql.* TO '" + dbUser + "'@'%';");
       break;
     case PG:
       stmt.execute("DROP USER IF EXISTS " + dbUser + ";");
       stmt.execute("CREATE USER " + dbUser + ";");
       stmt.execute("GRANT rds_iam TO " + dbUser + ";");
-      stmt.execute("GRANT ALL PRIVILEGES ON DATABASE " + databaseName + " TO " + dbUser + ";");
+      if (!StringUtils.isNullOrEmpty(databaseName)) {
+        stmt.execute("GRANT ALL PRIVILEGES ON DATABASE " + databaseName + " TO " + dbUser + ";");
+      }
+
+      if (useRdsTools) {
+        // BG switchover status needs it.
+        stmt.execute("GRANT USAGE ON SCHEMA rds_tools TO " + dbUser + ";");
+        stmt.execute("GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA rds_tools TO " + dbUser + ";");
+      }
       break;
     default:
       throw new UnsupportedOperationException(databaseEngine.toString());
@@ -665,7 +1085,7 @@ public void addAuroraAwsIamUser(
 public List<String> getEngineVersions(String engine) {
   final List<String> res = new ArrayList<>();
   final DescribeDbEngineVersionsResponse versions = rdsClient.describeDBEngineVersions(
-      DescribeDbEngineVersionsRequest.builder().engine(engine).build()
+        DescribeDbEngineVersionsRequest.builder().engine(engine).build()
   );
   for (DBEngineVersion version : versions.dbEngineVersions()) {
     if (version.engineVersion().contains("limitless")) {
@@ -678,18 +1098,197 @@ public List<String> getEngineVersions(String engine) {
 }

 public String getLatestVersion(String engine) {
-  return getEngineVersions(engine)
-      .stream().min(Comparator.reverseOrder())
-      .orElse(null);
+  return getEngineVersions(engine).stream()
+      .filter(version -> !version.contains("limitless"))
+      .max(Comparator.naturalOrder())
+      .orElse(null);
 }

-public String getLTSVersion(String engine) {
+public String getDefaultVersion(String engine) {
   final DescribeDbEngineVersionsResponse versions = rdsClient.describeDBEngineVersions(
-      DescribeDbEngineVersionsRequest.builder().defaultOnly(true).engine(engine).build()
+        DescribeDbEngineVersionsRequest.builder().defaultOnly(true).engine(engine).build()
   );
   if (!versions.dbEngineVersions().isEmpty()) {
     return versions.dbEngineVersions().get(0).engineVersion();
   }
-  throw new RuntimeException("Failed to find LTS version");
+  throw new RuntimeException("Failed to find default version");
+}
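When provisioning, the version helpers compose naturally: take the newest release from `getLatestVersion` (which filters out "limitless" builds), fall back to the engine default, then derive the parameter-group family. A sketch (engine string as used elsewhere in this class; `auroraUtil` hypothetical):

    String engine = "aurora-postgresql";
    String version = auroraUtil.getLatestVersion(engine);
    if (version == null) {
      version = auroraUtil.getDefaultVersion(engine); // RDS default as a fallback
    }
    String family = auroraUtil.getAuroraParameterGroupFamily(engine, version);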
+public String createBlueGreenDeployment(String name, String sourceArn) {
+
+  final String blueGreenName = "bgd-" + name;
+
+  CreateBlueGreenDeploymentResponse response = null;
+  int count = 10;
+  while (response == null && count-- > 0) {
+    try {
+      response = rdsClient.createBlueGreenDeployment(
+          CreateBlueGreenDeploymentRequest.builder()
+              .blueGreenDeploymentName(blueGreenName)
+              .source(sourceArn)
+              .tags(this.getTags())
+              .build());
+    } catch (RdsException ex) {
+      if (ex.statusCode() != 500 || count == 0) {
+        throw ex;
+      }
+
+      LOGGER.finest("Can't send createBlueGreenDeployment request. Wait 1min and try again.");
+
+      try {
+        TimeUnit.MINUTES.sleep(1);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        throw new RuntimeException(e);
+      }
+    }
+  }
+
+  if (response == null) {
+    throw new RuntimeException("Can't send createBlueGreenDeployment request.");
+  }
+
+  if (!response.sdkHttpResponse().isSuccessful()) {
+    LOGGER.finest(String.format("createBlueGreenDeployment response: %d, %s",
+        response.sdkHttpResponse().statusCode(),
+        response.sdkHttpResponse().statusText()));
+    throw new RuntimeException(response.sdkHttpResponse().statusText().orElse("Unspecified error."));
+  } else {
+    LOGGER.finest("createBlueGreenDeployment request is sent");
+  }
+
+  String blueGreenId = response.blueGreenDeployment().blueGreenDeploymentIdentifier();
+
+  BlueGreenDeployment blueGreenDeployment = getBlueGreenDeployment(blueGreenId);
+  long end = System.nanoTime() + TimeUnit.MINUTES.toNanos(240);
+  while ((blueGreenDeployment == null || !blueGreenDeployment.status().equalsIgnoreCase("available"))
+      && System.nanoTime() < end) {
+    try {
+      TimeUnit.SECONDS.sleep(60);
+    } catch (InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+    blueGreenDeployment = getBlueGreenDeployment(blueGreenId);
+  }
+
+  if (blueGreenDeployment == null || !blueGreenDeployment.status().equalsIgnoreCase("available")) {
+    throw new RuntimeException("BlueGreen Deployment " + blueGreenId + " isn't available.");
+  }
+
+  return blueGreenId;
+}
+
+public void waitUntilBlueGreenDeploymentHasRightState(String blueGreenId, String... allowedStatuses) {
+
+  String status = getBlueGreenDeployment(blueGreenId).status();
+  LOGGER.finest("BGD status: " + status + ", waiting for status: " + String.join(", ", allowedStatuses));
+  final Set<String> allowedStatusSet = Arrays.stream(allowedStatuses)
+      .map(String::toLowerCase)
+      .collect(Collectors.toSet());
+  final long waitTillNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(15);
+  while (!allowedStatusSet.contains(status.toLowerCase()) && waitTillNanoTime > System.nanoTime()) {
+    try {
+      TimeUnit.MILLISECONDS.sleep(1000);
+    } catch (InterruptedException ex) {
+      throw new RuntimeException(ex);
+    }
+    String tmpStatus = getBlueGreenDeployment(blueGreenId).status();
+    if (!tmpStatus.equalsIgnoreCase(status)) {
+      LOGGER.finest("BGD status (waiting): " + tmpStatus);
+    }
+    status = tmpStatus;
+  }
+  LOGGER.finest("BGD status (after wait): " + status);
+
+  if (!allowedStatusSet.contains(status.toLowerCase())) {
+    throw new RuntimeException("BlueGreen Deployment " + blueGreenId + " has wrong status.");
+  }
+}
+
+public boolean doesBlueGreenDeploymentExist(String blueGreenId) {
+  try {
+    DescribeBlueGreenDeploymentsResponse response = rdsClient.describeBlueGreenDeployments(
+        builder -> builder.blueGreenDeploymentIdentifier(blueGreenId));
+    return response.blueGreenDeployments() != null && !response.blueGreenDeployments().isEmpty();
+  } catch (BlueGreenDeploymentNotFoundException ex) {
+    LOGGER.finest("blueGreenDeployments not found");
+    return false;
+  }
+}
+
+public BlueGreenDeployment getBlueGreenDeployment(String blueGreenId) {
+  try {
+    DescribeBlueGreenDeploymentsResponse response = rdsClient.describeBlueGreenDeployments(
+        builder -> builder.blueGreenDeploymentIdentifier(blueGreenId));
+    if (response.hasBlueGreenDeployments()) {
+      return response.blueGreenDeployments().get(0);
+    }
+    return null;
+  } catch (BlueGreenDeploymentNotFoundException ex) {
+    return null;
+  }
+}
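End to end, the Blue/Green helpers in this region follow a create / wait / delete lifecycle; `createBlueGreenDeployment` itself already blocks until the deployment reports AVAILABLE. A hypothetical test sequence (cluster name illustrative; `auroraUtil` as before):

    String sourceArn = auroraUtil.getClusterInfo("test-cluster").dbClusterArn();
    String bgId = auroraUtil.createBlueGreenDeployment("test-cluster", sourceArn);
    // ... run switchover scenarios against the deployment ...
    auroraUtil.deleteBlueGreenDeployment(bgId, true); // waits up to 120 minutes for cleanup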
+public BlueGreenDeployment getBlueGreenDeploymentBySource(String sourceArn) {
+  try {
+    DescribeBlueGreenDeploymentsResponse response = rdsClient.describeBlueGreenDeployments(
+        builder -> builder.filters(f -> f.name("source").values(sourceArn)));
+    if (!response.blueGreenDeployments().isEmpty()) {
+      return response.blueGreenDeployments().get(0);
+    }
+    return null;
+  } catch (BlueGreenDeploymentNotFoundException ex) {
+    return null;
+  }
+}
+
+public void deleteBlueGreenDeployment(String blueGreenId, boolean waitForCompletion) {
+
+  if (!doesBlueGreenDeploymentExist(blueGreenId)) {
+    return;
+  }
+
+  waitUntilBlueGreenDeploymentHasRightState(blueGreenId, "available", "switchover_completed");
+
+  DeleteBlueGreenDeploymentResponse response = rdsClient.deleteBlueGreenDeployment(
+      DeleteBlueGreenDeploymentRequest.builder()
+          .blueGreenDeploymentIdentifier(blueGreenId)
+          .build());
+
+  if (!response.sdkHttpResponse().isSuccessful()) {
+    LOGGER.finest(String.format("deleteBlueGreenDeployment response: %d, %s",
+        response.sdkHttpResponse().statusCode(),
+        response.sdkHttpResponse().statusText()));
+    throw new RuntimeException(response.sdkHttpResponse().statusText().orElse("Unspecified error."));
+  } else {
+    LOGGER.finest("deleteBlueGreenDeployment request is sent");
+  }
+
+  if (waitForCompletion) {
+    long endTimeNano = System.nanoTime() + TimeUnit.MINUTES.toNanos(120);
+    while (doesBlueGreenDeploymentExist(blueGreenId) && endTimeNano > System.nanoTime()) {
+      try {
+        TimeUnit.MINUTES.sleep(1);
+      } catch (InterruptedException ex) {
+        Thread.currentThread().interrupt();
+        return;
+      }
+    }
+
+    if (doesBlueGreenDeploymentExist(blueGreenId)) {
+      throw new RuntimeException(
+          "Unable to delete Blue/Green Deployment after waiting for 120 minutes");
+    }
+  }
+}
+
+private List<Tag> getTags() {
+  ZoneId zoneId = ZoneId.of("America/Los_Angeles");
+  ZonedDateTime zdt = Instant.now().atZone(zoneId);
+  String timeStr = zdt.format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss zzz"));
+  // Each Tag holds a single key/value pair; chaining key() twice on one builder
+  // would silently overwrite the first pair, so the two tags are built separately.
+  return Arrays.asList(
+      Tag.builder().key("env").value("test-runner").build(),
+      Tag.builder().key("created").value(timeStr).build());
+}
 }
diff --git a/tests/integration/host/src/test/java/integration/util/ContainerHelper.java b/tests/integration/host/src/test/java/integration/util/ContainerHelper.java
index 817bd2eb..34e615a2 100644
--- a/tests/integration/host/src/test/java/integration/util/ContainerHelper.java
+++ b/tests/integration/host/src/test/java/integration/util/ContainerHelper.java
@@ -23,9 +23,10 @@
 import com.github.dockerjava.api.command.ExecCreateCmdResponse;
 import com.github.dockerjava.api.command.InspectContainerResponse;
 import com.github.dockerjava.api.exception.DockerException;
-import integration.DebugEnv;
 import eu.rekawek.toxiproxy.ToxiproxyClient;
+import integration.DebugEnv;
 import integration.TestInstanceInfo;
+import integration.host.TestEnvironmentConfiguration;
 import java.io.IOException;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -44,14 +45,14 @@
 import org.testcontainers.images.builder.dockerfile.DockerfileBuilder;
 import org.testcontainers.utility.DockerImageName;
 import org.testcontainers.utility.MountableFile;
-import integration.host.TestEnvironmentConfiguration;
+import org.testcontainers.utility.TestEnvironment;

 public class ContainerHelper {

   private static final String MYSQL_CONTAINER_IMAGE_NAME = "mysql:8.0.36";
   private static final String POSTGRES_CONTAINER_IMAGE_NAME = "postgres:latest";
   private static final DockerImageName TOXIPROXY_IMAGE =
-      DockerImageName.parse("shopify/toxiproxy:2.1.4");
+      DockerImageName.parse("ghcr.io/shopify/toxiproxy:2.11.0");
   private static final int PROXY_CONTROL_PORT = 8474;
   private static final int PROXY_PORT = 8666;
@@ -59,32 +60,6 @@ public class ContainerHelper {
   private static final String XRAY_TELEMETRY_IMAGE_NAME = "amazon/aws-xray-daemon";
   private static final String OTLP_TELEMETRY_IMAGE_NAME = "amazon/aws-otel-collector";

-  private static final String RETRIEVE_TOPOLOGY_SQL_POSTGRES =
-      "SELECT SERVER_ID, SESSION_ID FROM aurora_replica_status() "
-          + "ORDER BY CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN 0 ELSE 1 END";
-  private static final String RETRIEVE_TOPOLOGY_SQL_MYSQL =
-      "SELECT SERVER_ID, SESSION_ID FROM information_schema.replica_host_status "
-          + "ORDER BY IF(SESSION_ID = 'MASTER_SESSION_ID', 0, 1)";
-  private static final String SERVER_ID = "SERVER_ID";
-
-  public Long runCmd(GenericContainer<?> container, String... cmd)
-      throws IOException, InterruptedException {
-    System.out.println("==== Container console feed ==== >>>>");
-    Consumer<OutputFrame> consumer = new ConsoleConsumer();
-    Long exitCode = execInContainer(container, consumer, cmd);
-    System.out.println("==== Container console feed ==== <<<<");
-    return exitCode;
-  }
-
-  public Long runCmdInDirectory(GenericContainer<?> container, String workingDirectory, String... cmd)
-      throws IOException, InterruptedException {
-    System.out.println("==== Container console feed ==== >>>>");
-    Consumer<OutputFrame> consumer = new ConsoleConsumer();
-    Long exitCode = execInContainer(container, workingDirectory, consumer, cmd);
-    System.out.println("==== Container console feed ==== <<<<");
-    return exitCode;
-  }
-
   public void runTest(GenericContainer<?> container, String testFolder, String primaryInfo, TestEnvironmentConfiguration config)
       throws IOException, InterruptedException {
     System.out.println("==== Container console feed ==== >>>>");
@@ -267,12 +242,6 @@ public T withFixedExposedPort(int hostPort, int containerPort) {
         .withPrivilegedMode(true); // Required to control Linux core settings like TcpKeepAlive
   }

-  protected Long execInContainer(
-      GenericContainer<?> container, String workingDirectory, Consumer<OutputFrame> consumer, String... command)
-      throws UnsupportedOperationException, IOException, InterruptedException {
-    return execInContainer(container.getContainerInfo(), consumer, workingDirectory, command);
-  }
-
   protected Long execInContainer(
       GenericContainer<?> container,
       Consumer<OutputFrame> consumer,
@@ -411,11 +380,6 @@ public FixedExposedPortContainer(final DockerImageName dockerImageName) {
     super(dockerImageName);
   }

-  public T withFixedExposedPort(int hostPort, int containerPort, InternetProtocol protocol) {
-    super.addFixedExposedPort(hostPort, containerPort, protocol);
-    return self();
-  }
-
   public T withExposedPort(Integer port) {
     super.addExposedPort(port);
     return self();
diff --git a/tests/unit/test_dialect.py b/tests/unit/test_dialect.py
index 4830f291..fb7f658c 100644
--- a/tests/unit/test_dialect.py
+++ b/tests/unit/test_dialect.py
@@ -19,7 +19,7 @@
 from aws_advanced_python_wrapper.database_dialect import (
     AuroraMysqlDialect, AuroraPgDialect, DatabaseDialectManager, DialectCode,
-    MultiAzMysqlDialect, MysqlDatabaseDialect, PgDatabaseDialect,
+    MultiAzClusterMysqlDialect, MysqlDatabaseDialect, PgDatabaseDialect,
     RdsMysqlDialect, RdsPgDialect, TargetDriverType, UnknownDatabaseDialect)
 from aws_advanced_python_wrapper.driver_info import DriverInfo
 from aws_advanced_python_wrapper.errors import AwsWrapperError
@@ -122,13 +122,13 @@ def test_mysql_is_dialect(mock_conn, mock_cursor, mock_session, mysql_dialect, m
 def test_rds_mysql_is_dialect(mock_super, mock_cursor, mock_conn, rds_mysql_dialect, mock_driver_dialect):
     mock_super().is_dialect.return_value = True

-    records = [("some_value", "some_value"), ("some_value", "source distribution")]
-    mock_cursor.__iter__.return_value = records
+    records = ("some_value", "source distribution")
+    mock_cursor.fetchone.return_value = records

     assert rds_mysql_dialect.is_dialect(mock_conn, mock_driver_dialect)

-    records = [("some_value", "some_value"), ("some_value", "some_value")]
-    mock_cursor.__iter__.return_value = records
+    records = ("some_value", "some_value")
+    mock_cursor.fetchone.return_value = records

     assert not rds_mysql_dialect.is_dialect(mock_conn, mock_driver_dialect)

@@ -216,7 +216,7 @@ def test_get_dialect_user_setting(mock_driver_dialect):

 def test_prepare_conn_props__multi_az_mysql():
-    dialect = MultiAzMysqlDialect()
+    dialect = MultiAzClusterMysqlDialect()
     props = Properties({"host": "some_host"})
     expected = Properties({
         "host": "some_host",
diff --git a/tests/unit/test_failover_plugin.py b/tests/unit/test_failover_plugin.py
index 4610b316..a9e0c856 100644
--- a/tests/unit/test_failover_plugin.py
+++ b/tests/unit/test_failover_plugin.py
@@ -240,7 +240,7 @@ def test_failover_writer(plugin_service_mock, host_list_provider_service_mock, i
 def test_failover_reader_with_valid_failed_host(plugin_service_mock, host_list_provider_service_mock,
                                                 init_host_provider_func_mock, conn_mock, reader_failover_handler_mock):
     host: HostInfo = HostInfo("host")
-    host._availability = HostAvailability.AVAILABLE
+    host.availability = HostAvailability.AVAILABLE
     host._aliases = ["alias1", "alias2"]
     hosts: Tuple[HostInfo, ...] = (host, )
     type(plugin_service_mock).hosts = PropertyMock(return_value=hosts)
@@ -262,7 +262,7 @@ def test_failover_reader_with_valid_failed_host(plugin_service_mock, host_list_p
 def test_failover_reader_with_no_failed_host(plugin_service_mock, host_list_provider_service_mock,
                                              init_host_provider_func_mock, reader_failover_handler_mock):
     host: HostInfo = HostInfo("host")
-    host._availability = HostAvailability.AVAILABLE
+    host.availability = HostAvailability.AVAILABLE
     host._aliases = ["alias1", "alias2"]
     hosts: Tuple[HostInfo, ...] = (host, )
     type(plugin_service_mock).hosts = PropertyMock(return_value=hosts)
diff --git a/tests/unit/test_hostinfo.py b/tests/unit/test_hostinfo.py
index ad8df033..917103e1 100644
--- a/tests/unit/test_hostinfo.py
+++ b/tests/unit/test_hostinfo.py
@@ -27,7 +27,7 @@ def test_host_info_defaults():
     assert len(host_info.aliases) == 0
     assert len(host_info._all_aliases) == 1
     assert host_info.role == HostRole.WRITER
-    assert host_info._availability == HostAvailability.AVAILABLE
+    assert host_info.availability == HostAvailability.AVAILABLE
     assert list(host_info._all_aliases)[0] == "testhost"

diff --git a/tests/unit/test_reader_failover_handler.py b/tests/unit/test_reader_failover_handler.py
index 191fe1bb..f4ccc537 100644
--- a/tests/unit/test_reader_failover_handler.py
+++ b/tests/unit/test_reader_failover_handler.py
@@ -91,8 +91,8 @@ def force_connect_side_effect(host_info, _) -> Connection:
     plugin_service_mock.force_connect.side_effect = force_connect_side_effect
     plugin_service_mock.is_network_exception.return_value = True

-    hosts[2]._availability = HostAvailability.UNAVAILABLE
-    hosts[4]._availability = HostAvailability.UNAVAILABLE
+    hosts[2].availability = HostAvailability.UNAVAILABLE
+    hosts[4].availability = HostAvailability.UNAVAILABLE

     target: ReaderFailoverHandler = ReaderFailoverHandlerImpl(plugin_service_mock, props)
     result: ReaderFailoverResult = target.failover(hosts, current_host)
@@ -116,8 +116,8 @@ def force_connect_side_effect(_, __) -> Connection:

     plugin_service_mock.force_connect.side_effect = force_connect_side_effect

-    hosts[2]._availability = HostAvailability.UNAVAILABLE
-    hosts[4]._availability = HostAvailability.UNAVAILABLE
+    hosts[2].availability = HostAvailability.UNAVAILABLE
+    hosts[4].availability = HostAvailability.UNAVAILABLE

     # Set max failover timeout to 5 seconds
     target: ReaderFailoverHandler = ReaderFailoverHandlerImpl(plugin_service_mock, props, 5, 30)
@@ -218,9 +218,9 @@ def force_connect_side_effect(_, __) -> Connection:
 def test_get_host_tuples_by_priority(plugin_service_mock, connection_mock, default_properties, default_hosts):
     hosts = default_hosts

-    hosts[2]._availability = HostAvailability.UNAVAILABLE
-    hosts[4]._availability = HostAvailability.UNAVAILABLE
-    hosts[5]._availability = HostAvailability.UNAVAILABLE
+    hosts[2].availability = HostAvailability.UNAVAILABLE
+    hosts[4].availability = HostAvailability.UNAVAILABLE
+    hosts[5].availability = HostAvailability.UNAVAILABLE

     hosts_by_priority = ReaderFailoverHandlerImpl.get_hosts_by_priority(hosts, False)

@@ -229,7 +229,7 @@ def test_get_host_tuples_by_priority(plugin_service_mock, connection_mock, defau
     # expecting active readers
     while (i < len(hosts_by_priority)
            and hosts_by_priority[i].role == HostRole.READER
-           and hosts_by_priority[i]._availability == HostAvailability.AVAILABLE):
+           and hosts_by_priority[i].availability == HostAvailability.AVAILABLE):
         i += 1

     # expecting a writer
@@ -239,7 +239,7 @@ def test_get_host_tuples_by_priority(plugin_service_mock, connection_mock, defau
     # expecting down readers
     while (i < len(hosts_by_priority)
            and hosts_by_priority[i].role == HostRole.READER
-           and hosts_by_priority[i]._availability == HostAvailability.UNAVAILABLE):
+           and hosts_by_priority[i].availability == HostAvailability.UNAVAILABLE):
         i += 1

     assert i == len(hosts_by_priority)

@@ -248,9 +248,9 @@ def test_get_host_tuples_by_priority(plugin_service_mock, connection_mock, defau
 def test_get_reader_tuples_by_priority(plugin_service_mock, connection_mock, default_properties, default_hosts):
     hosts = default_hosts

-    hosts[2]._availability = HostAvailability.UNAVAILABLE
-    hosts[4]._availability = HostAvailability.UNAVAILABLE
-    hosts[5]._availability = HostAvailability.UNAVAILABLE
+    hosts[2].availability = HostAvailability.UNAVAILABLE
+    hosts[4].availability = HostAvailability.UNAVAILABLE
+    hosts[5].availability = HostAvailability.UNAVAILABLE

     hosts_by_priority = ReaderFailoverHandlerImpl.get_reader_hosts_by_priority(hosts)

@@ -259,13 +259,13 @@ def test_get_reader_tuples_by_priority(plugin_service_mock, connection_mock, def
     # expecting active readers
     while (i < len(hosts_by_priority)
            and hosts_by_priority[i].role == HostRole.READER
-           and hosts_by_priority[i]._availability == HostAvailability.AVAILABLE):
+           and hosts_by_priority[i].availability == HostAvailability.AVAILABLE):
         i += 1

     # expecting down readers
     while (i < len(hosts_by_priority)
            and hosts_by_priority[i].role == HostRole.READER
-           and hosts_by_priority[i]._availability == HostAvailability.UNAVAILABLE):
+           and hosts_by_priority[i].availability == HostAvailability.UNAVAILABLE):
         i += 1

     assert i == len(hosts_by_priority)

@@ -281,7 +281,7 @@ def test_host_failover_strict_reader_enabled(plugin_service_mock, connection_moc
     assert hosts_by_priority == (reader, )

     # should select the reader even if unavailable
-    reader._availability = HostAvailability.UNAVAILABLE
+    reader.availability = HostAvailability.UNAVAILABLE

     hosts_by_priority = ReaderFailoverHandlerImpl.get_hosts_by_priority(hosts, True)
     assert hosts_by_priority == (reader,)
diff --git a/tests/unit/test_secrets_manager_plugin.py b/tests/unit/test_secrets_manager_plugin.py
index 5e4af71e..87a15987 100644
--- a/tests/unit/test_secrets_manager_plugin.py
+++ b/tests/unit/test_secrets_manager_plugin.py
@@ -26,28 +26,19 @@

 from __future__ import annotations

-from typing import TYPE_CHECKING
-
-from aws_advanced_python_wrapper.aws_secrets_manager_plugin import \
-    AwsSecretsManagerPlugin
-from aws_advanced_python_wrapper.utils.cache_map import CacheMap
-
-if TYPE_CHECKING:
-    from boto3 import Session, client
-    from aws_advanced_python_wrapper.pep249 import Connection
-    from aws_advanced_python_wrapper.database_dialect import DatabaseDialect
-    from aws_advanced_python_wrapper.plugin_service import PluginService
-
 from types import SimpleNamespace
-from typing import Callable, Tuple
+from typing import Tuple
 from unittest import TestCase
 from unittest.mock import MagicMock, patch

 from botocore.exceptions import ClientError
 from parameterized import param, parameterized

+from aws_advanced_python_wrapper.aws_secrets_manager_plugin import \
+    AwsSecretsManagerPlugin
 from aws_advanced_python_wrapper.errors import AwsWrapperError
 from aws_advanced_python_wrapper.hostinfo import HostInfo
+from aws_advanced_python_wrapper.utils.cache_map import CacheMap
 from aws_advanced_python_wrapper.utils.messages import Messages
 from aws_advanced_python_wrapper.utils.properties import Properties

@@ -81,19 +72,15 @@ class TestAwsSecretsManagerPlugin(TestCase):
         },
         'ResponseMetadata': {
             'HTTPStatusCode': 400,
+            'RequestId': 'test-request-id',
+            'HostId': 'test-host-id',
+            'HTTPHeaders': {},
+            'RetryAttempts': 0
         }
     }, "some_operation")

     _secrets_cache: CacheMap[Tuple, SimpleNamespace] = CacheMap()

-    _mock_func: Callable
-    _mock_plugin_service: PluginService
-    _mock_dialect: DatabaseDialect
-    _mock_session: Session
-    _mock_client: client
-    _mock_connection: Connection
-    _pg_properties: Properties
-
     def setUp(self):
         self._mock_func = MagicMock()
         self._mock_plugin_service = MagicMock()