diff --git a/lib/charms/grafana_agent/v0/cos_agent.py b/lib/charms/grafana_agent/v0/cos_agent.py
index 582b70c07..cc4da25a8 100644
--- a/lib/charms/grafana_agent/v0/cos_agent.py
+++ b/lib/charms/grafana_agent/v0/cos_agent.py
@@ -22,7 +22,7 @@
Using the `COSAgentProvider` object only requires instantiating it,
typically in the `__init__` method of your charm (the one which sends telemetry).
-The constructor of `COSAgentProvider` has only one required and nine optional parameters:
+The constructor of `COSAgentProvider` has only one required and ten optional parameters:
```python
def __init__(
@@ -36,6 +36,7 @@ def __init__(
log_slots: Optional[List[str]] = None,
dashboard_dirs: Optional[List[str]] = None,
refresh_events: Optional[List] = None,
+ tracing_protocols: Optional[List[str]] = None,
scrape_configs: Optional[Union[List[Dict], Callable]] = None,
):
```
@@ -65,6 +66,8 @@ def __init__(
- `refresh_events`: List of events on which to refresh relation data.
+- `tracing_protocols`: List of tracing protocols that the charm requires in order to send traces.
+
- `scrape_configs`: List of standard scrape_configs dicts or a callable that returns the list in
case the configs need to be generated dynamically. The contents of this list will be merged
with the configs from `metrics_endpoints`.
@@ -108,6 +111,7 @@ def __init__(self, *args):
log_slots=["my-app:slot"],
dashboard_dirs=["./src/dashboards_1", "./src/dashboards_2"],
refresh_events=["update-status", "upgrade-charm"],
+ tracing_protocols=["otlp_http", "otlp_grpc"],
scrape_configs=[
{
"job_name": "custom_job",
@@ -249,7 +253,7 @@ class _MetricsEndpointDict(TypedDict):
LIBID = "dc15fa84cef84ce58155fb84f6c6213a"
LIBAPI = 0
-LIBPATCH = 10
+LIBPATCH = 11
PYDEPS = ["cosl", "pydantic"]
diff --git a/lib/charms/mysql/v0/mysql.py b/lib/charms/mysql/v0/mysql.py
index 147801ce9..a8cddc8fb 100644
--- a/lib/charms/mysql/v0/mysql.py
+++ b/lib/charms/mysql/v0/mysql.py
@@ -134,7 +134,7 @@ def wait_until_mysql_connection(self) -> None:
# Increment this major API version when introducing breaking changes
LIBAPI = 0
-LIBPATCH = 70
+LIBPATCH = 71
UNIT_TEARDOWN_LOCKNAME = "unit-teardown"
UNIT_ADD_LOCKNAME = "unit-add"
@@ -147,6 +147,7 @@ def wait_until_mysql_connection(self) -> None:
GET_MEMBER_STATE_TIME = 10 # seconds
MAX_CONNECTIONS_FLOOR = 10
MIM_MEM_BUFFERS = 200 * BYTES_1MiB
+ADMIN_PORT = 33062
SECRET_INTERNAL_LABEL = "secret-id"
SECRET_DELETED_LABEL = "None"
@@ -882,6 +883,30 @@ def __init__(
self.backups_password,
]
+ def instance_def(self, user: str, host: Optional[str] = None) -> str:
+ """Return instance definition used by mysqlsh.
+
+ Args:
+ user: User name.
+ host: Host name, defaults to the unit address.
+ """
+ password_map = {
+ self.server_config_user: self.server_config_password,
+ self.cluster_admin_user: self.cluster_admin_password,
+ "root": self.root_password,
+ self.backups_user: self.backups_password,
+ }
+ if host and ":" in host:
+ # strip port from address
+ host = host.split(":")[0]
+
+ if user in (self.server_config_user, self.backups_user):
+ # critical operator users use admin address
+ return f"{user}:{password_map[user]}@{host or self.instance_address}:{ADMIN_PORT}"
+ elif host != self.instance_address:
+ return f"{user}:{password_map[user]}@{host}:3306"
+ return f"{user}:{password_map[user]}@{self.socket_uri}"
+
def render_mysqld_configuration( # noqa: C901
self,
*,
@@ -948,6 +973,7 @@ def render_mysqld_configuration( # noqa: C901
config["mysqld"] = {
"bind-address": "0.0.0.0",
"mysqlx-bind-address": "0.0.0.0",
+ "admin_address": self.instance_address,
"report_host": self.instance_address,
"max_connections": str(max_connections),
"innodb_buffer_pool_size": str(innodb_buffer_pool_size),
@@ -1151,13 +1177,13 @@ def configure_mysqlrouter_user(
)
# Using server_config_user as we are sure it has create user grants
create_mysqlrouter_user_commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
f"session.run_sql(\"CREATE USER '{username}'@'{hostname}' IDENTIFIED BY '{password}' ATTRIBUTE '{escaped_mysqlrouter_user_attributes}';\")",
)
# Using server_config_user as we are sure it has create user grants
mysqlrouter_user_grant_commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
f"session.run_sql(\"GRANT CREATE USER ON *.* TO '{username}'@'{hostname}' WITH GRANT OPTION;\")",
f"session.run_sql(\"GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON mysql_innodb_cluster_metadata.* TO '{username}'@'{hostname}';\")",
f"session.run_sql(\"GRANT SELECT ON mysql.user TO '{username}'@'{hostname}';\")",
@@ -1191,7 +1217,7 @@ def create_application_database_and_scoped_user(
try:
# Using server_config_user as we are sure it has create database grants
connect_command = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
)
create_database_commands = (
f'session.run_sql("CREATE DATABASE IF NOT EXISTS `{database_name}`;")',
@@ -1228,7 +1254,11 @@ def _get_statements_to_delete_users_with_attribute(
(e.g. "'bar'")
"""
return [
- f"session.run_sql(\"SELECT IFNULL(CONCAT('DROP USER ', GROUP_CONCAT(QUOTE(USER), '@', QUOTE(HOST))), 'SELECT 1') INTO @sql FROM INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.{attribute_name}'={attribute_value}\")",
+ (
+ "session.run_sql(\"SELECT IFNULL(CONCAT('DROP USER ', GROUP_CONCAT(QUOTE(USER),"
+ " '@', QUOTE(HOST))), 'SELECT 1') INTO @sql FROM INFORMATION_SCHEMA.USER_ATTRIBUTES"
+ f" WHERE ATTRIBUTE->'$.{attribute_name}'={attribute_value}\")"
+ ),
'session.run_sql("PREPARE stmt FROM @sql")',
'session.run_sql("EXECUTE stmt")',
'session.run_sql("DEALLOCATE PREPARE stmt")',
@@ -1240,8 +1270,12 @@ def get_mysql_router_users_for_unit(
"""Get users for related MySQL Router unit."""
relation_user = f"relation-{relation_id}"
command = [
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
- f"result = session.run_sql(\"SELECT USER, ATTRIBUTE->>'$.router_id' FROM INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.created_by_user'='{relation_user}' AND ATTRIBUTE->'$.created_by_juju_unit'='{mysql_router_unit_name}'\")",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
+ (
+ "result = session.run_sql(\"SELECT USER, ATTRIBUTE->>'$.router_id' FROM "
+ f"INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.created_by_user'='{relation_user}' "
+ f"AND ATTRIBUTE->'$.created_by_juju_unit'='{mysql_router_unit_name}'\")"
+ ),
"print(result.fetch_all())",
]
try:
@@ -1257,7 +1291,7 @@ def get_mysql_router_users_for_unit(
def delete_users_for_unit(self, unit_name: str) -> None:
"""Delete users for a unit."""
drop_users_command = [
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
]
drop_users_command.extend(
self._get_statements_to_delete_users_with_attribute("unit_name", f"'{unit_name}'")
@@ -1271,7 +1305,7 @@ def delete_users_for_unit(self, unit_name: str) -> None:
def delete_users_for_relation(self, username: str) -> None:
"""Delete users for a relation."""
drop_users_command = [
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
f"session.run_sql(\"DROP USER IF EXISTS '{username}'@'%';\")",
]
# If the relation is with a MySQL Router charm application, delete any users
@@ -1288,7 +1322,7 @@ def delete_users_for_relation(self, username: str) -> None:
def delete_user(self, username: str) -> None:
"""Delete user."""
drop_user_command = [
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
f"session.run_sql(\"DROP USER `{username}`@'%'\")",
]
try:
@@ -1300,7 +1334,7 @@ def delete_user(self, username: str) -> None:
def remove_router_from_cluster_metadata(self, router_id: str) -> None:
"""Remove MySQL Router from InnoDB Cluster metadata."""
command = [
- f"shell.connect_to_primary('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
"cluster = dba.get_cluster()",
f'cluster.remove_router_metadata("{router_id}")',
]
@@ -1318,16 +1352,13 @@ def set_dynamic_variable(
instance_address: Optional[str] = None,
) -> None:
"""Set a dynamic variable value for the instance."""
- if not instance_address:
- instance_address = self.socket_uri
-
# escape variable values when needed
if not re.match(r"^[0-9,a-z,A-Z$_]+$", value):
value = f"`{value}`"
logger.debug(f"Setting {variable=} to {value=}")
set_var_command = [
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, instance_address)}')",
f"session.run_sql(\"SET {'PERSIST' if persist else 'GLOBAL'} {variable}={value}\")",
]
@@ -1340,7 +1371,7 @@ def set_dynamic_variable(
def get_variable_value(self, variable: str) -> str:
"""Get the value of a variable."""
get_var_command = [
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
f"result = session.run_sql(\"SHOW VARIABLES LIKE '{variable}'\")",
"print(result.fetch_all())",
]
@@ -1348,7 +1379,7 @@ def get_variable_value(self, variable: str) -> str:
try:
output = self._run_mysqlsh_script("\n".join(get_var_command))
except MySQLClientError:
- logger.exception(f"Failed to get variable {variable}")
+ logger.exception(f"Failed to get value for {variable=}")
raise MySQLGetVariableError
rows = json.loads(output)
@@ -1367,7 +1398,7 @@ def configure_instance(self, create_cluster_admin: bool = True) -> None:
})
configure_instance_command = (
- f"dba.configure_instance('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}', {json.dumps(options)})",
+ f"dba.configure_instance('{self.instance_def(self.server_config_user)}', {options})",
)
try:
@@ -1387,8 +1418,8 @@ def create_cluster(self, unit_label: str) -> None:
}
commands = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
- f"cluster = dba.create_cluster('{self.cluster_name}', {json.dumps(options)})",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
+ f"cluster = dba.create_cluster('{self.cluster_name}', {options})",
f"cluster.set_instance_option('{self.instance_address}', 'label', '{unit_label}')",
)
@@ -1402,7 +1433,7 @@ def create_cluster(self, unit_label: str) -> None:
def create_cluster_set(self) -> None:
"""Create a cluster set for the cluster on cluster primary."""
commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
f"cluster.create_cluster_set('{self.cluster_set_name}')",
)
@@ -1434,7 +1465,7 @@ def create_replica_cluster(
options["cloneDonor"] = donor
commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
"cs = dba.get_cluster_set()",
f"repl_cluster = cs.create_replica_cluster('{endpoint}','{replica_cluster_name}', {options})",
f"repl_cluster.set_instance_option('{endpoint}', 'label', '{instance_label}')",
@@ -1462,7 +1493,7 @@ def create_replica_cluster(
def promote_cluster_to_primary(self, cluster_name: str, force: bool = False) -> None:
"""Promote a cluster to become the primary cluster on the cluster set."""
commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
"cs = dba.get_cluster_set()",
(
f"cs.force_primary_cluster('{cluster_name}')"
@@ -1481,7 +1512,7 @@ def promote_cluster_to_primary(self, cluster_name: str, force: bool = False) ->
def fence_writes(self) -> None:
"""Fence writes on the primary cluster."""
commands = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
"c = dba.get_cluster()",
"c.fence_writes()",
)
@@ -1495,7 +1526,7 @@ def fence_writes(self) -> None:
def unfence_writes(self) -> None:
"""Unfence writes on the primary cluster and reset read_only flag."""
commands = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
"c = dba.get_cluster()",
"c.unfence_writes()",
"session.run_sql('SET GLOBAL read_only=OFF')",
@@ -1527,7 +1558,7 @@ def is_cluster_in_cluster_set(self, cluster_name: str) -> Optional[bool]:
def cluster_metadata_exists(self, from_instance: str) -> bool:
"""Check if this cluster metadata exists on database."""
check_cluster_metadata_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{from_instance}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, from_instance)}')",
(
'result = session.run_sql("SELECT cluster_name FROM mysql_innodb_cluster_metadata'
f".clusters where cluster_name = '{self.cluster_name}';\")"
@@ -1537,7 +1568,7 @@ def cluster_metadata_exists(self, from_instance: str) -> bool:
try:
output = self._run_mysqlsh_script(
- "\n".join(check_cluster_metadata_commands), timeout=10
+ "\n".join(check_cluster_metadata_commands), timeout=60
)
except MySQLClientError:
logger.warning(f"Failed to check if cluster metadata exists {from_instance=}")
@@ -1548,7 +1579,7 @@ def cluster_metadata_exists(self, from_instance: str) -> bool:
def rejoin_cluster(self, cluster_name) -> None:
"""Try to rejoin a cluster to the cluster set."""
commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
"cs = dba.get_cluster_set()",
f"cs.rejoin_cluster('{cluster_name}')",
)
@@ -1564,7 +1595,7 @@ def rejoin_cluster(self, cluster_name) -> None:
def remove_replica_cluster(self, replica_cluster_name: str, force: bool = False) -> None:
"""Remove a replica cluster from the cluster-set."""
commands = [
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
"cs = dba.get_cluster_set()",
]
if force:
@@ -1632,11 +1663,9 @@ def add_instance_to_cluster(
):
raise MySQLLockAcquisitionError("Lock not acquired")
+ connect_instance = from_instance or self.instance_address
connect_commands = (
- (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}"
- f"@{from_instance or self.instance_address}')"
- ),
+ f"shell.connect('{self.instance_def(self.server_config_user, connect_instance)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
"shell.options['dba.restartWaitTimeout'] = 3600",
)
@@ -1680,7 +1709,7 @@ def is_instance_configured_for_innodb(
) -> bool:
"""Confirm if instance is configured for use in an InnoDB cluster."""
commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, instance_address)}')",
"instance_configured = dba.check_instance_configuration()['status'] == 'ok'",
'print("INSTANCE_CONFIGURED" if instance_configured else "INSTANCE_NOT_CONFIGURED")',
)
@@ -1702,10 +1731,7 @@ def is_instance_configured_for_innodb(
def are_locks_acquired(self, from_instance: Optional[str] = None) -> bool:
"""Report if any topology change is being executed."""
commands = (
- (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}"
- f"@{from_instance or self.socket_uri}')"
- ),
+ f"shell.connect('{self.instance_def(self.server_config_user, from_instance)}')",
"result = session.run_sql(\"SELECT COUNT(*) FROM mysql.juju_units_operations WHERE status='in-progress';\")",
"print(f'{result.fetch_one()[0]}')",
)
@@ -1734,12 +1760,9 @@ def rescan_cluster(
options["addInstances"] = "auto"
rescan_cluster_commands = (
- (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@"
- f"{from_instance or self.socket_uri}')"
- ),
+ f"shell.connect('{self.instance_def(self.server_config_user, from_instance)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
- f"cluster.rescan({json.dumps(options)})",
+ f"cluster.rescan({options})",
)
try:
logger.debug("Rescanning cluster")
@@ -1751,7 +1774,7 @@ def rescan_cluster(
def is_instance_in_cluster(self, unit_label: str) -> bool:
"""Confirm if instance is in the cluster."""
commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
f"print(cluster.status()['defaultReplicaSet']['topology'].get('{unit_label}', {{}}).get('status', 'NOT_A_MEMBER'))",
)
@@ -1782,7 +1805,7 @@ def get_cluster_status(
"""Get the cluster status dictionary."""
options = {"extended": extended}
status_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{from_instance or self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, from_instance)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
f"print(cluster.status({options}))",
)
@@ -1800,7 +1823,7 @@ def get_cluster_set_status(
"""Get the cluster-set status dictionary."""
options = {"extended": extended}
status_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{from_instance or self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, from_instance)}')",
"cs = dba.get_cluster_set()",
f"print(cs.status({options}))",
)
@@ -1824,7 +1847,7 @@ def get_replica_cluster_status(self, replica_cluster_name: Optional[str] = None)
if not replica_cluster_name:
replica_cluster_name = self.cluster_name
status_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
"cs = dba.get_cluster_set()",
f"print(cs.status(extended=1)['clusters']['{replica_cluster_name}']['globalStatus'])",
)
@@ -1850,8 +1873,7 @@ def get_cluster_node_count(
f" WHERE member_state = '{node_status.value.upper()}'"
)
size_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}"
- f"@{from_instance or self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, from_instance)}')",
f'result = session.run_sql("{query}")',
'print(f"{result.fetch_one()[0]}")',
)
@@ -1895,6 +1917,8 @@ def _get_host_ip(host: str) -> str:
if self.is_cluster_replica():
# replica return global primary address
global_primary = self.get_cluster_set_global_primary_address()
+ if not global_primary:
+ raise MySQLGetClusterEndpointsError("Failed to get global primary address")
rw_endpoints = {_get_host_ip(global_primary) if get_ips else global_primary}
else:
rw_endpoints = {
@@ -1923,7 +1947,7 @@ def execute_remove_instance(
"force": "true" if force else "false",
}
remove_instance_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{connect_instance or self.instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, connect_instance)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
"cluster.remove_instance("
f"'{self.cluster_admin_user}@{self.instance_address}', {remove_instance_options})",
@@ -2036,7 +2060,7 @@ def dissolve_cluster(self) -> None:
"""Dissolve the cluster independently of the unit teardown process."""
logger.debug(f"Dissolving cluster {self.cluster_name}")
dissolve_cluster_commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
"cluster.dissolve({'force': 'true'})",
)
@@ -2049,9 +2073,15 @@ def _acquire_lock(self, primary_address: str, unit_label: str, lock_name: str) -
)
acquire_lock_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{primary_address}')",
- f"session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='{unit_label}', status='in-progress' WHERE task='{lock_name}' AND executor='';\")",
- f"acquired_lock = session.run_sql(\"SELECT count(*) FROM mysql.juju_units_operations WHERE task='{lock_name}' AND executor='{unit_label}';\").fetch_one()[0]",
+ f"shell.connect('{self.instance_def(self.server_config_user, host=primary_address)}')",
+ (
+ f"session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='{unit_label}',"
+ f" status='in-progress' WHERE task='{lock_name}' AND executor='';\")"
+ ),
+ (
+ 'acquired_lock = session.run_sql("SELECT count(*) FROM mysql.juju_units_operations'
+ f" WHERE task='{lock_name}' AND executor='{unit_label}';\").fetch_one()[0]"
+ ),
"print(f'{acquired_lock}')",
)
@@ -2068,23 +2098,32 @@ def _acquire_lock(self, primary_address: str, unit_label: str, lock_name: str) -
def _release_lock(self, primary_address: str, unit_label: str, lock_name: str) -> None:
"""Releases a lock in the mysql.juju_units_operations table."""
- logger.debug(f"Releasing lock {lock_name} on {primary_address} for unit {unit_label}")
+ logger.debug(f"Releasing {lock_name=} @{primary_address=} for {unit_label=}")
release_lock_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{primary_address}')",
- "session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started'"
+ f"shell.connect('{self.instance_def(self.server_config_user, host=primary_address)}')",
+ "r = session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started'"
f" WHERE task='{lock_name}' AND executor='{unit_label}';\")",
+ "print(r.get_affected_items_count())",
)
- self._run_mysqlsh_script("\n".join(release_lock_commands))
+ affected_rows = self._run_mysqlsh_script("\n".join(release_lock_commands))
+ if affected_rows:
+ if int(affected_rows) == 0:
+ logger.warning("No lock to release")
+ else:
+ logger.debug(f"{lock_name=} released for {unit_label=}")
def _get_cluster_member_addresses(self, exclude_unit_labels: List = []) -> Tuple[List, bool]:
"""Get the addresses of the cluster's members."""
logger.debug(f"Getting cluster member addresses, excluding units {exclude_unit_labels}")
get_cluster_members_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
- f"member_addresses = ','.join([member['address'] for label, member in cluster.status()['defaultReplicaSet']['topology'].items() if label not in {exclude_unit_labels}])",
+ (
+ "member_addresses = ','.join([member['address'] for label, member in "
+ f"cluster.status()['defaultReplicaSet']['topology'].items() if label not in {exclude_unit_labels}])"
+ ),
"print(f'{member_addresses}')",
)
@@ -2105,12 +2144,10 @@ def get_cluster_primary_address(
self, connect_instance_address: Optional[str] = None
) -> Optional[str]:
"""Get the cluster primary's address."""
- if not connect_instance_address:
- connect_instance_address = self.instance_address
- logger.debug(f"Getting cluster primary member's address from {connect_instance_address}")
+ logger.debug("Getting cluster primary member's address")
get_cluster_primary_commands = (
- f"shell.connect_to_primary('{self.cluster_admin_user}:{self.cluster_admin_password}@{connect_instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user, host=connect_instance_address)}')",
"primary_address = shell.parse_uri(session.uri)['host']",
"print(f'{primary_address}')",
)
@@ -2131,14 +2168,10 @@ def get_cluster_set_global_primary_address(
self, connect_instance_address: Optional[str] = None
) -> Optional[str]:
"""Get the cluster set global primary's address."""
- if not connect_instance_address:
- connect_instance_address = self.instance_address
- logger.debug(
- f"Getting cluster set global primary member's address from {connect_instance_address}"
- )
+ logger.debug("Getting cluster set global primary member's address")
get_cluster_set_global_primary_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{connect_instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, host=connect_instance_address)}')",
"cs = dba.get_cluster_set()",
"global_primary = cs.status()['globalPrimaryInstance']",
"print(f'{global_primary}')",
@@ -2154,7 +2187,12 @@ def get_cluster_set_global_primary_address(
if not matches:
return None
- return matches.group(1)
+ address = matches.group(1)
+ if ":" in address:
+ # strip port from address
+ address = address.split(":")[0]
+
+ return address
def get_primary_label(self) -> Optional[str]:
"""Get the label of the cluster's primary."""
@@ -2175,7 +2213,7 @@ def set_cluster_primary(self, new_primary_address: str) -> None:
logger.debug(f"Setting cluster primary to {new_primary_address}")
set_cluster_primary_commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
f"cluster.set_primary_instance('{new_primary_address}')",
)
@@ -2188,7 +2226,7 @@ def set_cluster_primary(self, new_primary_address: str) -> None:
def get_cluster_members_addresses(self) -> Optional[Iterable[str]]:
"""Get the addresses of the cluster's members."""
get_cluster_members_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
"members = ','.join((member['address'] for member in cluster.describe()['defaultReplicaSet']['topology']))",
"print(f'{members}')",
@@ -2210,8 +2248,7 @@ def get_cluster_members_addresses(self) -> Optional[Iterable[str]]:
def verify_server_upgradable(self, instance: Optional[str] = None) -> None:
"""Wrapper for API check_for_server_upgrade."""
check_command = [
- f"shell.connect('{self.server_config_user}"
- f":{self.server_config_password}@{instance or self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, host=instance)}')",
"try:",
" util.check_for_server_upgrade(options={'outputFormat': 'JSON'})",
"except ValueError:", # ValueError is raised for same version check
@@ -2244,7 +2281,7 @@ def get_mysql_version(self) -> Optional[str]:
logger.debug("Getting InnoDB version")
get_version_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
'result = session.run_sql("SELECT version()")',
'print(f"{result.fetch_one()[0]}")',
)
@@ -2267,8 +2304,11 @@ def grant_privileges_to_user(
) -> None:
"""Grants specified privileges to the provided database user."""
grant_privileges_commands = (
- f"shell.connect_to_primary('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
- f"session.run_sql(\"GRANT {', '.join(privileges)} ON *.* TO '{username}'@'{hostname}'{' WITH GRANT OPTION' if with_grant_option else ''}\")",
+ f"shell.connect_to_primary('{self.instance_def(self.server_config_user)}')",
+ (
+ f"session.run_sql(\"GRANT {', '.join(privileges)} ON *.* TO '{username}'@'{hostname}'"
+ f"{' WITH GRANT OPTION' if with_grant_option else ''}\")"
+ ),
)
try:
@@ -2279,26 +2319,22 @@ def grant_privileges_to_user(
def update_user_password(self, username: str, new_password: str, host: str = "%") -> None:
"""Updates user password in MySQL database."""
- logger.debug(f"Updating password for {username}.")
-
# password is set on the global primary
if not (instance_address := self.get_cluster_set_global_primary_address()):
raise MySQLCheckUserExistenceError("No primary found")
update_user_password_commands = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user, host=instance_address)}')",
f"session.run_sql(\"ALTER USER '{username}'@'{host}' IDENTIFIED BY '{new_password}';\")",
'session.run_sql("FLUSH PRIVILEGES;")',
)
+ logger.debug(f"Updating password for {username}.")
try:
self._run_mysqlsh_script("\n".join(update_user_password_commands))
- except MySQLClientError as e:
- logger.exception(
- f"Failed to update user password for user {username}",
- exc_info=e,
- )
- raise MySQLCheckUserExistenceError(e.message)
+ except MySQLClientError:
+ logger.exception(f"Failed to update user password for user {username}")
+ raise MySQLCheckUserExistenceError
@retry(reraise=True, stop=stop_after_attempt(3), wait=wait_fixed(GET_MEMBER_STATE_TIME))
def get_member_state(self) -> Tuple[str, str]:
@@ -2370,7 +2406,7 @@ def get_cluster_set_name(self, from_instance: Optional[str] = None) -> Optional[
def stop_group_replication(self) -> None:
"""Stop Group replication if enabled on the instance."""
stop_gr_command = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
"data = session.run_sql('SELECT 1 FROM performance_schema.replication_group_members')",
"if len(data.fetch_all()) > 0:",
" session.run_sql('STOP GROUP_REPLICATION')",
@@ -2383,7 +2419,7 @@ def stop_group_replication(self) -> None:
def reboot_from_complete_outage(self) -> None:
"""Wrapper for reboot_cluster_from_complete_outage command."""
reboot_from_outage_command = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
f"dba.reboot_cluster_from_complete_outage('{self.cluster_name}')",
)
@@ -2414,8 +2450,8 @@ def set_instance_offline_mode(self, offline_mode: bool = False) -> None:
try:
self._run_mysqlcli_script(
"; ".join(set_instance_offline_mode_commands),
- user=self.cluster_admin_user,
- password=self.cluster_admin_password,
+ user=self.server_config_user,
+ password=self.server_config_password,
)
except MySQLClientError as e:
logger.exception(f"Failed to set instance state to offline_mode {mode}")
@@ -2424,7 +2460,7 @@ def set_instance_offline_mode(self, offline_mode: bool = False) -> None:
def set_instance_option(self, option: str, value: Any) -> None:
"""Sets an instance option."""
set_instance_option_commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
f"cluster = dba.get_cluster('{self.cluster_name}')",
f"cluster.set_instance_option('{self.instance_address}', '{option}', '{value}')",
)
@@ -2439,7 +2475,7 @@ def offline_mode_and_hidden_instance_exists(self) -> bool:
"""Indicates whether an instance exists in offline_mode and hidden from router."""
offline_mode_message = "Instance has offline_mode enabled"
commands = (
- f"shell.connect('{self.cluster_admin_user}:{self.cluster_admin_password}@{self.instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
f"cluster_topology = dba.get_cluster('{self.cluster_name}').status()['defaultReplicaSet']['topology']",
f"selected_instances = [label for label, member in cluster_topology.items() if '{offline_mode_message}' in member.get('instanceErrors', '') and member.get('hiddenFromRouter')]",
"print(f'{len(selected_instances)}')",
@@ -2628,9 +2664,9 @@ def delete_temp_backup_directory(
except MySQLExecError as e:
logger.exception("Failed to delete temp backup directory")
raise MySQLDeleteTempBackupDirectoryError(e.message)
- except Exception as e:
+ except Exception:
logger.exception("Failed to delete temp backup directory")
- raise MySQLDeleteTempBackupDirectoryError(e)
+ raise MySQLDeleteTempBackupDirectoryError
def retrieve_backup_with_xbcloud(
self,
@@ -2886,7 +2922,7 @@ def tls_setup(
def kill_unencrypted_sessions(self) -> None:
"""Kill non local, non system open unencrypted connections."""
kill_connections_command = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
(
'processes = session.run_sql("'
"SELECT processlist_id FROM performance_schema.threads WHERE "
@@ -2906,7 +2942,7 @@ def kill_unencrypted_sessions(self) -> None:
def kill_client_sessions(self) -> None:
"""Kill non local, non system open unencrypted connections."""
kill_connections_command = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
(
'processes = session.run_sql("'
"SELECT processlist_id FROM performance_schema.threads WHERE "
@@ -2926,7 +2962,7 @@ def kill_client_sessions(self) -> None:
def check_mysqlsh_connection(self) -> bool:
"""Checks if it is possible to connect to the server with mysqlsh."""
connect_commands = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.instance_address}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
'session.run_sql("SELECT 1")',
)
@@ -2950,7 +2986,7 @@ def get_pid_of_port_3306(self) -> Optional[str]:
def flush_mysql_logs(self, logs_type: Union[MySQLTextLogs, list[MySQLTextLogs]]) -> None:
"""Flushes the specified logs_type logs."""
flush_logs_commands = [
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
'session.run_sql("SET sql_log_bin = 0")',
]
@@ -2975,7 +3011,7 @@ def flush_mysql_logs(self, logs_type: Union[MySQLTextLogs, list[MySQLTextLogs]])
def get_databases(self) -> set[str]:
"""Return a set with all databases on the server."""
list_databases_commands = (
- f"shell.connect('{self.server_config_user}:{self.server_config_password}@{self.socket_uri}')",
+ f"shell.connect('{self.instance_def(self.server_config_user)}')",
'result = session.run_sql("SHOW DATABASES")',
"for db in result.fetch_all():\n print(db[0])",
)
diff --git a/src/charm.py b/src/charm.py
index 770487cd9..fea61c42b 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -92,6 +92,7 @@
)
from flush_mysql_logs import FlushMySQLLogsCharmEvents, MySQLLogs
from hostname_resolution import MySQLMachineHostnameResolution
+from ip_address_observer import IPAddressChangeCharmEvents
from mysql_vm_helpers import (
MySQL,
MySQLCreateCustomMySQLDConfigError,
@@ -116,6 +117,10 @@ class MySQLDNotRestartedError(Error):
"""Exception raised when MySQLD is not restarted after configuring instance."""
+class MySQLCustomCharmEvents(FlushMySQLLogsCharmEvents, IPAddressChangeCharmEvents):
+ """Custom event sources for the charm."""
+
+
@trace_charm(
tracing_endpoint="tracing_endpoint",
extra_types=(
@@ -141,9 +146,7 @@ class MySQLOperatorCharm(MySQLCharmBase, TypedCharmBase[CharmConfig]):
"""Operator framework charm for MySQL."""
config_type = CharmConfig
- # FlushMySQLLogsCharmEvents needs to be defined on the charm object for logrotate
- # (which runs juju-run/juju-exec to dispatch a custom event from cron)
- on = FlushMySQLLogsCharmEvents() # type: ignore
+ on = MySQLCustomCharmEvents() # type: ignore
def __init__(self, *args):
super().__init__(*args)
diff --git a/src/config.py b/src/config.py
index 9ece1d0b6..94c2dd3f7 100644
--- a/src/config.py
+++ b/src/config.py
@@ -118,7 +118,8 @@ def experimental_max_connections_validator(cls, value: int) -> Optional[int]:
"""Check experimental max connections."""
if value < MAX_CONNECTIONS_FLOOR:
raise ValueError(
- f"experimental-max-connections must be greater than {MAX_CONNECTIONS_FLOOR}"
+                f"experimental-max-connections ({value=}) must be equal to or greater "
+                + f"than {MAX_CONNECTIONS_FLOOR}"
)
return value
diff --git a/src/hostname_resolution.py b/src/hostname_resolution.py
index 99bea682a..0f906a931 100644
--- a/src/hostname_resolution.py
+++ b/src/hostname_resolution.py
@@ -39,7 +39,7 @@ def __init__(self, charm: "MySQLOperatorCharm"):
self.ip_address_observer = IPAddressObserver(charm)
self.framework.observe(self.charm.on.config_changed, self._update_host_details_in_databag)
- self.framework.observe(self.on.ip_address_change, self._update_host_details_in_databag)
+ self.framework.observe(self.charm.on.ip_address_change, self._on_ip_address_change)
self.framework.observe(self.charm.on[PEER].relation_changed, self.update_etc_hosts)
self.framework.observe(self.charm.on[PEER].relation_departed, self.update_etc_hosts)
@@ -63,6 +63,14 @@ def _update_host_details_in_databag(self, _) -> None:
self.charm.unit_peer_data[HOSTNAME_DETAILS] = json.dumps(host_details)
+ def _on_ip_address_change(self, _) -> None:
+        """Handle IP address change.
+
+        admin_address is bound to the previous IP, requiring a mysqld restart.
+        """
+ self._update_host_details_in_databag(None)
+ self.charm._mysql.restart_mysqld()
+
def _get_host_details(self) -> list[HostsEntry]:
host_details = []
diff --git a/src/mysql_vm_helpers.py b/src/mysql_vm_helpers.py
index f2f39fd42..65e711b0b 100644
--- a/src/mysql_vm_helpers.py
+++ b/src/mysql_vm_helpers.py
@@ -383,7 +383,7 @@ def wait_until_mysql_connection(self, check_port: bool = True) -> None:
logger.debug("MySQL connection possible")
- def execute_backup_commands(
+ def execute_backup_commands( # type: ignore
self,
s3_directory: str,
s3_parameters: Dict[str, str],
@@ -402,7 +402,7 @@ def execute_backup_commands(
group=ROOT_SYSTEM_USER,
)
- def delete_temp_backup_directory(
+ def delete_temp_backup_directory( # type: ignore
self, from_directory: str = CHARMED_MYSQL_COMMON_DIRECTORY
) -> None:
"""Delete the temp backup directory."""
@@ -412,20 +412,25 @@ def delete_temp_backup_directory(
group=ROOT_SYSTEM_USER,
)
- def retrieve_backup_with_xbcloud(
+ def retrieve_backup_with_xbcloud( # type: ignore
self,
backup_id: str,
s3_parameters: Dict[str, str],
+ temp_restore_directory: str = CHARMED_MYSQL_COMMON_DIRECTORY,
+ xbcloud_location: str = CHARMED_MYSQL_XBCLOUD_LOCATION,
+ xbstream_location: str = CHARMED_MYSQL_XBSTREAM_LOCATION,
+ user=ROOT_SYSTEM_USER,
+ group=ROOT_SYSTEM_USER,
) -> Tuple[str, str, str]:
"""Retrieve the provided backup with xbcloud."""
return super().retrieve_backup_with_xbcloud(
backup_id,
s3_parameters,
- CHARMED_MYSQL_COMMON_DIRECTORY,
- CHARMED_MYSQL_XBCLOUD_LOCATION,
- CHARMED_MYSQL_XBSTREAM_LOCATION,
- user=ROOT_SYSTEM_USER,
- group=ROOT_SYSTEM_USER,
+ temp_restore_directory,
+ xbcloud_location,
+ xbstream_location,
+ user,
+ group,
)
def prepare_backup_for_restore(self, backup_location: str) -> Tuple[str, str]:
diff --git a/tests/integration/connector.py b/tests/integration/connector.py
index bd40c1cc8..0b6c58e07 100644
--- a/tests/integration/connector.py
+++ b/tests/integration/connector.py
@@ -2,6 +2,8 @@
# Copyright 2022 Canonical Ltd.
# See LICENSE file for licensing details.
+from time import sleep
+
import mysql.connector
@@ -37,3 +39,30 @@ def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.commit()
self.cursor.close()
self.connection.close()
+
+
+def create_db_connections(
+ num_connections: int, host: str, username: str, password: str, database: str
+) -> list[mysql.connector.MySQLConnection]:
+ """Create a list of database connections.
+
+ Args:
+ num_connections: Number of connections to create.
+ host: Hostname of the database.
+ username: Username to connect to the database.
+ password: Password to connect to the database.
+ database: Database to connect to.
+ """
+ connections = []
+ for _ in range(num_connections):
+ conn = mysql.connector.connect(
+ host=host,
+ user=username,
+ password=password,
+ database=database,
+ use_pure=True,
+ )
+ if conn.is_connected():
+ connections.append(conn)
+ sleep(0.5)
+ return connections
diff --git a/tests/integration/high_availability/high_availability_helpers.py b/tests/integration/high_availability/high_availability_helpers.py
index b73d93483..aab48cd90 100644
--- a/tests/integration/high_availability/high_availability_helpers.py
+++ b/tests/integration/high_availability/high_availability_helpers.py
@@ -355,37 +355,18 @@ async def ensure_all_units_continuous_writes_incrementing(
ops_test, primary, server_config_credentials
)
- select_all_continuous_writes_sql = [f"SELECT * FROM `{DATABASE_NAME}`.`{TABLE_NAME}`"]
-
- async with ops_test.fast_forward():
- for unit in mysql_units:
- for attempt in Retrying(
- reraise=True, stop=stop_after_delay(5 * 60), wait=wait_fixed(10)
- ):
- with attempt:
- # ensure that all units are up to date (including the previous primary)
- unit_address = await get_unit_ip(ops_test, unit.name)
-
+ async with ops_test.fast_forward(fast_interval="15s"):
+ for attempt in Retrying(reraise=True, stop=stop_after_delay(5 * 60), wait=wait_fixed(10)):
+ with attempt:
+ # ensure that all units are up to date (including the previous primary)
+ for unit in mysql_units:
# ensure the max written value is incrementing (continuous writes is active)
max_written_value = await get_max_written_value_in_database(
ops_test, unit, server_config_credentials
)
+ logger.info(f"{max_written_value=} on unit {unit.name}")
assert (
max_written_value > last_max_written_value
), "Continuous writes not incrementing"
- # ensure that the unit contains all values up to the max written value
- all_written_values = set(
- await execute_queries_on_unit(
- unit_address,
- server_config_credentials["username"],
- server_config_credentials["password"],
- select_all_continuous_writes_sql,
- )
- )
- numbers = set(range(1, max_written_value))
- assert (
- numbers <= all_written_values
- ), f"Missing numbers in database for unit {unit.name}"
-
last_max_written_value = max_written_value
diff --git a/tests/integration/high_availability/test_self_healing.py b/tests/integration/high_availability/test_self_healing.py
index 6c756f129..c34ee5287 100644
--- a/tests/integration/high_availability/test_self_healing.py
+++ b/tests/integration/high_availability/test_self_healing.py
@@ -215,7 +215,7 @@ async def test_network_cut(ops_test: OpsTest, highly_available_cluster, continuo
lambda: primary_unit.workload_status == "active", timeout=40 * 60
)
- await ensure_all_units_continuous_writes_incrementing(ops_test)
+ await ensure_all_units_continuous_writes_incrementing(ops_test)
# ensure that we are able to insert data into the primary and have it replicated to all units
database_name, table_name = "test-network-cut", "data"
diff --git a/tests/integration/test_saturate_max_connections.py b/tests/integration/test_saturate_max_connections.py
new file mode 100644
index 000000000..6603b0966
--- /dev/null
+++ b/tests/integration/test_saturate_max_connections.py
@@ -0,0 +1,95 @@
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import logging
+
+import pytest
+from mysql.connector.errors import OperationalError
+from pytest_operator.plugin import OpsTest
+
+from .connector import create_db_connections
+from .helpers import get_unit_ip
+from .juju_ import run_action
+
+logger = logging.getLogger(__name__)
+
+MYSQL_APP_NAME = "mysql"
+TEST_APP_NAME = "app"
+CONNECTIONS = 10
+
+
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_build_and_deploy(ops_test: OpsTest) -> None:
+    """Build the charm and deploy 1 unit to ensure a cluster is formed."""
+ charm = await ops_test.build_charm(".")
+ config = {"profile-limit-memory": "2000", "experimental-max-connections": CONNECTIONS}
+
+ await ops_test.model.deploy(
+ charm,
+ application_name=MYSQL_APP_NAME,
+ config=config,
+ num_units=1,
+ base="ubuntu@22.04",
+ )
+
+
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_deploy_and_relate_test_app(ops_test: OpsTest) -> None:
+ config = {"auto_start_writes": False, "sleep_interval": "500"}
+ logger.info("Deploying test app")
+ await ops_test.model.deploy(
+ "mysql-test-app",
+ application_name=TEST_APP_NAME,
+ num_units=1,
+ base="ubuntu@22.04",
+ config=config,
+ channel="latest/edge",
+ )
+
+ logger.info("Relating test app to mysql")
+ await ops_test.model.relate(MYSQL_APP_NAME, f"{TEST_APP_NAME}:database")
+
+ logger.info("Waiting all to be active")
+ await ops_test.model.block_until(
+ lambda: all(unit.workload_status == "active" for unit in ops_test.model.units.values()),
+ timeout=60 * 10,
+ wait_period=5,
+ )
+
+
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_saturate_max_connections(ops_test: OpsTest) -> None:
+ app_unit = ops_test.model.applications[TEST_APP_NAME].units[0]
+ mysql_unit = ops_test.model.applications[MYSQL_APP_NAME].units[0]
+
+ host_ip = await get_unit_ip(ops_test, mysql_unit.name)
+ logger.info("Running action to get app connection data")
+ credentials = await run_action(app_unit, "get-client-connection-data")
+ if "return-code" in credentials:
+        # juju 2.9 doesn't have the return-code key
+ del credentials["return-code"]
+ if "Code" in credentials:
+ del credentials["Code"]
+ credentials["host"] = host_ip
+
+ logger.info(f"Creating {CONNECTIONS} connections")
+ connections = create_db_connections(CONNECTIONS, **credentials)
+ assert isinstance(connections, list), "Connections not created"
+
+ logger.info("Ensure all connections are established")
+ for conn in connections:
+ assert conn.is_connected(), "Connection failed to establish"
+
+ assert len(connections) == CONNECTIONS, "Not all connections were established"
+
+ logger.info("Ensure no more client connections are possible")
+
+ with pytest.raises(OperationalError):
+ # exception raised when too many connections are attempted
+ create_db_connections(1, **credentials)
+
+ logger.info("Get cluster status while connections are saturated")
+ _ = await run_action(mysql_unit, "get-cluster-status")
diff --git a/tests/unit/test_mysql.py b/tests/unit/test_mysql.py
index 73b2f4ee0..8a7f58f7d 100644
--- a/tests/unit/test_mysql.py
+++ b/tests/unit/test_mysql.py
@@ -195,12 +195,12 @@ def test_configure_mysqlrouter_user(self, _run_mysqlsh_script):
_run_mysqlsh_script.return_value = ""
_expected_create_mysqlrouter_user_commands = "\n".join((
- "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"session.run_sql(\"CREATE USER 'test_username'@'1.1.1.1' IDENTIFIED BY 'test_password' ATTRIBUTE '{\\\"unit_name\\\": \\\"app/0\\\"}';\")",
))
_expected_mysqlrouter_user_grant_commands = "\n".join((
- "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"session.run_sql(\"GRANT CREATE USER ON *.* TO 'test_username'@'1.1.1.1' WITH GRANT OPTION;\")",
"session.run_sql(\"GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON mysql_innodb_cluster_metadata.* TO 'test_username'@'1.1.1.1';\")",
"session.run_sql(\"GRANT SELECT ON mysql.user TO 'test_username'@'1.1.1.1';\")",
@@ -241,7 +241,7 @@ def test_create_application_database_and_scoped_user(self, _run_mysqlsh_script):
_run_mysqlsh_script.return_value = ""
_expected_create_scoped_user_commands = "\n".join((
- "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
'session.run_sql("CREATE DATABASE IF NOT EXISTS `test-database`;")',
'session.run_sql("CREATE USER `test-username`@`1.1.1.1` IDENTIFIED BY \'test-password\' ATTRIBUTE \'{\\"unit_name\\": \\"app/0\\"}\';")',
'session.run_sql("GRANT USAGE ON *.* TO `test-username`@`1.1.1.1`;")',
@@ -278,8 +278,8 @@ def test_configure_instance(self, _wait_until_mysql_connection, _run_mysqlsh_scr
"""Test a successful execution of configure_instance."""
# Test with create_cluster_admin=False
configure_instance_commands = [
- f"dba.configure_instance('serverconfig:serverconfigpassword@{self.mysql.socket_uri}', ",
- '{"restart": "true"})',
+ "dba.configure_instance('serverconfig:serverconfigpassword@127.0.0.1:33062', ",
+ "{'restart': 'true'})",
]
self.mysql.configure_instance(create_cluster_admin=False)
@@ -290,8 +290,8 @@ def test_configure_instance(self, _wait_until_mysql_connection, _run_mysqlsh_scr
# Test with create_cluster_admin=True
configure_instance_commands[1] = (
- '{"restart": "true", '
- '"clusterAdmin": "clusteradmin", "clusterAdminPassword": "clusteradminpassword"})'
+ "{'restart': 'true', "
+ "'clusterAdmin': 'clusteradmin', 'clusterAdminPassword': 'clusteradminpassword'})"
)
self.mysql.configure_instance(create_cluster_admin=True)
@@ -351,8 +351,8 @@ def test_initialize_juju_units_operations_table_exception(self, _run_mysqlcli_sc
def test_create_cluster(self, _run_mysqlsh_script):
"""Test a successful execution of create_cluster."""
create_cluster_commands = (
- "shell.connect('serverconfig:serverconfigpassword@127.0.0.1')",
- 'cluster = dba.create_cluster(\'test_cluster\', {"communicationStack": "MySQL"})',
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
+ "cluster = dba.create_cluster('test_cluster', {'communicationStack': 'MySQL'})",
"cluster.set_instance_option('127.0.0.1', 'label', 'mysql-0')",
)
@@ -372,7 +372,7 @@ def test_create_cluster_exceptions(self, _run_mysqlsh_script):
def test_create_cluster_set(self, _run_mysqlsh_script):
"""Test a successful execution of create_cluster."""
create_cluster_commands = (
- "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster = dba.get_cluster('test_cluster')",
"cluster.create_cluster_set('test_cluster_set')",
)
@@ -395,7 +395,7 @@ def test_create_cluster_set_exceptions(self, _run_mysqlsh_script):
def test_add_instance_to_cluster(self, _run_mysqlsh_script, _acquire_lock, _release_lock):
"""Test a successful execution of create_cluster."""
add_instance_to_cluster_commands = (
- "shell.connect('clusteradmin:clusteradminpassword@127.0.0.1')\n"
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')\n"
"cluster = dba.get_cluster('test_cluster')\n"
"shell.options['dba.restartWaitTimeout'] = 3600\n"
"cluster.add_instance('clusteradmin@127.0.0.2', {'password': 'clusteradminpassword',"
@@ -434,7 +434,7 @@ def test_is_instance_configured_for_innodb(self, _run_mysqlsh_script):
"""Test with no exceptions while calling the is_instance_configured_for_innodb method."""
# test successfully configured instance
check_instance_configuration_commands = (
- "shell.connect('clusteradmin:clusteradminpassword@127.0.0.2')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.2:33062')",
"instance_configured = dba.check_instance_configuration()['status'] == 'ok'",
'print("INSTANCE_CONFIGURED" if instance_configured else "INSTANCE_NOT_CONFIGURED")',
)
@@ -469,7 +469,7 @@ def test_is_instance_configured_for_innodb_exceptions(self, _run_mysqlsh_script)
_run_mysqlsh_script.side_effect = MySQLClientError("Error on subprocess")
check_instance_configuration_commands = (
- "shell.connect('clusteradmin:clusteradminpassword@127.0.0.2')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.2:33062')",
"instance_configured = dba.check_instance_configuration()['status'] == 'ok'",
'print("INSTANCE_CONFIGURED" if instance_configured else "INSTANCE_NOT_CONFIGURED")',
)
@@ -486,7 +486,7 @@ def test_is_instance_configured_for_innodb_exceptions(self, _run_mysqlsh_script)
@patch("charms.mysql.v0.mysql.MySQLBase._run_mysqlsh_script")
def test_execute_remove_instance(self, _run_mysqlsh_script):
expected_remove_instance_commands = (
- "shell.connect('clusteradmin:clusteradminpassword@1.2.3.4')\n"
+ "shell.connect('serverconfig:serverconfigpassword@1.2.3.4:33062')\n"
"cluster = dba.get_cluster('test_cluster')\n"
"cluster.remove_instance('clusteradmin@127.0.0.1', "
"{'password': 'clusteradminpassword', 'force': 'false'})"
@@ -520,7 +520,7 @@ def test_remove_primary_instance(
self.mysql.remove_instance("mysql-0")
expected_remove_instance_commands = (
- "shell.connect('clusteradmin:clusteradminpassword@127.0.0.1')\n"
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')\n"
"cluster = dba.get_cluster('test_cluster')\n"
"cluster.remove_instance('clusteradmin@127.0.0.1', "
"{'password': 'clusteradminpassword', 'force': 'true'})"
@@ -587,7 +587,7 @@ def test_remove_primary_instance_error_releasing_lock(
self.mysql.remove_instance("mysql-0")
expected_remove_instance_commands = (
- "shell.connect('clusteradmin:clusteradminpassword@127.0.0.1')\n"
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')\n"
"cluster = dba.get_cluster('test_cluster')\n"
"cluster.remove_instance('clusteradmin@127.0.0.1', "
"{'password': 'clusteradminpassword', 'force': 'true'})"
@@ -609,7 +609,7 @@ def test_acquire_lock(self, _run_mysqlsh_script):
self.assertTrue(acquired_lock)
expected_acquire_lock_commands = "\n".join([
- "shell.connect('clusteradmin:clusteradminpassword@1.1.1.1')",
+ "shell.connect('serverconfig:serverconfigpassword@1.1.1.1:33062')",
"session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='mysql-0', status='in-progress' WHERE task='unit-teardown' AND executor='';\")",
"acquired_lock = session.run_sql(\"SELECT count(*) FROM mysql.juju_units_operations WHERE task='unit-teardown' AND executor='mysql-0';\").fetch_one()[0]",
"print(f'{acquired_lock}')",
@@ -640,8 +640,9 @@ def test_release_lock(self, _run_mysqlsh_script):
self.mysql._release_lock("2.2.2.2", "mysql-0", "unit-teardown")
expected_release_lock_commands = "\n".join([
- "shell.connect('clusteradmin:clusteradminpassword@2.2.2.2')",
- "session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started' WHERE task='unit-teardown' AND executor='mysql-0';\")",
+ "shell.connect('serverconfig:serverconfigpassword@2.2.2.2:33062')",
+ "r = session.run_sql(\"UPDATE mysql.juju_units_operations SET executor='', status='not-started' WHERE task='unit-teardown' AND executor='mysql-0';\")",
+ "print(r.get_affected_items_count())",
])
_run_mysqlsh_script.assert_called_once_with(expected_release_lock_commands)
@@ -658,7 +659,7 @@ def test_get_cluster_member_addresses(self, _run_mysqlsh_script):
self.assertTrue(valid)
expected_commands = "\n".join([
- f"shell.connect('clusteradmin:clusteradminpassword@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster = dba.get_cluster('test_cluster')",
"member_addresses = ','.join([member['address'] for label, member in cluster.status()['defaultReplicaSet']['topology'].items() if label not in ['mysql-0']])",
"print(f'{member_addresses}')",
@@ -699,7 +700,7 @@ def test_get_cluster_primary_address(self, _run_mysqlsh_script):
self.assertEqual(primary_address, "1.1.1.1")
expected_commands = "\n".join([
- "shell.connect_to_primary('clusteradmin:clusteradminpassword@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"primary_address = shell.parse_uri(session.uri)['host']",
"print(f'{primary_address}')",
])
@@ -719,7 +720,7 @@ def test_no_match_cluster_primary_address_with_connect_instance_address(
self.assertIsNone(primary_address)
expected_commands = "\n".join([
- "shell.connect_to_primary('clusteradmin:clusteradminpassword@127.0.0.2')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.2:33062')",
"primary_address = shell.parse_uri(session.uri)['host']",
"print(f'{primary_address}')",
])
@@ -734,7 +735,7 @@ def test_is_instance_in_cluster(self, _run_mysqlsh_script):
self.assertTrue(result)
expected_commands = "\n".join([
- f"shell.connect('clusteradmin:clusteradminpassword@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster = dba.get_cluster('test_cluster')",
"print(cluster.status()['defaultReplicaSet']['topology'].get('mysql-0', {}).get('status', 'NOT_A_MEMBER'))",
])
@@ -760,7 +761,7 @@ def test_get_cluster_status(self, _run_mysqlsh_script):
self.mysql.get_cluster_status()
expected_commands = "\n".join((
- f"shell.connect('clusteradmin:clusteradminpassword@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster = dba.get_cluster('test_cluster')",
"print(cluster.status({'extended': False}))",
))
@@ -780,7 +781,7 @@ def test_rescan_cluster(self, _run_mysqlsh_script):
"""Test a successful execution of rescan_cluster()."""
self.mysql.rescan_cluster()
expected_commands = "\n".join((
- f"shell.connect('clusteradmin:clusteradminpassword@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster = dba.get_cluster('test_cluster')",
"cluster.rescan({})",
))
@@ -790,7 +791,7 @@ def test_rescan_cluster(self, _run_mysqlsh_script):
def test_set_instance_option(self, _run_mysqlsh_script):
"""Test execution of set_instance_option()."""
expected_commands = "\n".join((
- f"shell.connect('{self.mysql.cluster_admin_user}:{self.mysql.cluster_admin_password}@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
f"cluster = dba.get_cluster('{self.mysql.cluster_name}')",
f"cluster.set_instance_option('{self.mysql.instance_address}', 'label', 'label-0')",
))
@@ -868,10 +869,7 @@ def test_delete_users_for_relation_failure(
def test_delete_user(self, _run_mysqlsh_script):
"""Test delete_user() method."""
expected_commands = "\n".join((
- (
- f"shell.connect_to_primary('{self.mysql.server_config_user}:"
- f"{self.mysql.server_config_password}@{self.mysql.instance_address}')"
- ),
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"session.run_sql(\"DROP USER `testuser`@'%'\")",
))
self.mysql.delete_user("testuser")
@@ -888,7 +886,7 @@ def test_get_mysql_version(self, _run_mysqlsh_script):
version = self.mysql.get_mysql_version()
expected_commands = "\n".join((
- f"shell.connect('clusteradmin:clusteradminpassword@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
'result = session.run_sql("SELECT version()")',
'print(f"{result.fetch_one()[0]}")',
))
@@ -905,7 +903,7 @@ def test_get_mysql_version(self, _run_mysqlsh_script):
def test_grant_privileges_to_user(self, _run_mysqlsh_script):
"""Test the successful execution of grant_privileges_to_user."""
expected_commands = "\n".join((
- "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"session.run_sql(\"GRANT CREATE USER ON *.* TO 'test_user'@'%' WITH GRANT OPTION\")",
))
@@ -918,7 +916,7 @@ def test_grant_privileges_to_user(self, _run_mysqlsh_script):
_run_mysqlsh_script.reset_mock()
expected_commands = "\n".join((
- "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"session.run_sql(\"GRANT SELECT, UPDATE ON *.* TO 'test_user'@'%'\")",
))
@@ -945,7 +943,7 @@ def test_get_cluster_endpoints(self, _, _is_cluster_replica):
def test_cluster_metadata_exists(self, _run_mysqlsh_script):
"""Test cluster_metadata_exists method."""
commands = "\n".join((
- "shell.connect('clusteradmin:clusteradminpassword@1.2.3.4')",
+ "shell.connect('serverconfig:serverconfigpassword@1.2.3.4:33062')",
(
'result = session.run_sql("SELECT cluster_name FROM mysql_innodb_cluster_metadata'
f".clusters where cluster_name = '{self.mysql.cluster_name}';\")"
@@ -956,7 +954,7 @@ def test_cluster_metadata_exists(self, _run_mysqlsh_script):
_run_mysqlsh_script.return_value = "True\n"
self.assertTrue(self.mysql.cluster_metadata_exists("1.2.3.4"))
- _run_mysqlsh_script.assert_called_once_with(commands, timeout=10)
+ _run_mysqlsh_script.assert_called_once_with(commands, timeout=60)
_run_mysqlsh_script.reset_mock()
_run_mysqlsh_script.side_effect = MySQLClientError
@@ -966,7 +964,7 @@ def test_cluster_metadata_exists(self, _run_mysqlsh_script):
def test_offline_mode_and_hidden_instance_exists(self, _run_mysqlsh_script):
"""Test the offline_mode_and_hidden_instance_exists() method."""
commands = (
- "shell.connect('clusteradmin:clusteradminpassword@127.0.0.1')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster_topology = dba.get_cluster('test_cluster').status()['defaultReplicaSet']['topology']",
"selected_instances = [label for label, member in cluster_topology.items() if 'Instance has offline_mode enabled' in member.get('instanceErrors', '') and member.get('hiddenFromRouter')]",
"print(f'{len(selected_instances)}')",
@@ -1605,7 +1603,7 @@ def test_tls_restore_deafult(self, _run_mysqlcli_script):
def test_kill_unencrypted_sessions(self, _run_mysqlsh_script):
"""Test kill non TLS connections."""
commands = (
- f"shell.connect('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
(
'processes = session.run_sql("'
"SELECT processlist_id FROM performance_schema.threads WHERE "
@@ -1624,7 +1622,7 @@ def test_kill_unencrypted_sessions(self, _run_mysqlsh_script):
def test_are_locks_acquired(self, _run_mysqlsh_script):
"""Test are_locks_acquired."""
commands = (
- f"shell.connect('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"result = session.run_sql(\"SELECT COUNT(*) FROM mysql.juju_units_operations WHERE status='in-progress';\")",
"print(f'{result.fetch_one()[0]}')",
)
@@ -1636,7 +1634,7 @@ def test_are_locks_acquired(self, _run_mysqlsh_script):
def test_get_mysql_user_for_unit(self, _run_mysqlsh_script):
"""Test get_mysql_user_for_unit."""
commands = (
- f"shell.connect('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"result = session.run_sql(\"SELECT USER, ATTRIBUTE->>'$.router_id' FROM "
"INFORMATION_SCHEMA.USER_ATTRIBUTES WHERE ATTRIBUTE->'$.created_by_user'='relation-1' AND"
" ATTRIBUTE->'$.created_by_juju_unit'='mysql-router-k8s/0'\")",
@@ -1661,10 +1659,7 @@ def test_get_mysql_user_for_unit(self, _run_mysqlsh_script):
def test_remove_router_from_cluster_metadata(self, _run_mysqlsh_script):
"""Test remove_user_from_cluster_metadata."""
commands = (
- (
- f"shell.connect_to_primary('{self.mysql.cluster_admin_user}:{self.mysql.cluster_admin_password}@"
- f"{self.mysql.instance_address}')"
- ),
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster = dba.get_cluster()",
'cluster.remove_router_metadata("1")',
)
@@ -1681,14 +1676,14 @@ def test_remove_router_from_cluster_metadata(self, _run_mysqlsh_script):
def test_set_dynamic_variables(self, _run_mysqlsh_script):
"""Test dynamic_variables."""
commands = (
- f"shell.connect('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
'session.run_sql("SET GLOBAL variable=value")',
)
self.mysql.set_dynamic_variable(variable="variable", value="value")
_run_mysqlsh_script.assert_called_with("\n".join(commands))
commands = (
- f"shell.connect('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
'session.run_sql("SET GLOBAL variable=`/a/path/value`")',
)
self.mysql.set_dynamic_variable(variable="variable", value="/a/path/value")
@@ -1717,7 +1712,7 @@ def test_get_variable_value(self, _run_mysqlsh_script):
def test_set_cluster_primary(self, _run_mysqlsh_script):
"""Test set_cluster_primary."""
commands = (
- f"shell.connect_to_primary('{self.mysql.server_config_user}:{self.mysql.server_config_password}@127.0.0.1')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cluster = dba.get_cluster('test_cluster')",
"cluster.set_primary_instance('test')",
)
@@ -1733,7 +1728,7 @@ def test_set_cluster_primary(self, _run_mysqlsh_script):
def test_verify_server_upgradable(self, _run_mysqlsh_script):
"""Test is_server_upgradable."""
commands = (
- f"shell.connect('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"try:\n util.check_for_server_upgrade(options={'outputFormat': 'JSON'})",
"except ValueError:",
" if session.run_sql('select @@version').fetch_all()[0][0].split('-')[0] == shell.version.split()[1]:",
@@ -1809,6 +1804,7 @@ def test_render_mysqld_configuration(self, _get_available_memory):
expected_config = {
"bind-address": "0.0.0.0",
"mysqlx-bind-address": "0.0.0.0",
+ "admin_address": "127.0.0.1",
"report_host": "127.0.0.1",
"max_connections": "724",
"innodb_buffer_pool_size": "23219666944",
@@ -1905,7 +1901,7 @@ def test_create_replica_cluster(self, _run_mysqlsh_script):
"communicationStack": "MySQL",
}
commands = (
- f"shell.connect_to_primary('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.instance_address}')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cs = dba.get_cluster_set()",
f"repl_cluster = cs.create_replica_cluster('{endpoint}','{replica_cluster_name}', {options})",
f"repl_cluster.set_instance_option('{endpoint}', 'label', '{instance_label}')",
@@ -1929,7 +1925,7 @@ def test_create_replica_cluster(self, _run_mysqlsh_script):
options["recoveryMethod"] = "clone"
commands2 = (
- f"shell.connect_to_primary('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.instance_address}')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cs = dba.get_cluster_set()",
f"repl_cluster = cs.create_replica_cluster('{endpoint}','{replica_cluster_name}', {options})",
f"repl_cluster.set_instance_option('{endpoint}', 'label', '{instance_label}')",
@@ -1944,7 +1940,7 @@ def test_remove_replica_cluster(self, _run_mysqlsh_script):
"""Test remove_replica_cluster."""
replica_cluster_name = "replica_cluster"
commands = [
- f"shell.connect_to_primary('{self.mysql.server_config_user}:{self.mysql.server_config_password}@{self.mysql.instance_address}')",
+ "shell.connect_to_primary('serverconfig:serverconfigpassword@127.0.0.1:33062')",
"cs = dba.get_cluster_set()",
f"cs.remove_cluster('{replica_cluster_name}')",
]
@@ -1990,8 +1986,7 @@ def test_get_cluster_node_count(self, _run_mysqlsh_script):
" WHERE member_state = 'ONLINE'"
)
commands = (
- f"shell.connect('{self.mysql.cluster_admin_user}:{self.mysql.cluster_admin_password}"
- f"@{self.mysql.socket_uri}')",
+ "shell.connect('serverconfig:serverconfigpassword@127.0.0.1:33062')",
f'result = session.run_sql("{query}")',
'print(f"{result.fetch_one()[0]}")',
)
diff --git a/tests/unit/test_mysqlsh_helpers.py b/tests/unit/test_mysqlsh_helpers.py
index fbb6e3c3d..3d030e37d 100644
--- a/tests/unit/test_mysqlsh_helpers.py
+++ b/tests/unit/test_mysqlsh_helpers.py
@@ -283,6 +283,7 @@ def test_write_mysqld_config(
"[mysqld]",
"bind-address = 0.0.0.0",
"mysqlx-bind-address = 0.0.0.0",
+ "admin_address = 127.0.0.1",
"report_host = 127.0.0.1",
"max_connections = 111",
"innodb_buffer_pool_size = 1234",
@@ -325,6 +326,7 @@ def test_write_mysqld_config(
"[mysqld]",
"bind-address = 0.0.0.0",
"mysqlx-bind-address = 0.0.0.0",
+ "admin_address = 127.0.0.1",
"report_host = 127.0.0.1",
"max_connections = 100",
"innodb_buffer_pool_size = 20971520",