diff --git a/helm-chart/kube-hpa-scale-to-zero/Chart.yaml b/helm-chart/kube-hpa-scale-to-zero/Chart.yaml
index a50d78d..2560209 100644
--- a/helm-chart/kube-hpa-scale-to-zero/Chart.yaml
+++ b/helm-chart/kube-hpa-scale-to-zero/Chart.yaml
@@ -2,5 +2,5 @@ apiVersion: v2
 name: kube-hpa-scale-to-zero
 description: See https://github.com/machine424/kube-hpa-scale-to-zero
 type: application
-version: 0.3.0
-appVersion: "0.3.0"
+version: 0.4.0
+appVersion: "0.4.0"
diff --git a/helm-chart/kube-hpa-scale-to-zero/templates/rbac.yaml b/helm-chart/kube-hpa-scale-to-zero/templates/rbac.yaml
index b96f8ab..f6a7bfa 100644
--- a/helm-chart/kube-hpa-scale-to-zero/templates/rbac.yaml
+++ b/helm-chart/kube-hpa-scale-to-zero/templates/rbac.yaml
@@ -10,6 +10,9 @@ rules:
   - apiGroups: ["apps"]
     resources: ["deployments/scale"]
     verbs: ["get", "patch"]
+  - apiGroups: ["apps"]
+    resources: ["statefulsets/scale"]
+    verbs: ["get", "patch"]
   - apiGroups: ["autoscaling"]
     resources: ["horizontalpodautoscalers"]
     verbs: ["get", "list", "watch"]
diff --git a/main.py b/main.py
index 6160744..a496ce5 100644
--- a/main.py
+++ b/main.py
@@ -150,8 +150,14 @@ def update_target(hpa: HPA) -> None:
                 name=hpa.target_name,
                 needed_replicas=needed_replicas,
             )
+        case "StatefulSet":
+            scale_statefulset(
+                namespace=hpa.namespace,
+                name=hpa.target_name,
+                needed_replicas=needed_replicas,
+            )
         case _:
-            raise ValueError("Only support Deployment as HPA target for now.")
+            raise ValueError(f"Target kind {hpa.target_kind} not supported.")
 
 
 def scaling_is_needed(*, current_replicas, needed_replicas) -> bool:
@@ -180,6 +186,24 @@ def scale_deployment(*, namespace, name, needed_replicas) -> None:
         LOGGER.warning(f"Deployment {namespace}/{name} was not found.")
 
 
+def scale_statefulset(*, namespace, name, needed_replicas) -> None:
+    try:
+        scale = APP_V1.read_namespaced_stateful_set_scale(namespace=namespace, name=name)
+        current_replicas = scale.status.replicas
+        if not scaling_is_needed(current_replicas=current_replicas, needed_replicas=needed_replicas):
+            LOGGER.info(f"No need to scale statefulset {namespace}/{name} {current_replicas=} {needed_replicas=}.")
+            return
+
+        scale.spec.replicas = needed_replicas
+        # Maybe do not scale immediately? but don't want to reimplement an HPA.
+        APP_V1.patch_namespaced_stateful_set_scale(namespace=namespace, name=name, body=scale)
+        LOGGER.info(f"StatefulSet {namespace}/{name} was scaled {current_replicas=}->{needed_replicas=}.")
+    except kubernetes.client.exceptions.ApiException as exc:
+        if exc.status != 404:
+            raise exc
+        LOGGER.warning(f"StatefulSet {namespace}/{name} was not found.")
+
+
 def parse_cli_args():
     parser = argparse.ArgumentParser(
         description="kube-hpa-scale-to-zero. Check https://github.com/machine424/kube-hpa-scale-to-zero"
diff --git a/tests/e2e_test.py b/tests/e2e_test.py
index 250b635..6864bdd 100644
--- a/tests/e2e_test.py
+++ b/tests/e2e_test.py
@@ -43,6 +43,7 @@ def setup():
                 "prometheus-community/prometheus",
                 "--values",
                 f"{MANIFESTS_PATH}/prometheus-values.yaml",
+                "--wait",
             ]
         )
         run(
@@ -57,32 +58,20 @@ def setup():
             ]
         )
 
-        run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml"])
+        run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml", "--wait=true"])
         yield
     finally:
-        run(
-            command=[
-                "helm",
-                "delete",
-                "prometheus",
-            ]
-        )
-        run(
-            command=[
-                "helm",
-                "delete",
-                "prometheus-adapter",
-            ]
-        )
-        run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml"])
+        run(command=["helm", "delete", "prometheus", "--wait"])
+        run(command=["helm", "delete", "prometheus-adapter", "--wait"])
+        run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/metrics-generator.yaml", "--wait=true"])
 
 
 def deploy_target(manifest: str):
-    run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/{manifest}"])
+    run(command=["kubectl", "apply", "-f", f"{MANIFESTS_PATH}/{manifest}", "--wait=true"])
 
 
 def delete_target(manifest: str):
-    run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/{manifest}"])
+    run(command=["kubectl", "delete", "-f", f"{MANIFESTS_PATH}/{manifest}", "--wait=true"])
 
 
 def run_scaler():
@@ -103,38 +92,39 @@ def set_foo_metric_value(value: int):
     run(command=["kubectl", "rollout", "status", "deployment", "metrics-generator"])
 
 
-def wait_deployment_scale(*, name: str, replicas: int):
+def wait_scale(*, kind: str, name: str, replicas: int):
     run(
         command=[
             "kubectl",
             "wait",
             f"--for=jsonpath={{.spec.replicas}}={replicas}",
-            "deployment",
+            kind,
             name,
             f"--timeout={TIMEOUT}s",
         ]
     )
 
 
-def test_target_1(setup):
-    target_name = "target-1"
+@pytest.mark.parametrize("target_name, kind", [("target-1", "deployment"), ("target-2", "statefulset")])
+def test_target(setup, target_name: str, kind: str):
+    set_foo_metric_value(0)
 
     deploy_target(f"{target_name}.yaml")
 
     # The intial replicas count is 1
-    wait_deployment_scale(name=target_name, replicas=1)
+    wait_scale(kind=kind, name=target_name, replicas=1)
 
     khstz = run_scaler()
 
     try:
         # The initial metric value is 0, it should scale the target to 0
-        wait_deployment_scale(name=target_name, replicas=0)
+        wait_scale(kind=kind, name=target_name, replicas=0)
 
         # Increase the metric value
        set_foo_metric_value(10)
 
         # The deloyment was revived anf the HPA was able to scale it up
-        wait_deployment_scale(name=target_name, replicas=3)
+        wait_scale(kind=kind, name=target_name, replicas=3)
     finally:
         khstz.kill()
         delete_target(f"{target_name}.yaml")
diff --git a/tests/manifests/target-1.yaml b/tests/manifests/target-1.yaml
index ec1a909..89f395a 100644
--- a/tests/manifests/target-1.yaml
+++ b/tests/manifests/target-1.yaml
@@ -41,5 +41,5 @@ spec:
     spec:
       terminationGracePeriodSeconds: 1
       containers:
-      - name: nginx
-        image: nginx:latest
+        - name: nginx
+          image: nginx:stable-alpine-slim
diff --git a/tests/manifests/target-2.yaml b/tests/manifests/target-2.yaml
new file mode 100644
index 0000000..53f6007
--- /dev/null
+++ b/tests/manifests/target-2.yaml
@@ -0,0 +1,64 @@
+apiVersion: autoscaling/v2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: target-2
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: StatefulSet
+    name: target-2
+  minReplicas: 1
+  maxReplicas: 3
+  metrics:
+    - type: Object
+      object:
+        metric:
+          name: foo_metric
+        describedObject:
+          apiVersion: "/v1"
+          kind: Service
+          name: metrics-generator
+        target:
+          type: Value
+          value: 1
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: target-2
+  labels:
+    app: target-2
+spec:
+  ports:
+    - port: 80
+      name: web
+  clusterIP: None
+  selector:
+    app: target-2
+
+---
+
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: target-2
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: target-2
+  serviceName: target-2
+  template:
+    metadata:
+      labels:
+        app: target-2
+    spec:
+      terminationGracePeriodSeconds: 1
+      containers:
+        - name: nginx
+          image: nginx:stable-alpine-slim
+          ports:
+            - containerPort: 80
+              name: web