diff --git a/ci/limits.json b/ci/limits.json index bff56ae0..c716de4e 100644 --- a/ci/limits.json +++ b/ci/limits.json @@ -4,7 +4,7 @@ "github.com/theketchio/ketch/cmd/ketch/output": 68.3, "github.com/theketchio/ketch/cmd/manager": 0, "github.com/theketchio/ketch/codeprofile/unit_test_coverage": 0, - "github.com/theketchio/ketch/internal/api/v1beta1": 33.0, + "github.com/theketchio/ketch/internal/api/v1beta1": 32.8, "github.com/theketchio/ketch/internal/api/v1beta1/mocks": 0, "github.com/theketchio/ketch/internal/build": 92.3, "github.com/theketchio/ketch/internal/chart": 67.0, diff --git a/config/crd/bases/theketch.io_apps.yaml b/config/crd/bases/theketch.io_apps.yaml index a69e81a9..b0fe335f 100644 --- a/config/crd/bases/theketch.io_apps.yaml +++ b/config/crd/bases/theketch.io_apps.yaml @@ -2544,8 +2544,35 @@ spec: description: ServiceAccountName specifies a service account name to be used for this application. type: string + type: + description: Type specifies whether an app should be a deployment + or a statefulset + enum: + - Deployment + - StatefulSet + type: string version: type: string + volumeClaimTemplates: + description: VolumeClaimTemplates is a list of an app's volumeClaimTemplates + items: + properties: + accessModes: + items: + type: string + type: array + name: + type: string + storage: + type: string + storageClassName: + type: string + required: + - accessModes + - name + - storage + type: object + type: array required: - deployments - framework diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 06f1d58b..82aec07b 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -89,6 +89,18 @@ rules: - patch - update - watch +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - autoscaling resources: diff --git a/internal/api/v1beta1/app_types.go b/internal/api/v1beta1/app_types.go index 186861d6..37ccaf8c 100644 --- 
a/internal/api/v1beta1/app_types.go
+++ b/internal/api/v1beta1/app_types.go
@@ -244,6 +244,34 @@ type AppSpec struct {
 	// +optional
 	// +kubebuilder:pruning:PreserveUnknownFields
 	Extensions []runtime.RawExtension `json:"extensions,omitempty"`
+	// VolumeClaimTemplates is a list of an app's volumeClaimTemplates
+	VolumeClaimTemplates []PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"`
+
+	// Type specifies whether an app should be a deployment or a statefulset
+	// +kubebuilder:default:=Deployment
+	Type *AppType `json:"type,omitempty"`
+}
+
+// +kubebuilder:validation:Enum=Deployment;StatefulSet
+type AppType string
+
+const (
+	DeploymentAppType  AppType = "Deployment"
+	StatefulSetAppType AppType = "StatefulSet"
+)
+
+func (spec AppSpec) GetType() AppType {
+	if spec.Type == nil {
+		return DeploymentAppType
+	}
+	return *spec.Type
+}
+
+type PersistentVolumeClaim struct {
+	Name             string                          `json:"name"`
+	AccessModes      []v1.PersistentVolumeAccessMode `json:"accessModes"`
+	StorageClassName *string                         `json:"storageClassName,omitempty"`
+	Storage          string                          `json:"storage"`
 }
 
 // MetadataItem represent a request to add label/annotations to processes
diff --git a/internal/api/v1beta1/app_types_test.go b/internal/api/v1beta1/app_types_test.go
index 279e07dc..eaf34861 100644
--- a/internal/api/v1beta1/app_types_test.go
+++ b/internal/api/v1beta1/app_types_test.go
@@ -1518,3 +1518,32 @@ func TestParseAppReconcileOutcome_Multiple(t *testing.T) {
 		}
 	}
 }
+
+func TestAppType(t *testing.T) {
+	at := AppType("NonExistentType")
+	tt := []struct {
+		name     string
+		appType  *AppType
+		expected AppType
+	}{
+		{
+			name:     "return specified type",
+			appType:  &at,
+			expected: at,
+		},
+		{
+			name:     "type is nil",
+			appType:  nil,
+			expected: DeploymentAppType,
+		},
+	}
+	for _, tc := range tt {
+		t.Run(tc.name, func(t *testing.T) {
+			as := AppSpec{
+				Type: tc.appType,
+			}
+			appType := as.GetType()
+			require.Equal(t, tc.expected, appType)
+		})
+	}
+}
diff --git
a/internal/chart/application_chart.go b/internal/chart/application_chart.go index 1463e119..c0aceff1 100644 --- a/internal/chart/application_chart.go +++ b/internal/chart/application_chart.go @@ -59,6 +59,10 @@ type app struct { ServiceAccountName string `json:"serviceAccountName"` // SecurityContext specifies security settings for a pod/app, which get applied to all containers. SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` + // VolumeClaimTemplates is a list of an app's volumeClaimTemplates + VolumeClaimTemplates []ketchv1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` + // Type specifies whether the app should be a deployment or a statefulset + Type ketchv1.AppType `json:"type"` } type deployment struct { @@ -133,6 +137,7 @@ func New(application *ketchv1.App, framework *ketchv1.Framework, opts ...Option) MetadataLabels: application.Spec.Labels, MetadataAnnotations: application.Spec.Annotations, ServiceAccountName: application.Spec.ServiceAccountName, + Type: application.Spec.GetType(), }, IngressController: &framework.Spec.IngressController, } @@ -141,6 +146,10 @@ func New(application *ketchv1.App, framework *ketchv1.Framework, opts ...Option) values.App.SecurityContext = application.Spec.SecurityContext } + if application.Spec.VolumeClaimTemplates != nil { + values.App.VolumeClaimTemplates = application.Spec.VolumeClaimTemplates + } + for _, deploymentSpec := range application.Spec.Deployments { deployment := deployment{ Image: deploymentSpec.Image, diff --git a/internal/chart/application_chart_test.go b/internal/chart/application_chart_test.go index c0faa54b..5e5ee96e 100644 --- a/internal/chart/application_chart_test.go +++ b/internal/chart/application_chart_test.go @@ -236,6 +236,31 @@ func TestNewApplicationChart(t *testing.T) { } return &out } + setVolumeClaimTemplates := func(app *ketchv1.App) *ketchv1.App { + out := *app + storageClass := "standard" + out.Spec.VolumeClaimTemplates = 
[]ketchv1.PersistentVolumeClaim{ + { + Name: "v1-shipa", + AccessModes: []v1.PersistentVolumeAccessMode{"ReadWriteMany"}, + StorageClassName: &storageClass, + Storage: "1Gi", + }, + { + Name: "v2-shipa", + AccessModes: []v1.PersistentVolumeAccessMode{"ReadWriteOnce"}, + StorageClassName: &storageClass, + Storage: "1Gi", + }, + } + return &out + } + setStatefulSet := func(app *ketchv1.App) *ketchv1.App { + out := *app + appType := ketchv1.StatefulSetAppType + out.Spec.Type = &appType + return &out + } tests := []struct { name string @@ -287,6 +312,16 @@ func TestNewApplicationChart(t *testing.T) { framework: frameworkWithClusterIssuer, wantYamlsFilename: "dashboard-istio-cluster-issuer-pod-security-context", }, + { + name: "istio templates with cluster issuer and volume claim templates", + opts: []Option{ + WithTemplates(templates.IstioDefaultTemplates), + WithExposedPorts(exportedPorts), + }, + application: setStatefulSet(setVolumeClaimTemplates(dashboard)), + framework: frameworkWithClusterIssuer, + wantYamlsFilename: "dashboard-istio-cluster-issuer-volume-claim-templates", + }, { name: "istio templates without cluster issuer", opts: []Option{ diff --git a/internal/chart/post_render.go b/internal/chart/post_render.go index 88f87766..e29bb911 100644 --- a/internal/chart/post_render.go +++ b/internal/chart/post_render.go @@ -21,7 +21,6 @@ type postRender struct { } func (p *postRender) Run(renderedManifests *bytes.Buffer) (modifiedManifests *bytes.Buffer, err error) { - var configMapList v1.ConfigMapList opts := &client.ListOptions{Namespace: p.namespace} if err := p.cli.List(context.Background(), &configMapList, opts); err != nil { diff --git a/internal/chart/testdata/charts/dashboard-istio-cluster-issuer-volume-claim-templates.yaml b/internal/chart/testdata/charts/dashboard-istio-cluster-issuer-volume-claim-templates.yaml new file mode 100755 index 00000000..e3c991e1 --- /dev/null +++ 
b/internal/chart/testdata/charts/dashboard-istio-cluster-issuer-volume-claim-templates.yaml @@ -0,0 +1,654 @@ +--- +# Source: dashboard/templates/gateway_service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/is-isolated-run: "false" + name: app-dashboard +spec: + type: ClusterIP + ports: + - name: http-default-1 + port: 9091 + protocol: TCP + targetPort: 9091 + selector: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" +--- +# Source: dashboard/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + name: dashboard-web-3 +spec: + type: ClusterIP + ports: + - name: http-default-1 + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" +--- +# Source: dashboard/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + name: dashboard-worker-3 +spec: + type: ClusterIP + ports: + - name: http-default-1 + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" +--- +# Source: dashboard/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + annotations: + theketch.io/test-annotation: 
"test-annotation-value" + name: dashboard-web-4 +spec: + type: ClusterIP + ports: + - name: http-default-1 + port: 9091 + protocol: TCP + targetPort: 9091 + selector: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" +--- +# Source: dashboard/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + name: dashboard-worker-4 +spec: + type: ClusterIP + ports: + - name: http-default-1 + port: 9091 + protocol: TCP + targetPort: 9091 + selector: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" +--- +# Source: dashboard/templates/stateful_set.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-process-replicas: "3" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + theketch.io/test-label: "test-label-value" + theketch.io/test-label-all: "test-label-value-all" + name: dashboard-web-3 +spec: + selector: + matchLabels: + app: "dashboard" + version: "3" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + serviceName: "dashboard" + template: + metadata: + labels: + app: "dashboard" + version: "3" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + pod.io/label: "pod-label" + annotations: + pod.io/annotation: "pod-annotation" + spec: + containers: + - name: dashboard-web-3 + command: ["python"] + env: + - name: TEST_API_KEY + value: SECRET + - name: TEST_API_URL + value: 
example.com + - name: port + value: "9090" + - name: PORT + value: "9090" + - name: PORT_web + value: "9090" + - name: VAR + value: VALUE + image: shipasoftware/go-app:v1 + ports: + - containerPort: 9090 + volumeMounts: + - mountPath: /test-ebs + name: test-volume + resources: + limits: + cpu: 5Gi + memory: 5300m + requests: + cpu: 5Gi + memory: 5300m + readinessProbe: + failureThreshold: 3 + httpGet: + path: /actuator/health/liveness + port: 9090 + scheme: HTTP + periodSeconds: 10 + timeoutSeconds: 60 + imagePullSecrets: + - name: registry-secret + - name: private-registry-secret + volumes: + - awsElasticBlockStore: + fsType: ext4 + volumeID: volume-id + name: test-volume + volumeClaimTemplates: + - metadata: + name: v1-shipa + spec: + accessModes: [ReadWriteMany] + storageClassName: "standard" + resources: + requests: + storage: 1Gi + - metadata: + name: v2-shipa + spec: + accessModes: [ReadWriteOnce] + storageClassName: "standard" + resources: + requests: + storage: 1Gi +--- +# Source: dashboard/templates/stateful_set.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-process-replicas: "1" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + theketch.io/test-label-all: "test-label-value-all" + name: dashboard-worker-3 +spec: + selector: + matchLabels: + app: "dashboard" + version: "3" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + serviceName: "dashboard" + template: + metadata: + labels: + app: "dashboard" + version: "3" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "3" + theketch.io/is-isolated-run: "false" + spec: + containers: + - name: dashboard-worker-3 + command: ["celery"] + env: + - name: port + value: "9090" + - name: PORT + value: "9090" + - name: 
PORT_worker + value: "9090" + - name: VAR + value: VALUE + image: shipasoftware/go-app:v1 + ports: + - containerPort: 9090 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /actuator/health/liveness + port: 9090 + scheme: HTTP + periodSeconds: 10 + timeoutSeconds: 60 + imagePullSecrets: + - name: registry-secret + - name: private-registry-secret + volumeClaimTemplates: + - metadata: + name: v1-shipa + spec: + accessModes: [ReadWriteMany] + storageClassName: "standard" + resources: + requests: + storage: 1Gi + - metadata: + name: v2-shipa + spec: + accessModes: [ReadWriteOnce] + storageClassName: "standard" + resources: + requests: + storage: 1Gi +--- +# Source: dashboard/templates/stateful_set.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-process-replicas: "3" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + theketch.io/test-label-all: "test-label-value-all" + name: dashboard-web-4 +spec: + selector: + matchLabels: + app: "dashboard" + version: "4" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + serviceName: "dashboard" + template: + metadata: + labels: + app: "dashboard" + version: "4" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "web" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + spec: + containers: + - name: dashboard-web-4 + command: ["python"] + env: + - name: port + value: "9091" + - name: PORT + value: "9091" + - name: PORT_web + value: "9091" + - name: VAR + value: VALUE + image: shipasoftware/go-app:v2 + ports: + - containerPort: 9091 + imagePullSecrets: + - name: default-image-pull-secret + volumeClaimTemplates: + - metadata: + name: v1-shipa + spec: + accessModes: [ReadWriteMany] + storageClassName: "standard" + resources: + requests: + storage: 1Gi + 
- metadata: + name: v2-shipa + spec: + accessModes: [ReadWriteOnce] + storageClassName: "standard" + resources: + requests: + storage: 1Gi +--- +# Source: dashboard/templates/stateful_set.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-process-replicas: "1" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + theketch.io/test-label-all: "test-label-value-all" + name: dashboard-worker-4 +spec: + selector: + matchLabels: + app: "dashboard" + version: "4" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + serviceName: "dashboard" + template: + metadata: + labels: + app: "dashboard" + version: "4" + theketch.io/app-name: "dashboard" + theketch.io/app-process: "worker" + theketch.io/app-deployment-version: "4" + theketch.io/is-isolated-run: "false" + spec: + containers: + - name: dashboard-worker-4 + command: ["celery"] + env: + - name: port + value: "9091" + - name: PORT + value: "9091" + - name: PORT_worker + value: "9091" + - name: VAR + value: VALUE + image: shipasoftware/go-app:v2 + ports: + - containerPort: 9091 + imagePullSecrets: + - name: default-image-pull-secret + volumeClaimTemplates: + - metadata: + name: v1-shipa + spec: + accessModes: [ReadWriteMany] + storageClassName: "standard" + resources: + requests: + storage: 1Gi + - metadata: + name: v2-shipa + spec: + accessModes: [ReadWriteOnce] + storageClassName: "standard" + resources: + requests: + storage: 1Gi +--- +# Source: dashboard/templates/certificate.yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "dashboard-cname-theketch-io" + namespace: istio-system + labels: + theketch.io/app-name: "dashboard" +spec: + secretName: dashboard-cname-theketch-io + secretTemplate: + labels: + theketch.io/app-name: "dashboard" + dnsNames: + - theketch.io 
+ issuerRef: + name: letsencrypt-production + kind: ClusterIssuer +--- +# Source: dashboard/templates/certificate.yaml +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: "dashboard-cname-app-theketch-io" + namespace: istio-system + labels: + theketch.io/app-name: "dashboard" +spec: + secretName: dashboard-cname-app-theketch-io + secretTemplate: + labels: + theketch.io/app-name: "dashboard" + dnsNames: + - app.theketch.io + issuerRef: + name: letsencrypt-production + kind: ClusterIssuer +--- +# Source: dashboard/templates/destinationRule.yaml +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: shipa-dashboard-rule-3 + labels: + theketch.io/app-name: "dashboard" +spec: + host: dashboard-web-3 + subsets: + - name: v3 + labels: + app: "dashboard" + version: "3" +--- +# Source: dashboard/templates/destinationRule.yaml +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: shipa-dashboard-rule-4 + labels: + theketch.io/app-name: "dashboard" +spec: + host: dashboard-web-4 + subsets: + - name: v4 + labels: + app: "dashboard" + version: "4" +--- +# Source: dashboard/templates/gateway.yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + labels: + theketch.io/app-name: "dashboard" + name: dashboard-http-gateway + annotations: + theketch.io/metadata-item-kind: Gateway + theketch.io/metadata-item-apiVersion: networking.istio.io/v1alpha3 + theketch.io/gateway-annotation: "test-gateway" +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 80 + name: http-3 + protocol: HTTP + hosts: + - dashboard.10.10.10.10.shipa.cloud + - port: + number: 443 + name: https-3-theketch.io + protocol: HTTPS + tls: + mode: SIMPLE + credentialName: dashboard-cname-theketch-io + hosts: + - theketch.io + - port: + name: http-to-https-3-theketch.io + number: 80 + protocol: HTTP + hosts: + - theketch.io + tls: + httpsRedirect: true + - port: + number: 443 + name: 
https-3-app.theketch.io + protocol: HTTPS + tls: + mode: SIMPLE + credentialName: dashboard-cname-app-theketch-io + hosts: + - app.theketch.io + - port: + name: http-to-https-3-app.theketch.io + number: 80 + protocol: HTTP + hosts: + - app.theketch.io + tls: + httpsRedirect: true + - port: + number: 443 + name: https-3-darkweb.theketch.io + protocol: HTTPS + tls: + mode: SIMPLE + credentialName: darkweb-ssl + hosts: + - darkweb.theketch.io + - port: + name: http-to-https-3-darkweb.theketch.io + number: 80 + protocol: HTTP + hosts: + - darkweb.theketch.io + tls: + httpsRedirect: true + - port: + number: 80 + name: http-4 + protocol: HTTP + hosts: + - dashboard.10.10.10.10.shipa.cloud + - port: + number: 443 + name: https-4-theketch.io + protocol: HTTPS + tls: + mode: SIMPLE + credentialName: dashboard-cname-theketch-io + hosts: + - theketch.io + - port: + name: http-to-https-4-theketch.io + number: 80 + protocol: HTTP + hosts: + - theketch.io + tls: + httpsRedirect: true + - port: + number: 443 + name: https-4-app.theketch.io + protocol: HTTPS + tls: + mode: SIMPLE + credentialName: dashboard-cname-app-theketch-io + hosts: + - app.theketch.io + - port: + name: http-to-https-4-app.theketch.io + number: 80 + protocol: HTTP + hosts: + - app.theketch.io + tls: + httpsRedirect: true + - port: + number: 443 + name: https-4-darkweb.theketch.io + protocol: HTTPS + tls: + mode: SIMPLE + credentialName: darkweb-ssl + hosts: + - darkweb.theketch.io + - port: + name: http-to-https-4-darkweb.theketch.io + number: 80 + protocol: HTTP + hosts: + - darkweb.theketch.io + tls: + httpsRedirect: true +--- +# Source: dashboard/templates/virtualService.yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + annotations: + kubernetes.io/ingress.class: "ingress-class" + labels: + theketch.io/app-name: "dashboard" + name: dashboard-http +spec: + hosts: + - dashboard.10.10.10.10.shipa.cloud + - theketch.io + - app.theketch.io + - darkweb.theketch.io + gateways: + - 
dashboard-http-gateway + http: + - route: + - destination: + host: dashboard-web-3 + port: + number: 9090 + subset: "v3" + weight: 30 + - destination: + host: dashboard-web-4 + port: + number: 9091 + subset: "v4" + weight: 70 \ No newline at end of file diff --git a/internal/controllers/app_controller.go b/internal/controllers/app_controller.go index a57eba34..936c74fc 100644 --- a/internal/controllers/app_controller.go +++ b/internal/controllers/app_controller.go @@ -27,7 +27,6 @@ import ( "github.com/go-logr/logr" "github.com/pkg/errors" "helm.sh/helm/v3/pkg/release" - appsv1 "k8s.io/api/apps/v1" "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/api/core/v1" k8sErrors "k8s.io/apimachinery/pkg/api/errors" @@ -90,6 +89,7 @@ const ( // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="apps",resources=statefulsets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="networking.k8s.io",resources=ingresses,verbs=get;list;watch;create;update;patch;delete @@ -224,6 +224,76 @@ func (r appReconcileResult) isConflictError() bool { } } +type condition struct { + Type string + Reason string +} + +// workload contains the needed information for watchDeployEvents logic +// deployments and statefulsets are both supported so it became necessary +// to abstract their common properties into a separate type +type workload struct { + Name string + Replicas int + UpdatedReplicas int + ReadyReplicas int + Generation int + ObservedGeneration int + Conditions []condition +} + +type workloadClient struct { + 
workloadType ketchv1.AppType + workloadName string + workloadNamespace string + k8sClient kubernetes.Interface +} + +// Get populates workload values based on the workloadType and returns the populated struct +func (cli workloadClient) Get(ctx context.Context) (*workload, error) { + switch cli.workloadType { + case ketchv1.DeploymentAppType: + o, err := cli.k8sClient.AppsV1().Deployments(cli.workloadNamespace).Get(ctx, cli.workloadName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + w := workload{ + Name: o.Name, + UpdatedReplicas: int(o.Status.UpdatedReplicas), + ReadyReplicas: int(o.Status.ReadyReplicas), + Generation: int(o.Generation), + ObservedGeneration: int(o.Status.ObservedGeneration), + } + if o.Spec.Replicas != nil { + w.Replicas = int(*o.Spec.Replicas) + } + for _, c := range o.Status.Conditions { + w.Conditions = append(w.Conditions, condition{Type: string(c.Type), Reason: c.Reason}) + } + return &w, nil + case ketchv1.StatefulSetAppType: + o, err := cli.k8sClient.AppsV1().StatefulSets(cli.workloadNamespace).Get(ctx, cli.workloadName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + w := workload{ + Name: o.Name, + UpdatedReplicas: int(o.Status.UpdatedReplicas), + ReadyReplicas: int(o.Status.ReadyReplicas), + Generation: int(o.Generation), + ObservedGeneration: int(o.Status.ObservedGeneration), + } + if o.Spec.Replicas != nil { + w.Replicas = int(*o.Spec.Replicas) + } + for _, c := range o.Status.Conditions { + w.Conditions = append(w.Conditions, condition{Type: string(c.Type), Reason: c.Reason}) + } + return &w, nil + } + return nil, fmt.Errorf("unknown workload type") +} + func (r *AppReconciler) reconcile(ctx context.Context, app *ketchv1.App, logger logr.Logger) appReconcileResult { framework := ketchv1.Framework{} @@ -337,16 +407,29 @@ func (r *AppReconciler) reconcile(ctx context.Context, app *ketchv1.App, logger // use latest deployment and watch events for each process latestDeployment := 
app.Spec.Deployments[len(app.Spec.Deployments)-1] for _, process := range latestDeployment.Processes { - var dep appsv1.Deployment - if err := r.Get(ctx, client.ObjectKey{ - Namespace: framework.Spec.NamespaceName, - Name: fmt.Sprintf("%s-%s-%d", app.GetName(), process.Name, latestDeployment.Version), - }, &dep); err != nil { + + cli, err := kubernetes.NewForConfig(r.Config) + if err != nil { return appReconcileResult{ - err: fmt.Errorf("failed to get deployment: %w", err), + err: err, } } - err = r.watchDeployEvents(ctx, app, framework.Spec.NamespaceName, &dep, &process, r.Recorder) + + wc := workloadClient{ + k8sClient: cli, + workloadName: fmt.Sprintf("%s-%s-%d", app.GetName(), process.Name, latestDeployment.Version), + workloadNamespace: framework.Spec.NamespaceName, + workloadType: app.Spec.GetType(), + } + + wl, err := wc.Get(ctx) + if err != nil { + return appReconcileResult{ + err: err, + } + } + + err = r.watchDeployEvents(ctx, app, &wc, wl, &process, r.Recorder) if err != nil { return appReconcileResult{ err: fmt.Errorf("failed to get deploy events: %w", err), @@ -369,25 +452,20 @@ func (r *AppReconciler) reconcile(ctx context.Context, app *ketchv1.App, logger // watchDeployEvents watches a namespace for events and, after a deployment has started updating, records events // with updated deployment status and/or healthcheck and timeout failures -func (r *AppReconciler) watchDeployEvents(ctx context.Context, app *ketchv1.App, namespace string, dep *appsv1.Deployment, process *ketchv1.ProcessSpec, recorder record.EventRecorder) error { - cli, err := kubernetes.NewForConfig(r.Config) - if err != nil { - return err - } - +func (r *AppReconciler) watchDeployEvents(ctx context.Context, app *ketchv1.App, cli *workloadClient, wl *workload, process *ketchv1.ProcessSpec, recorder record.EventRecorder) error { opts := metav1.ListOptions{ FieldSelector: "involvedObject.kind=Pod", Watch: true, } - watcher, err := cli.CoreV1().Events(namespace).Watch(ctx, opts) // 
requires "watch" permission on events in clusterrole + watcher, err := cli.k8sClient.CoreV1().Events(cli.workloadNamespace).Watch(ctx, opts) // requires "watch" permission on events in clusterrole if err != nil { return err } // wait for Deployment Generation timeout := time.After(DefaultPodRunningTimeout) - for dep.Status.ObservedGeneration < dep.Generation { - dep, err = cli.AppsV1().Deployments(namespace).Get(ctx, dep.Name, metav1.GetOptions{}) + for wl.ObservedGeneration < wl.Generation { + wl, err = cli.Get(ctx) if err != nil { recorder.Eventf(app, v1.EventTypeWarning, ketchv1.AppReconcileError, "error getting deployments: %s", err.Error()) return err @@ -404,45 +482,43 @@ func (r *AppReconciler) watchDeployEvents(ctx context.Context, app *ketchv1.App, ctx, cancel := context.WithCancel(ctx) // assign current cancelFunc and cancel the previous one - cleanup := r.CancelMap.replaceAndCancelPrevious(dep.Name, cancel) + cleanup := r.CancelMap.replaceAndCancelPrevious(wl.Name, cancel) reconcileStartedEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileStarted, fmt.Sprintf("Updating units [%s]", process.Name), process.Name) recorder.AnnotatedEventf(app, reconcileStartedEvent.Annotations, v1.EventTypeNormal, reconcileStartedEvent.Reason, reconcileStartedEvent.Description) - go r.watchFunc(ctx, cleanup, app, namespace, dep, process.Name, recorder, watcher, cli, timeout, watcher.Stop) + go r.watchFunc(ctx, cleanup, app, process.Name, recorder, watcher, cli, wl, timeout) return nil } -func (r *AppReconciler) watchFunc(ctx context.Context, cleanup cleanupFunc, app *ketchv1.App, namespace string, dep *appsv1.Deployment, processName string, recorder record.EventRecorder, watcher watch.Interface, cli kubernetes.Interface, timeout <-chan time.Time, stopFunc func()) error { +func (r *AppReconciler) watchFunc(ctx context.Context, cleanup cleanupFunc, app *ketchv1.App, processName string, recorder record.EventRecorder, watcher watch.Interface, cli *workloadClient, wl 
*workload, timeout <-chan time.Time) error { defer cleanup() var err error watchCh := watcher.ResultChan() + defer watcher.Stop() - var specReplicas int32 - if dep.Spec.Replicas != nil { - specReplicas = *dep.Spec.Replicas - } - oldUpdatedReplicas := int32(-1) - oldReadyUnits := int32(-1) - oldPendingTermination := int32(-1) + specReplicas := wl.Replicas + oldUpdatedReplicas := -1 + oldReadyUnits := -1 + oldPendingTermination := -1 now := time.Now() var healthcheckTimeout <-chan time.Time for { - for i := range dep.Status.Conditions { - c := dep.Status.Conditions[i] + for i := range wl.Conditions { + c := wl.Conditions[i] if c.Type == DeploymentProgressing && c.Reason == deadlineExeceededProgressCond { - deadlineExceededEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileError, fmt.Sprintf("deployment %q exceeded its progress deadline", dep.Name), processName) + deadlineExceededEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileError, fmt.Sprintf("deployment %q exceeded its progress deadline", wl.Name), processName) recorder.AnnotatedEventf(app, deadlineExceededEvent.Annotations, v1.EventTypeWarning, deadlineExceededEvent.Reason, deadlineExceededEvent.Description) - return errors.Errorf("deployment %q exceeded its progress deadline", dep.Name) + return errors.Errorf("deployment %q exceeded its progress deadline", wl.Name) } } - if oldUpdatedReplicas != dep.Status.UpdatedReplicas { - unitsCreatedEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileUpdate, fmt.Sprintf("%d of %d new units created", dep.Status.UpdatedReplicas, specReplicas), processName) + if oldUpdatedReplicas != wl.UpdatedReplicas { + unitsCreatedEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileUpdate, fmt.Sprintf("%d of %d new units created", wl.UpdatedReplicas, specReplicas), processName) recorder.AnnotatedEventf(app, unitsCreatedEvent.Annotations, v1.EventTypeNormal, unitsCreatedEvent.Reason, unitsCreatedEvent.Description) } - if healthcheckTimeout == nil && 
dep.Status.UpdatedReplicas == specReplicas { + if healthcheckTimeout == nil && wl.UpdatedReplicas == specReplicas { err := checkPodStatus(r.Group, r.Client, app.Name, app.Spec.Deployments[len(app.Spec.Deployments)-1].Version) if err == nil { healthcheckTimeout = time.After(maxWaitTimeDuration) @@ -451,23 +527,23 @@ func (r *AppReconciler) watchFunc(ctx context.Context, cleanup cleanupFunc, app } } - readyUnits := dep.Status.UpdatedReplicas - dep.Status.UnavailableReplicas + readyUnits := wl.ReadyReplicas if oldReadyUnits != readyUnits && readyUnits >= 0 { unitsReadyEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileUpdate, fmt.Sprintf("%d of %d new units ready", readyUnits, specReplicas), processName) recorder.AnnotatedEventf(app, unitsReadyEvent.Annotations, v1.EventTypeNormal, unitsReadyEvent.Reason, unitsReadyEvent.Description) } - pendingTermination := dep.Status.Replicas - dep.Status.UpdatedReplicas + pendingTermination := wl.Replicas - wl.UpdatedReplicas if oldPendingTermination != pendingTermination && pendingTermination > 0 { pendingTerminationEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileUpdate, fmt.Sprintf("%d old units pending termination", pendingTermination), processName) recorder.AnnotatedEventf(app, pendingTerminationEvent.Annotations, v1.EventTypeNormal, pendingTerminationEvent.Reason, pendingTerminationEvent.Description) } - oldUpdatedReplicas = dep.Status.UpdatedReplicas + oldUpdatedReplicas = wl.UpdatedReplicas oldReadyUnits = readyUnits oldPendingTermination = pendingTermination if readyUnits == specReplicas && - dep.Status.Replicas == specReplicas { + wl.Replicas == specReplicas { break } @@ -477,17 +553,17 @@ func (r *AppReconciler) watchFunc(ctx context.Context, cleanup cleanupFunc, app if !isOpen { break } - if isDeploymentEvent(msg, dep) { + if isDeploymentEvent(msg, wl.Name) { appDeploymentEvent := appDeploymentEventFromWatchEvent(msg, app, processName) recorder.AnnotatedEventf(app, appDeploymentEvent.Annotations, 
v1.EventTypeNormal, ketchv1.AppReconcileUpdate, appDeploymentEvent.Description) } case <-healthcheckTimeout: - err = createDeployTimeoutError(ctx, cli, app, time.Since(now), namespace, string(app.GroupVersionKind().Group), "healthcheck") + err = createDeployTimeoutError(ctx, cli.k8sClient, app, time.Since(now), cli.workloadNamespace, app.GroupVersionKind().Group, "healthcheck") healthcheckTimeoutEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileError, fmt.Sprintf("error waiting for healthcheck: %s", err.Error()), processName) recorder.AnnotatedEventf(app, healthcheckTimeoutEvent.Annotations, v1.EventTypeWarning, healthcheckTimeoutEvent.Reason, healthcheckTimeoutEvent.Description) return err case <-timeout: - err = createDeployTimeoutError(ctx, cli, app, time.Since(now), namespace, string(app.GroupVersionKind().Group), "full rollout") + err = createDeployTimeoutError(ctx, cli.k8sClient, app, time.Since(now), cli.workloadNamespace, app.GroupVersionKind().Group, "full rollout") timeoutEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileError, fmt.Sprintf("deployment timeout: %s", err.Error()), processName) recorder.AnnotatedEventf(app, timeoutEvent.Annotations, v1.EventTypeWarning, timeoutEvent.Reason, timeoutEvent.Description) return err @@ -495,7 +571,7 @@ func (r *AppReconciler) watchFunc(ctx context.Context, cleanup cleanupFunc, app return ctx.Err() } - dep, err = cli.AppsV1().Deployments(namespace).Get(context.TODO(), dep.Name, metav1.GetOptions{}) + wl, err = cli.Get(ctx) if err != nil { deploymentErrorEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileError, fmt.Sprintf("error getting deployments: %s", err.Error()), processName) recorder.AnnotatedEventf(app, deploymentErrorEvent.Annotations, v1.EventTypeWarning, deploymentErrorEvent.Reason, deploymentErrorEvent.Description) @@ -503,10 +579,9 @@ func (r *AppReconciler) watchFunc(ctx context.Context, cleanup cleanupFunc, app } } - outcome := ketchv1.AppReconcileOutcome{AppName: app.Name, 
DeploymentCount: int(dep.Status.ReadyReplicas)} + outcome := ketchv1.AppReconcileOutcome{AppName: app.Name, DeploymentCount: wl.ReadyReplicas} outcomeEvent := newAppDeploymentEvent(app, ketchv1.AppReconcileComplete, outcome.String(), processName) recorder.AnnotatedEventf(app, outcomeEvent.Annotations, v1.EventTypeNormal, outcomeEvent.Reason, outcomeEvent.Description) - stopFunc() return nil } @@ -562,10 +637,10 @@ func newAppDeploymentEvent(app *ketchv1.App, reason, desc, processName string) * } } -// isDeploymentEvent returns true if the watchEvnet is an Event type and matches the deployment.Name -func isDeploymentEvent(msg watch.Event, dep *appsv1.Deployment) bool { +// isDeploymentEvent returns true if the watchEvent is an Event type and matches the deployment.Name +func isDeploymentEvent(msg watch.Event, name string) bool { evt, ok := msg.Object.(*v1.Event) - return ok && strings.HasPrefix(evt.Name, dep.Name) + return ok && strings.HasPrefix(evt.Name, name) } // createDeployTimeoutError gets pods that are not status == ready aggregates and returns the pod phase errors diff --git a/internal/controllers/app_controller_test.go b/internal/controllers/app_controller_test.go index 1a98570e..846b708b 100644 --- a/internal/controllers/app_controller_test.go +++ b/internal/controllers/app_controller_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "k8s.io/client-go/tools/record" "testing" "time" @@ -20,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/watch" clientFake "k8s.io/client-go/kubernetes/fake" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlFake "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -220,188 +220,264 @@ func TestAppReconciler_Reconcile(t *testing.T) { } func TestWatchDeployEvents(t *testing.T) { - process := &ketchv1.ProcessSpec{ - Name: "test", - } - - app := &ketchv1.App{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
"test", - }, - Spec: ketchv1.AppSpec{ - Deployments: []ketchv1.AppDeploymentSpec{ - { - Image: "gcr.io/test", - Version: 1, - Processes: []ketchv1.ProcessSpec{*process}, - }, - }, - Framework: "test", - }, - } - namespace := "ketch-test" - replicas := int32(1) - - // depStart is the Deployment in it's initial state - depStart := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "ketch-test", + tt := []struct { + name string + appType ketchv1.AppType + expectedError string + }{ + { + name: "watch deploy events - deployment", + appType: "Deployment", }, - Status: appsv1.DeploymentStatus{ - UpdatedReplicas: 1, - UnavailableReplicas: 1, - Replicas: 1, + { + name: "watch deploy events - statefulset", + appType: "StatefulSet", }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, + { + name: "unknown type", + appType: "TypeThatDoesNotExist", + expectedError: "unknown workload type", }, } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + process := &ketchv1.ProcessSpec{ + Name: "test", + } - // depFetch is the Deployment as returned via Get() in the function's loop - depFetch := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "ketch-test", - }, - Status: appsv1.DeploymentStatus{ - UpdatedReplicas: 1, - UnavailableReplicas: 0, - Replicas: 1, - ReadyReplicas: 1, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - }, - } + app := &ketchv1.App{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: ketchv1.AppSpec{ + Deployments: []ketchv1.AppDeploymentSpec{ + { + Image: "gcr.io/test", + Version: 1, + Processes: []ketchv1.ProcessSpec{*process}, + }, + }, + Framework: "test", + }, + } + namespace := "ketch-test" + replicas := int32(1) + + // wl initial state + wl := workload{ + Name: "test", + UpdatedReplicas: 1, + ReadyReplicas: 0, + Replicas: 1, + } - recorder := record.NewFakeRecorder(1024) - watcher := watch.NewFake() - cli := clientFake.NewSimpleClientset(depFetch) - 
timeout := time.After(DefaultPodRunningTimeout) - r := AppReconciler{} - ctx := context.Background() - - var events []string - go func() { - for ev := range recorder.Events { - events = append(events, ev) - } - }() + var cli *clientFake.Clientset + // details returned via Get() in the function's loop + if tc.appType == "Deployment" { + cli = clientFake.NewSimpleClientset(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "ketch-test", + }, + Status: appsv1.DeploymentStatus{ + UpdatedReplicas: 1, + UnavailableReplicas: 0, + Replicas: 1, + ReadyReplicas: 1, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + }, + }) + } else { + cli = clientFake.NewSimpleClientset(&appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "ketch-test", + }, + Status: appsv1.StatefulSetStatus{ + UpdatedReplicas: 1, + Replicas: 1, + ReadyReplicas: 1, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + }) + } - cleanupIsCalled := false - cleanupFn := func() { - cleanupIsCalled = true - } + recorder := record.NewFakeRecorder(1024) + watcher := watch.NewFake() + timeout := time.After(DefaultPodRunningTimeout) + r := AppReconciler{} + ctx := context.Background() - err := r.watchFunc(ctx, cleanupFn, app, namespace, depStart, process.Name, recorder, watcher, cli, timeout, func() {}) - require.Nil(t, err) + var events []string + go func() { + for ev := range recorder.Events { + events = append(events, ev) + } + }() - time.Sleep(time.Millisecond * 100) + cleanupIsCalled := false + cleanupFn := func() { + cleanupIsCalled = true + } - expectedEvents := []string{ - "Normal AppReconcileUpdate 1 of 1 new units created", - "Normal AppReconcileUpdate 0 of 1 new units ready", - "Normal AppReconcileUpdate 1 of 1 new units ready", - "Normal AppReconcileComplete app test 1 reconcile success", - } + wc := workloadClient{ + k8sClient: cli, + workloadNamespace: namespace, + workloadType: tc.appType, + workloadName: "test", + } 
-EXPECTED: - for _, expected := range expectedEvents { - for _, ev := range events { - if ev == expected { - continue EXPECTED + err := r.watchFunc(ctx, cleanupFn, app, process.Name, recorder, watcher, &wc, &wl, timeout) + if tc.expectedError != "" { + require.Equal(t, tc.expectedError, err.Error()) + return } - } - t.Errorf("expected event %s, but it was not found", expected) - } + require.Nil(t, err) - require.True(t, cleanupIsCalled) -} + time.Sleep(time.Millisecond * 100) -func TestCancelWatchDeployEvents(t *testing.T) { - process := &ketchv1.ProcessSpec{ - Name: "test", - } + expectedEvents := []string{ + "Normal AppReconcileUpdate 1 of 1 new units created", + "Normal AppReconcileUpdate 0 of 1 new units ready", + "Normal AppReconcileUpdate 1 of 1 new units ready", + "Normal AppReconcileComplete app test 1 reconcile success", + } - app := &ketchv1.App{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - }, - Spec: ketchv1.AppSpec{ - Deployments: []ketchv1.AppDeploymentSpec{ - { - Image: "gcr.io/test", - Version: 1, - Processes: []ketchv1.ProcessSpec{*process}, - }, - }, - Framework: "test", - }, - } - namespace := "ketch-test" - replicas := int32(1) + EXPECTED: + for _, expected := range expectedEvents { + for _, ev := range events { + if ev == expected { + continue EXPECTED + } + } + t.Errorf("expected event %s, but it was not found", expected) + } - // depStart is the Deployment in it's initial state - depStart := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "ketch-test", - }, - Status: appsv1.DeploymentStatus{ - UpdatedReplicas: 1, - UnavailableReplicas: 1, - Replicas: 1, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - }, + require.True(t, cleanupIsCalled) + }) } +} - // depFetch is the Deployment as returned via Get() in the function's loop - depFetch := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - Namespace: "ketch-test", - }, - Status: appsv1.DeploymentStatus{ - UpdatedReplicas: 1, 
- UnavailableReplicas: 0, - Replicas: 1, - ReadyReplicas: 1, +func TestCancelWatchDeployEvents(t *testing.T) { + tt := []struct { + name string + appType ketchv1.AppType + }{ + { + name: "cancel watch deploy events - deployment", + appType: "Deployment", }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, + { + name: "cancel watch deploy events - statefulset", + appType: "StatefulSet", }, } + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + process := &ketchv1.ProcessSpec{ + Name: "test", + } + + app := &ketchv1.App{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + Spec: ketchv1.AppSpec{ + Deployments: []ketchv1.AppDeploymentSpec{ + { + Image: "gcr.io/test", + Version: 1, + Processes: []ketchv1.ProcessSpec{*process}, + }, + }, + Framework: "test", + }, + } + namespace := "ketch-test" + replicas := int32(1) + + // wl initial state + wl := workload{ + Name: "test", + UpdatedReplicas: 1, + ReadyReplicas: 0, + Replicas: 1, + } + + var cli *clientFake.Clientset + // details returned via Get() in the function's loop + if tc.appType == "Deployment" { + cli = clientFake.NewSimpleClientset(&appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "ketch-test", + }, + Status: appsv1.DeploymentStatus{ + UpdatedReplicas: 1, + UnavailableReplicas: 0, + Replicas: 1, + ReadyReplicas: 1, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + }, + }) + } else { + cli = clientFake.NewSimpleClientset(&appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "ketch-test", + }, + Status: appsv1.StatefulSetStatus{ + UpdatedReplicas: 1, + Replicas: 1, + ReadyReplicas: 1, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + }) + } - recorder := record.NewFakeRecorder(1024) - watcher := watch.NewFake() - cli := clientFake.NewSimpleClientset(depFetch) - timeout := time.After(DefaultPodRunningTimeout) - r := AppReconciler{} + recorder := record.NewFakeRecorder(1024) + watcher := watch.NewFake() + 
timeout := time.After(DefaultPodRunningTimeout) + r := AppReconciler{} - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) - var events []string - go func() { - for ev := range recorder.Events { - cancel() // cancel context after first event received - events = append(events, ev) - } - }() + var events []string + go func() { + for ev := range recorder.Events { + cancel() // cancel context after first event received + events = append(events, ev) + } + }() - err := r.watchFunc(ctx, func() {}, app, namespace, depStart, process.Name, recorder, watcher, cli, timeout, func() {}) - require.EqualError(t, err, "context canceled") + wc := workloadClient{ + k8sClient: cli, + workloadNamespace: namespace, + workloadType: tc.appType, + workloadName: "test", + } - // assert that watchFunc() ended early via context cancelation and that not all events were processed. - allPossibleEvents := []string{ - "Normal AppReconcileUpdate 1 of 1 new units created", - "Normal AppReconcileUpdate 0 of 1 new units ready", - "Normal AppReconcileUpdate 1 of 1 new units ready", - "Normal AppReconcileComplete app test 1 reconcile success", + err := r.watchFunc(ctx, func() {}, app, process.Name, recorder, watcher, &wc, &wl, timeout) + require.EqualError(t, err, "context canceled") + + // assert that watchFunc() ended early via context cancelation and that not all events were processed. 
+ allPossibleEvents := []string{ + "Normal AppReconcileUpdate 1 of 1 new units created", + "Normal AppReconcileUpdate 0 of 1 new units ready", + "Normal AppReconcileUpdate 1 of 1 new units ready", + "Normal AppReconcileComplete app test 1 reconcile success", + } + require.True(t, len(events) < len(allPossibleEvents)) + }) } - require.True(t, len(events) < len(allPossibleEvents)) } func Test_checkPodStatus(t *testing.T) { @@ -616,7 +692,7 @@ func TestIsDeploymentEvent(t *testing.T) { for _, tc := range tests { t.Run("", func(t *testing.T) { - res := isDeploymentEvent(tc.msg, dep) + res := isDeploymentEvent(tc.msg, dep.Name) require.Equal(t, tc.expected, res) }) } diff --git a/internal/templates/common/yamls/_pod.tpl b/internal/templates/common/yamls/_pod.tpl new file mode 100644 index 00000000..820fb333 --- /dev/null +++ b/internal/templates/common/yamls/_pod.tpl @@ -0,0 +1,67 @@ +{{/* Generate pod template for deployment and stateful_set */}} +{{- define "app.podTemplate" }} + spec: + {{- if .root.app.serviceAccountName }} + serviceAccountName: {{ .root.app.serviceAccountName }} + {{- end }} + {{- if .root.app.securityContext }} + securityContext: +{{ .root.app.securityContext | toYaml | indent 8 }} + {{- end }} + containers: + - name: {{ .root.app.name }}-{{ .process.name }}-{{ .deployment.version }} + command: {{ .process.cmd | toJson }} + {{- if or .process.env .root.app.env }} + env: + {{- if .process.env }} +{{ .process.env | toYaml | indent 12 }} + {{- end }} + {{- if .root.app.env }} +{{ .root.app.env | toYaml | indent 12 }} + {{- end }} + {{- end }} + image: {{ .deployment.image }} + {{- if .process.containerPorts }} + ports: +{{ .process.containerPorts | toYaml | indent 10 }} + {{- end }} + {{- if .process.volumeMounts }} + volumeMounts: +{{ .process.volumeMounts | toYaml | indent 12 }} + {{- end }} + {{- if .process.resourceRequirements }} + resources: +{{ .process.resourceRequirements | toYaml | indent 12 }} + {{- end }} + {{- if .process.lifecycle }} + 
lifecycle: +{{ .process.lifecycle | toYaml | indent 12 }} + {{- end }} + {{- if .process.securityContext }} + securityContext: +{{ .process.securityContext | toYaml | indent 12 }} + {{- end }} + {{- if .process.readinessProbe }} + readinessProbe: +{{ .process.readinessProbe | toYaml | indent 12 }} + {{- end }} + {{- if .process.livenessProbe }} + livenessProbe: +{{ .process.livenessProbe | toYaml | indent 12 }} + {{- end }} + {{- if .deployment.imagePullSecrets }} + imagePullSecrets: +{{ .deployment.imagePullSecrets | toYaml | indent 12}} + {{- end }} + {{- if .process.volumes }} + volumes: +{{ .process.volumes | toYaml | indent 12 }} + {{- end }} + {{- if .process.nodeSelectorTerms }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: +{{ .process.nodeSelectorTerms | toYaml | indent 14 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/internal/templates/common/yamls/deployment.yaml b/internal/templates/common/yamls/deployment.yaml index 5f912d48..4b2768a9 100644 --- a/internal/templates/common/yamls/deployment.yaml +++ b/internal/templates/common/yamls/deployment.yaml @@ -1,3 +1,4 @@ +{{ if eq $.Values.app.type "Deployment" }} {{ range $_, $deployment := .Values.app.deployments }} {{ range $_, $process := $deployment.processes }} apiVersion: apps/v1 @@ -47,70 +48,8 @@ spec: {{ $k }}: {{ $v | quote }} {{- end }} {{- end }} - spec: - {{- if $.Values.app.serviceAccountName }} - serviceAccountName: {{ $.Values.app.serviceAccountName }} - {{- end }} - {{- if $.Values.app.securityContext }} - securityContext: -{{ $.Values.app.securityContext | toYaml | indent 8 }} - {{- end }} - containers: - - name: {{ $.Values.app.name }}-{{ $process.name }}-{{ $deployment.version }} - command: {{ $process.cmd | toJson }} - {{- if or $process.env $.Values.app.env }} - env: - {{- if $process.env }} -{{ $process.env | toYaml | indent 12 }} - {{- end }} - {{- if $.Values.app.env }} -{{ $.Values.app.env | toYaml | indent 
12 }} - {{- end }} - {{- end }} - image: {{ $deployment.image }} - {{- if $process.containerPorts }} - ports: -{{ $process.containerPorts | toYaml | indent 10 }} - {{- end }} - {{- if $process.volumeMounts }} - volumeMounts: -{{ $process.volumeMounts | toYaml | indent 12 }} - {{- end }} - {{- if $process.resourceRequirements }} - resources: -{{ $process.resourceRequirements | toYaml | indent 12 }} - {{- end }} - {{- if $process.lifecycle }} - lifecycle: -{{ $process.lifecycle | toYaml | indent 12 }} - {{- end }} - {{- if $process.securityContext }} - securityContext: -{{ $process.securityContext | toYaml | indent 12 }} - {{- end }} - {{- if $process.readinessProbe }} - readinessProbe: -{{ $process.readinessProbe | toYaml | indent 12 }} - {{- end }} - {{- if $process.livenessProbe }} - livenessProbe: -{{ $process.livenessProbe | toYaml | indent 12 }} - {{- end }} - {{- if $deployment.imagePullSecrets }} - imagePullSecrets: -{{ $deployment.imagePullSecrets | toYaml | indent 12}} - {{- end }} - {{- if $process.volumes }} - volumes: -{{ $process.volumes | toYaml | indent 12 }} - {{- end }} - {{- if $process.nodeSelectorTerms }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: -{{ $process.nodeSelectorTerms | toYaml | indent 14 }} - {{- end }} + {{- template "app.podTemplate" (dict "root" $.Values "deployment" $deployment "process" $process) }} --- {{ end }} {{ end }} + {{- end }} diff --git a/internal/templates/common/yamls/stateful_set.yaml b/internal/templates/common/yamls/stateful_set.yaml new file mode 100644 index 00000000..93d73efd --- /dev/null +++ b/internal/templates/common/yamls/stateful_set.yaml @@ -0,0 +1,68 @@ +{{ if eq $.Values.app.type "StatefulSet" }} +{{ range $_, $deployment := .Values.app.deployments }} + {{ range $_, $process := $deployment.processes }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + {{ $.Values.app.group }}/app-name: {{ $.Values.app.name | quote }} + {{ 
$.Values.app.group }}/app-process: {{ $process.name | quote }} + {{ $.Values.app.group }}/app-process-replicas: {{ $process.units | quote }} + {{ $.Values.app.group }}/app-deployment-version: {{ $deployment.version | quote }} + {{ $.Values.app.group }}/is-isolated-run: "false" + {{- range $k, $v := $process.deploymentMetadata.labels }} + {{ $k }}: {{ $v | quote }} + {{- end}} + {{- if $process.deploymentMetadata.annotations }} + annotations: + {{- range $k, $v := $process.deploymentMetadata.annotations }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + name: {{ $.Values.app.name }}-{{ $process.name }}-{{ $deployment.version }} +spec: + selector: + matchLabels: + app: {{ default $.Values.app.name $.Values.app.id | quote }} + version: {{ $deployment.version | quote }} + {{ $.Values.app.group }}/app-name: {{ $.Values.app.name | quote }} + {{ $.Values.app.group }}/app-process: {{ $process.name | quote }} + {{ $.Values.app.group }}/app-deployment-version: {{ $deployment.version | quote }} + {{ $.Values.app.group }}/is-isolated-run: "false" + serviceName: {{ $.Values.app.name | quote }} + template: + metadata: + labels: + app: {{ default $.Values.app.name $.Values.app.id | quote }} + version: {{ $deployment.version | quote }} + {{ $.Values.app.group }}/app-name: {{ $.Values.app.name | quote }} + {{ $.Values.app.group }}/app-process: {{ $process.name | quote }} + {{ $.Values.app.group }}/app-deployment-version: {{ $deployment.version | quote }} + {{ $.Values.app.group }}/is-isolated-run: "false" + {{- range $k, $v := $process.podMetadata.labels }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- if $process.podMetadata.annotations }} + annotations: + {{- range $k, $v := $process.podMetadata.annotations }} + {{ $k }}: {{ $v | quote }} + {{- end }} + {{- end }} + {{- template "app.podTemplate" (dict "root" $.Values "deployment" $deployment "process" $process) }} + {{- if $.Values.app.volumeClaimTemplates }} + volumeClaimTemplates: + {{- range $_, $template := 
$.Values.app.volumeClaimTemplates }} + - metadata: + name: {{ $template.name }} + spec: + accessModes: {{ $template.accessModes }} + storageClassName: {{ $template.storageClassName | quote }} + resources: + requests: + storage: {{ $template.storage }} + {{- end }} + {{- end }} +--- +{{ end }} +{{ end }} +{{- end }} \ No newline at end of file