diff --git a/tests/integration/test_csi_driver_nfs.py b/tests/integration/test_csi_driver_nfs.py
new file mode 100644
index 0000000..27f6888
--- /dev/null
+++ b/tests/integration/test_csi_driver_nfs.py
@@ -0,0 +1,130 @@
+#
+# Copyright 2024 Canonical, Ltd.
+# See LICENSE file for licensing details
+#
+
+import logging
+import pathlib
+
+from k8s_test_harness import harness
+from k8s_test_harness.util import constants, env_util, exec_util, k8s_util
+
+LOG = logging.getLogger(__name__)
+
+DIR = pathlib.Path(__file__).absolute().parent
+MANIFESTS_DIR = DIR / ".." / "templates"
+
+
+def _clone_helm_chart_repo(
+    instance: harness.Instance, dest_path: pathlib.Path, version: str
+):
+    clone_command = [
+        "git",
+        "clone",
+        "https://github.com/kubernetes-csi/csi-driver-nfs",
+        "--depth",
+        "1",
+        str(dest_path.absolute()),
+    ]
+    instance.exec(clone_command)
+
+    # The Helm chart deploys the CSI components with readOnlyRootFilesystem: true,
+    # which prevents Pebble from running properly.
+    templates_path = dest_path / "charts" / version / "csi-driver-nfs" / "templates"
+    abs_path = str(templates_path.absolute())
+    sed_str = "'s/readOnlyRootFilesystem: true/readOnlyRootFilesystem: false/g'"
+    cmd = f"find {abs_path}/ -name '*.yaml' -exec sed -i -e {sed_str} {{}} \\;"  # noqa
+    replace_command = ["bash", "-c", cmd]
+
+    instance.exec(replace_command)
+
+
+def _get_nfsplugin_csi_helm_cmd(chart_path: pathlib.Path):
+    image_tuples = [
+        # (rock_name, version, helm_image_subitem)
+        ("csi-provisioner", "4.0.0", "csiProvisioner"),
+        ("livenessprobe", "2.12.0", "livenessProbe"),
+        ("csi-node-driver-registrar", "2.10.0", "nodeDriverRegistrar"),
+        ("snapshot-controller", "6.3.3", "externalSnapshotter"),
+        ("csi-snapshotter", "6.3.3", "csiSnapshotter"),
+    ]
+
+    images = []
+    for rock_name, version, helm_image_subitem in image_tuples:
+        rock = env_util.get_build_meta_info_for_rock_version(
+            rock_name, version, "amd64"
+        )
+        images.append(k8s_util.HelmImage(rock.image, subitem=helm_image_subitem))
+
+    set_configs = [
+        "externalSnapshotter.enabled=true",
+    ]
+
+    return k8s_util.get_helm_install_command(
+        "csi-driver-nfs",
+        chart_name=str(chart_path.absolute()),
+        chart_version="v4.7.0",
+        images=images,
+        set_configs=set_configs,
+    )
+
+
+def test_nfsplugin_integration(
+    tmp_path: pathlib.Path, function_instance: harness.Instance
+):
+    version = "v4.7.0"
+    clone_path = tmp_path / "csi-driver-nfs"
+    chart_path = clone_path / "charts" / version / "csi-driver-nfs"
+
+    _clone_helm_chart_repo(function_instance, clone_path, version)
+    helm_command = _get_nfsplugin_csi_helm_cmd(chart_path)
+    function_instance.exec(helm_command)
+
+    # Wait for all the components to become active.
+    k8s_util.wait_for_daemonset(function_instance, "csi-nfs-node", "kube-system")
+    k8s_util.wait_for_deployment(function_instance, "csi-nfs-controller", "kube-system")
+    k8s_util.wait_for_deployment(
+        function_instance, "snapshot-controller", "kube-system"
+    )
+
+    # Call the nfsplugin's liveness probes to check that they're running as intended.
+    for port in [29652, 29653]:
+        # The plugin Pods run with hostNetwork=true, so curling localhost should work.
+        exec_util.stubbornly(retries=5, delay_s=5).on(function_instance).exec(
+            ["curl", f"http://localhost:{port}/healthz"]
+        )
+
+    # Deploy an NFS server and an nginx Pod with an NFS volume attached.
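+    # The manifests are read locally and piped to kubectl through stdin.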
+    for item in ["nfs-server.yaml", "nginx-pod.yaml"]:
+        manifest = MANIFESTS_DIR / item
+        function_instance.exec(
+            ["k8s", "kubectl", "apply", "-f", "-"],
+            input=pathlib.Path(manifest).read_bytes(),
+        )
+
+    # Expect the Pod to become ready and to have the volume attached.
+    k8s_util.wait_for_deployment(function_instance, "nfs-server")
+    k8s_util.wait_for_resource(
+        function_instance,
+        "pod",
+        "nginx-nfs-example",
+        condition=constants.K8S_CONDITION_READY,
+    )
+
+    process = function_instance.exec(
+        [
+            "k8s",
+            "kubectl",
+            "exec",
+            "nginx-nfs-example",
+            "--",
+            "bash",
+            "-c",
+            "findmnt /var/www -o TARGET,SOURCE,FSTYPE",
+        ],
+        capture_output=True,
+        text=True,
+    )
+
+    assert "/var/www nfs-server.default.svc.cluster.local:/ nfs4" in process.stdout
diff --git a/tests/integration/test_csi_provisioner.py b/tests/integration/test_csi_provisioner.py
deleted file mode 100644
index 6cb18d3..0000000
--- a/tests/integration/test_csi_provisioner.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright 2024 Canonical, Ltd.
-# See LICENSE file for licensing details
-#
-
-import logging
-
-from k8s_test_harness import harness
-from k8s_test_harness.util import env_util
-
-LOG = logging.getLogger(__name__)
-
-
-def test_integration_csi_provisioner(function_instance: harness.Instance):
-    rock = env_util.get_build_meta_info_for_rock_version(
-        "csi-provisioner", "4.0.0", "amd64"
-    )
-
-    LOG.info(f"Using rock: {rock.image}")
-    LOG.warn("Integration tests are not yet implemented yet")
diff --git a/tests/templates/nfs-server.yaml b/tests/templates/nfs-server.yaml
new file mode 100644
index 0000000..a3acc77
--- /dev/null
+++ b/tests/templates/nfs-server.yaml
@@ -0,0 +1,62 @@
+# from: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/nfs-provisioner/nfs-server.yaml
+---
+kind: Service
+apiVersion: v1
+metadata:
+  name: nfs-server
+  namespace: default
+  labels:
+    app: nfs-server
+spec:
+  type: ClusterIP  # use "LoadBalancer" to get a public ip
+  selector:
+    app: nfs-server
+  ports:
+    - name: tcp-2049
+      port: 2049
+      protocol: TCP
+    - name: udp-111
+      port: 111
+      protocol: UDP
+---
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: nfs-server
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nfs-server
+  template:
+    metadata:
+      name: nfs-server
+      labels:
+        app: nfs-server
+    spec:
+      nodeSelector:
+        "kubernetes.io/os": linux
+      containers:
+        - name: nfs-server
+          image: itsthenetwork/nfs-server-alpine:latest
+          env:
+            - name: SHARED_DIRECTORY
+              value: "/exports"
+          volumeMounts:
+            - mountPath: /exports
+              name: nfs-vol
+          securityContext:
+            privileged: true
+          ports:
+            - name: tcp-2049
+              containerPort: 2049
+              protocol: TCP
+            - name: udp-111
+              containerPort: 111
+              protocol: UDP
+      volumes:
+        - name: nfs-vol
+          hostPath:
+            path: /nfs-vol  # modify this to specify another path to store nfs share data
+            type: DirectoryOrCreate
diff --git a/tests/templates/nginx-pod.yaml b/tests/templates/nginx-pod.yaml
new file mode 100644
index 0000000..5ef1c9f
--- /dev/null
+++ b/tests/templates/nginx-pod.yaml
@@ -0,0 +1,62 @@
+# from: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/deploy/example/nfs-provisioner/nginx-pod.yaml
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    pv.kubernetes.io/provisioned-by: nfs.csi.k8s.io
+  name: pv-nginx
+  namespace: default
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Delete
+  mountOptions:
+    - nfsvers=4.1
+  csi:
+    driver: nfs.csi.k8s.io
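+    # nfs.csi.k8s.io is the driver name that the NFS CSI plugin registers with the cluster.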
+    # volumeHandle format: {nfs-server-address}#{sub-dir-name}#{share-name}
+    # make sure this value is unique for every share in the cluster
+    volumeHandle: nfs-server.default.svc.cluster.local/share##
+    volumeAttributes:
+      server: nfs-server.default.svc.cluster.local
+      share: /
+---
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: pvc-nginx
+  namespace: default
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+  volumeName: pv-nginx
+  storageClassName: ""
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx-nfs-example
+  namespace: default
+spec:
+  containers:
+    - image: nginx
+      name: nginx
+      ports:
+        - containerPort: 80
+          protocol: TCP
+      volumeMounts:
+        - mountPath: /var/www
+          name: pvc-nginx
+          readOnly: false
+  volumes:
+    - name: pvc-nginx
+      persistentVolumeClaim:
+        claimName: pvc-nginx
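+# The test asserts that /var/www inside this Pod is an NFSv4 mount served by the nfs-server Service.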