From 025014fe29a58632c88963f1a615ab04f221a6ea Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Mon, 8 Jan 2024 20:56:15 +0100 Subject: [PATCH] make in memory runtime and server accessible from outside --- .../inmemory/controllers/alias.go | 20 ++-- .../cloud/api/v1alpha1/groupversion_info.go | 2 +- .../cloud/api/v1alpha1/machine_types.go | 2 +- .../controllers/inmemorycluster_controller.go | 18 +-- .../controllers/inmemorymachine_controller.go | 97 ++++++++------- .../inmemorymachine_controller_test.go | 112 +++++++++--------- .../webhooks/inmemorycluster_webhook.go | 4 +- .../inmemoryclustertemplate_webhook.go | 4 +- .../webhooks/inmemorymachine_webhook.go | 4 +- .../inmemorymachinetemplate_webhook.go | 4 +- test/infrastructure/inmemory/main.go | 28 ++--- .../{internal/cloud => pkg/runtime}/alias.go | 12 +- .../cloud => pkg}/runtime/cache/cache.go | 0 .../cloud => pkg}/runtime/cache/cache_test.go | 0 .../cloud => pkg}/runtime/cache/client.go | 0 .../runtime/cache/client_test.go | 0 .../cloud => pkg}/runtime/cache/doc.go | 0 .../cloud => pkg}/runtime/cache/gc.go | 0 .../cloud => pkg}/runtime/cache/gc_test.go | 0 .../cloud => pkg}/runtime/cache/hooks.go | 0 .../cloud => pkg}/runtime/cache/informer.go | 0 .../cloud => pkg}/runtime/cache/sync.go | 0 .../cloud => pkg}/runtime/cache/sync_test.go | 0 .../cloud => pkg}/runtime/client/client.go | 0 .../cloud => pkg}/runtime/client/doc.go | 0 .../{internal/cloud => pkg/runtime}/doc.go | 19 ++- .../cloud => pkg}/runtime/manager/doc.go | 0 .../cloud => pkg}/runtime/manager/manager.go | 18 +-- .../resourcegroup/cached_resourcegroup.go | 14 +-- .../runtime/resourcegroup/doc.go | 0 .../runtime/resourcegroup/resourcegroup.go | 4 +- .../{internal => pkg}/server/api/const.go | 0 .../{internal => pkg}/server/api/debug.go | 6 +- .../{internal => pkg}/server/api/doc.go | 0 .../{internal => pkg}/server/api/handler.go | 40 +++---- .../{internal => pkg}/server/api/metrics.go | 0 .../server/api/portforward/doc.go | 0 .../server/api/portforward/httpstreams.go | 0 .../{internal => pkg}/server/api/watch.go | 0 .../{internal => pkg}/server/certs.go | 0 .../inmemory/{internal => pkg}/server/doc.go | 0 .../{internal => pkg}/server/etcd/doc.go | 0 .../{internal => pkg}/server/etcd/handler.go | 31 +++-- .../server/etcd/handler_test.go | 12 +- .../{internal => pkg}/server/etcd/metrics.go | 0 .../{internal => pkg}/server/listener.go | 0 .../inmemory/{internal => pkg}/server/mux.go | 16 +-- .../{internal => pkg}/server/mux_test.go | 18 +-- .../{internal => pkg}/server/proxy/addr.go | 0 .../{internal => pkg}/server/proxy/conn.go | 0 .../{internal => pkg}/server/proxy/dial.go | 0 .../{internal => pkg}/server/proxy/doc.go | 0 .../{internal => pkg}/server/proxy/proxy.go | 0 53 files changed, 241 insertions(+), 244 deletions(-) rename test/infrastructure/inmemory/{internal/cloud => pkg/runtime}/alias.go (76%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/cache.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/cache_test.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/client.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/client_test.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/doc.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/gc.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/gc_test.go (100%) rename 
test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/hooks.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/informer.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/sync.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/cache/sync_test.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/client/client.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/client/doc.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg/runtime}/doc.go (58%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/manager/doc.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/manager/manager.go (78%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/resourcegroup/cached_resourcegroup.go (81%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/resourcegroup/doc.go (100%) rename test/infrastructure/inmemory/{internal/cloud => pkg}/runtime/resourcegroup/resourcegroup.go (84%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/const.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/debug.go (88%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/doc.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/handler.go (94%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/metrics.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/portforward/doc.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/portforward/httpstreams.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/api/watch.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/certs.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/doc.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/etcd/doc.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/etcd/handler.go (89%) rename test/infrastructure/inmemory/{internal => pkg}/server/etcd/handler_test.go (89%) rename test/infrastructure/inmemory/{internal => pkg}/server/etcd/metrics.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/listener.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/mux.go (96%) rename test/infrastructure/inmemory/{internal => pkg}/server/mux_test.go (97%) rename test/infrastructure/inmemory/{internal => pkg}/server/proxy/addr.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/proxy/conn.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/proxy/dial.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/proxy/doc.go (100%) rename test/infrastructure/inmemory/{internal => pkg}/server/proxy/proxy.go (100%) diff --git a/test/infrastructure/inmemory/controllers/alias.go b/test/infrastructure/inmemory/controllers/alias.go index e1cfb2260094..fbf333c2e352 100644 --- a/test/infrastructure/inmemory/controllers/alias.go +++ b/test/infrastructure/inmemory/controllers/alias.go @@ -24,9 +24,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud" inmemorycontrollers "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/controllers" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server" + 
inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" ) // Following types provides access to reconcilers implemented in internal/controllers, thus @@ -34,9 +34,9 @@ import ( // InMemoryClusterReconciler reconciles a InMemoryCluster object. type InMemoryClusterReconciler struct { - Client client.Client - CloudManager cloud.Manager - APIServerMux *server.WorkloadClustersMux // TODO: find a way to use an interface here + Client client.Client + InMemoryManager inmemoryruntime.Manager + APIServerMux *inmemoryserver.WorkloadClustersMux // TODO: find a way to use an interface here // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -46,7 +46,7 @@ type InMemoryClusterReconciler struct { func (r *InMemoryClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&inmemorycontrollers.InMemoryClusterReconciler{ Client: r.Client, - CloudManager: r.CloudManager, + InMemoryManager: r.InMemoryManager, APIServerMux: r.APIServerMux, WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) @@ -54,9 +54,9 @@ func (r *InMemoryClusterReconciler) SetupWithManager(ctx context.Context, mgr ct // InMemoryMachineReconciler reconciles a InMemoryMachine object. type InMemoryMachineReconciler struct { - Client client.Client - CloudManager cloud.Manager - APIServerMux *server.WorkloadClustersMux // TODO: find a way to use an interface here + Client client.Client + InMemoryManager inmemoryruntime.Manager + APIServerMux *inmemoryserver.WorkloadClustersMux // TODO: find a way to use an interface here // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -66,7 +66,7 @@ type InMemoryMachineReconciler struct { func (r *InMemoryMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { return (&inmemorycontrollers.InMemoryMachineReconciler{ Client: r.Client, - CloudManager: r.CloudManager, + InMemoryManager: r.InMemoryManager, APIServerMux: r.APIServerMux, WatchFilterValue: r.WatchFilterValue, }).SetupWithManager(ctx, mgr, options) diff --git a/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/groupversion_info.go b/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/groupversion_info.go index fde5f7ca8dbf..545aea883276 100644 --- a/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/groupversion_info.go +++ b/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/groupversion_info.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package v1alpha1 contains API Schema definitions for the cloud v1alpha1 API group +// Package v1alpha1 contains API Schema definitions for the inmemory v1alpha1 API group // +kubebuilder:object:generate=true // +groupName=virtual.cluster.x-k8s.io package v1alpha1 diff --git a/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/machine_types.go b/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/machine_types.go index 303ed4611fab..d1724a7757ca 100644 --- a/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/machine_types.go +++ b/test/infrastructure/inmemory/internal/cloud/api/v1alpha1/machine_types.go @@ -33,7 +33,7 @@ type CloudMachineStatus struct { // +kubebuilder:object:root=true -// CloudMachine represents a machine in the cloud. +// CloudMachine represents a machine in memory. type CloudMachine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go b/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go index aae65769c31b..fb0c69a98ce6 100644 --- a/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go +++ b/test/infrastructure/inmemory/internal/controllers/inmemorycluster_controller.go @@ -34,8 +34,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ -44,8 +44,8 @@ import ( // InMemoryClusterReconciler reconciles a InMemoryCluster object. type InMemoryClusterReconciler struct { client.Client - CloudManager cloud.Manager - APIServerMux *server.WorkloadClustersMux + InMemoryManager inmemoryruntime.Manager + APIServerMux *inmemoryserver.WorkloadClustersMux // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -158,11 +158,11 @@ func (r *InMemoryClusterReconciler) reconcileNormal(_ context.Context, cluster * // Store the resource group used by this inMemoryCluster. inMemoryCluster.Annotations[infrav1.ResourceGroupAnnotationName] = resourceGroup - // Create a resource group for all the cloud resources belonging the workload cluster; + // Create a resource group for all the in memory resources belonging the workload cluster; // if the resource group already exists, the operation is a no-op. - // NOTE: We are storing in this resource group both the cloud resources (e.g. VM) as + // NOTE: We are storing in this resource group both the in memory resources (e.g. VM) as // well as Kubernetes resources that are expected to exist on the workload cluster (e.g Nodes). - r.CloudManager.AddResourceGroup(resourceGroup) + r.InMemoryManager.AddResourceGroup(resourceGroup) // Initialize a listener for the workload cluster; if the listener has been already initialized // the operation is a no-op. @@ -190,8 +190,8 @@ func (r *InMemoryClusterReconciler) reconcileDelete(_ context.Context, cluster * // Compute the resource group unique name. 
resourceGroup := klog.KObj(cluster).String() - // Delete the resource group hosting all the cloud resources belonging the workload cluster; - r.CloudManager.DeleteResourceGroup(resourceGroup) + // Delete the resource group hosting all the in memory resources belonging the workload cluster; + r.InMemoryManager.DeleteResourceGroup(resourceGroup) // Delete the listener for the workload cluster; if err := r.APIServerMux.DeleteWorkloadClusterListener(resourceGroup); err != nil { diff --git a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go index 778097d3ab74..d291ccfbfa9f 100644 --- a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go +++ b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller.go @@ -44,10 +44,9 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud" cloudv1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/api/v1alpha1" - cclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/client" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/certs" @@ -61,8 +60,8 @@ import ( // InMemoryMachineReconciler reconciles a InMemoryMachine object. type InMemoryMachineReconciler struct { client.Client - CloudManager cloud.Manager - APIServerMux *server.WorkloadClustersMux + InMemoryManager inmemoryruntime.Manager + APIServerMux *inmemoryserver.WorkloadClustersMux // WatchFilterValue is the label value used to filter events prior to reconciliation. WatchFilterValue string @@ -240,21 +239,21 @@ func (r *InMemoryMachineReconciler) reconcileNormalCloudMachine(ctx context.Cont // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Create VM; a Cloud VM can be created as soon as the Infra Machine is created - // NOTE: for sake of simplicity we keep cloud resources as global resources (namespace empty). + // NOTE: for sake of simplicity we keep in memory resources as global resources (namespace empty). 
cloudMachine := &cloudv1.CloudMachine{ ObjectMeta: metav1.ObjectMeta{ Name: inMemoryMachine.Name, }, } - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(cloudMachine), cloudMachine); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(cloudMachine), cloudMachine); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, err } - if err := cloudClient.Create(ctx, cloudMachine); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, cloudMachine); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create CloudMachine") } } @@ -324,7 +323,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalNode(ctx context.Context, clu // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Create Node // TODO: consider if to handle an additional setting adding a delay in between create node and node ready/provider ID being set @@ -351,14 +350,14 @@ func (r *InMemoryMachineReconciler) reconcileNormalNode(ctx context.Context, clu node.Labels["node-role.kubernetes.io/control-plane"] = "" } - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(node), node); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(node), node); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to get node") } // NOTE: for the first control plane machine we might create the node before etcd and API server pod are running // but this is not an issue, because it won't be visible to CAPI until the API server start serving requests. - if err := cloudClient.Create(ctx, node); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, node); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create Node") } } @@ -409,7 +408,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalETCD(ctx context.Context, clu // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Create the etcd pod // TODO: consider if to handle an additional setting adding a delay in between create pod and pod ready @@ -436,13 +435,13 @@ func (r *InMemoryMachineReconciler) reconcileNormalETCD(ctx context.Context, clu }, }, } - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(etcdPod), etcdPod); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(etcdPod), etcdPod); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to get etcd Pod") } // Gets info about the current etcd cluster, if any. 
- info, err := r.getEtcdInfo(ctx, cloudClient) + info, err := r.getEtcdInfo(ctx, inmemoryClient) if err != nil { return ctrl.Result{}, err } @@ -479,7 +478,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalETCD(ctx context.Context, clu // NOTE: for the first control plane machine we might create the etcd pod before the API server pod is running // but this is not an issue, because it won't be visible to CAPI until the API server start serving requests. - if err := cloudClient.Create(ctx, etcdPod); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, etcdPod); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create Pod") } } @@ -526,9 +525,9 @@ type etcdInfo struct { members sets.Set[string] } -func (r *InMemoryMachineReconciler) getEtcdInfo(ctx context.Context, cloudClient cclient.Client) (etcdInfo, error) { +func (r *InMemoryMachineReconciler) getEtcdInfo(ctx context.Context, inmemoryClient inmemoryruntime.Client) (etcdInfo, error) { etcdPods := &corev1.PodList{} - if err := cloudClient.List(ctx, etcdPods, + if err := inmemoryClient.List(ctx, etcdPods, client.InNamespace(metav1.NamespaceSystem), client.MatchingLabels{ "component": "etcd", @@ -613,7 +612,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalAPIServer(ctx context.Context // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Create the apiserver pod // TODO: consider if to handle an additional setting adding a delay in between create pod and pod ready @@ -641,12 +640,12 @@ func (r *InMemoryMachineReconciler) reconcileNormalAPIServer(ctx context.Context }, }, } - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(apiServerPod), apiServerPod); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(apiServerPod), apiServerPod); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to get apiServer Pod") } - if err := cloudClient.Create(ctx, apiServerPod); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, apiServerPod); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create apiServer Pod") } } @@ -706,7 +705,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalScheduler(ctx context.Context // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() schedulerPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -730,7 +729,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalScheduler(ctx context.Context }, }, } - if err := cloudClient.Create(ctx, schedulerPod); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, schedulerPod); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create scheduler Pod") } @@ -754,7 +753,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalControllerManager(ctx context // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() controllerManagerPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -778,7 +777,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalControllerManager(ctx context }, }, } - if err := cloudClient.Create(ctx, controllerManagerPod); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, controllerManagerPod); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create controller manager Pod") } @@ -794,7 +793,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalKubeadmObjects(ctx context.Co // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // create kubeadm ClusterRole and ClusterRoleBinding enforced by KCP // NOTE: we create those objects because this is what kubeadm does, but KCP creates @@ -812,7 +811,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalKubeadmObjects(ctx context.Co }, }, } - if err := cloudClient.Create(ctx, role); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, role); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create kubeadm:get-nodes ClusterRole") } @@ -832,7 +831,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalKubeadmObjects(ctx context.Co }, }, } - if err := cloudClient.Create(ctx, roleBinding); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, roleBinding); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create kubeadm:get-nodes ClusterRoleBinding") } @@ -846,7 +845,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalKubeadmObjects(ctx context.Co "ClusterConfiguration": "", }, } - if err := cloudClient.Create(ctx, cm); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, cm); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create kubeadm-config ConfigMap") } @@ -864,7 +863,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalKubeProxy(ctx context.Context // Compute the resource group unique name. 
// NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Create the kube-proxy-daemonset kubeProxyDaemonSet := &appsv1.DaemonSet{ @@ -888,12 +887,12 @@ func (r *InMemoryMachineReconciler) reconcileNormalKubeProxy(ctx context.Context }, }, } - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(kubeProxyDaemonSet), kubeProxyDaemonSet); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(kubeProxyDaemonSet), kubeProxyDaemonSet); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to get kube-proxy DaemonSet") } - if err := cloudClient.Create(ctx, kubeProxyDaemonSet); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, kubeProxyDaemonSet); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create kube-proxy DaemonSet") } } @@ -911,7 +910,7 @@ func (r *InMemoryMachineReconciler) reconcileNormalCoredns(ctx context.Context, // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Create the coredns configMap. corednsConfigMap := &corev1.ConfigMap{ @@ -923,12 +922,12 @@ func (r *InMemoryMachineReconciler) reconcileNormalCoredns(ctx context.Context, "Corefile": "ANG", }, } - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(corednsConfigMap), corednsConfigMap); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(corednsConfigMap), corednsConfigMap); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to get coreDNS configMap") } - if err := cloudClient.Create(ctx, corednsConfigMap); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, corednsConfigMap); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create coreDNS configMap") } } @@ -952,12 +951,12 @@ func (r *InMemoryMachineReconciler) reconcileNormalCoredns(ctx context.Context, }, } - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(corednsDeployment), corednsDeployment); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(corednsDeployment), corednsDeployment); err != nil { if !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to get coreDNS deployment") } - if err := cloudClient.Create(ctx, corednsDeployment); err != nil && !apierrors.IsAlreadyExists(err) { + if err := inmemoryClient.Create(ctx, corednsDeployment); err != nil && !apierrors.IsAlreadyExists(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to create coreDNS deployment") } } @@ -999,7 +998,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteCloudMachine(ctx context.Cont // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Delete VM cloudMachine := &cloudv1.CloudMachine{ @@ -1007,7 +1006,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteCloudMachine(ctx context.Cont Name: inMemoryMachine.Name, }, } - if err := cloudClient.Delete(ctx, cloudMachine); err != nil && !apierrors.IsNotFound(err) { + if err := inmemoryClient.Delete(ctx, cloudMachine); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to delete CloudMachine") } @@ -1018,7 +1017,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteNode(ctx context.Context, clu // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() // Delete Node node := &corev1.Node{ @@ -1028,7 +1027,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteNode(ctx context.Context, clu } // TODO(killianmuldoon): check if we can drop this given that the MachineController is already draining pods and deleting nodes. - if err := cloudClient.Delete(ctx, node); err != nil && !apierrors.IsNotFound(err) { + if err := inmemoryClient.Delete(ctx, node); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to delete Node") } @@ -1044,7 +1043,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteETCD(ctx context.Context, clu // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() etcdMember := fmt.Sprintf("etcd-%s", inMemoryMachine.Name) etcdPod := &corev1.Pod{ @@ -1053,7 +1052,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteETCD(ctx context.Context, clu Name: etcdMember, }, } - if err := cloudClient.Delete(ctx, etcdPod); err != nil && !apierrors.IsNotFound(err) { + if err := inmemoryClient.Delete(ctx, etcdPod); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to delete etcd Pod") } if err := r.APIServerMux.DeleteEtcdMember(resourceGroup, etcdMember); err != nil { @@ -1077,7 +1076,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteAPIServer(ctx context.Context // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() apiServer := fmt.Sprintf("kube-apiserver-%s", inMemoryMachine.Name) apiServerPod := &corev1.Pod{ @@ -1086,7 +1085,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteAPIServer(ctx context.Context Name: apiServer, }, } - if err := cloudClient.Delete(ctx, apiServerPod); err != nil && !apierrors.IsNotFound(err) { + if err := inmemoryClient.Delete(ctx, apiServerPod); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to delete apiServer Pod") } if err := r.APIServerMux.DeleteAPIServer(resourceGroup, apiServer); err != nil { @@ -1105,7 +1104,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteScheduler(ctx context.Context // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() schedulerPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -1113,7 +1112,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteScheduler(ctx context.Context Name: fmt.Sprintf("kube-scheduler-%s", inMemoryMachine.Name), }, } - if err := cloudClient.Delete(ctx, schedulerPod); err != nil && !apierrors.IsNotFound(err) { + if err := inmemoryClient.Delete(ctx, schedulerPod); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to scheduler Pod") } @@ -1129,7 +1128,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteControllerManager(ctx context // Compute the resource group unique name. // NOTE: We are using reconcilerGroup also as a name for the listener for sake of simplicity. 
resourceGroup := klog.KObj(cluster).String() - cloudClient := r.CloudManager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() controllerManagerPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -1137,7 +1136,7 @@ func (r *InMemoryMachineReconciler) reconcileDeleteControllerManager(ctx context Name: fmt.Sprintf("kube-controller-manager-%s", inMemoryMachine.Name), }, } - if err := cloudClient.Delete(ctx, controllerManagerPod); err != nil && !apierrors.IsNotFound(err) { + if err := inmemoryClient.Delete(ctx, controllerManagerPod); err != nil && !apierrors.IsNotFound(err) { return ctrl.Result{}, errors.Wrapf(err, "failed to controller manager Pod") } diff --git a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go index 70643ba4d280..6c63ae164772 100644 --- a/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go +++ b/test/infrastructure/inmemory/internal/controllers/inmemorymachine_controller_test.go @@ -41,8 +41,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" cloudv1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/api/v1alpha1" - cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/util/certs" "sigs.k8s.io/cluster-api/util/conditions" secretutil "sigs.k8s.io/cluster-api/util/secret" @@ -102,10 +102,10 @@ func TestReconcileNormalCloudMachine(t *testing.T) { g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := r.reconcileNormalCloudMachine(ctx, cluster, cpMachine, inMemoryMachine) g.Expect(err).ToNot(HaveOccurred()) @@ -181,10 +181,10 @@ func TestReconcileNormalNode(t *testing.T) { g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := r.reconcileNormalNode(ctx, cluster, cpMachine, inMemoryMachineWithVMNotYetProvisioned) g.Expect(err).ToNot(HaveOccurred()) @@ -203,10 +203,10 @@ func TestReconcileNormalNode(t *testing.T) { g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := 
r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := r.reconcileNormalNode(ctx, cluster, cpMachine, inMemoryMachineWithVMProvisioned) g.Expect(err).ToNot(HaveOccurred()) @@ -289,10 +289,10 @@ func TestReconcileNormalEtcd(t *testing.T) { g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := r.reconcileNormalETCD(ctx, cluster, cpMachine, inMemoryMachineWithNodeNotYetProvisioned) g.Expect(err).ToNot(HaveOccurred()) @@ -311,26 +311,26 @@ func TestReconcileNormalEtcd(t *testing.T) { t.Run("create pod if Node is ready", func(t *testing.T) { g := NewWithT(t) - manager := cmanager.New(scheme) + manager := inmemoryruntime.NewManager(scheme) host := "127.0.0.1" - wcmux, err := server.NewWorkloadClustersMux(manager, host, server.CustomPorts{ + wcmux, err := inmemoryserver.NewWorkloadClustersMux(manager, host, inmemoryserver.CustomPorts{ // NOTE: make sure to use ports different than other tests, so we can run tests in parallel - MinPort: server.DefaultMinPort + 1000, - MaxPort: server.DefaultMinPort + 1099, - DebugPort: server.DefaultDebugPort + 10, + MinPort: inmemoryserver.DefaultMinPort + 1000, + MaxPort: inmemoryserver.DefaultMinPort + 1099, + DebugPort: inmemoryserver.DefaultDebugPort + 10, }) g.Expect(err).ToNot(HaveOccurred()) _, err = wcmux.InitWorkloadClusterListener(klog.KObj(cluster).String()) g.Expect(err).ToNot(HaveOccurred()) r := InMemoryMachineReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(createCASecret(t, cluster, secretutil.EtcdCA)).Build(), - CloudManager: manager, - APIServerMux: wcmux, + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(createCASecret(t, cluster, secretutil.EtcdCA)).Build(), + InMemoryManager: manager, + APIServerMux: wcmux, } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := r.reconcileNormalETCD(ctx, cluster, cpMachine, inMemoryMachineWithNodeProvisioned1) g.Expect(err).ToNot(HaveOccurred()) @@ -390,26 +390,26 @@ func TestReconcileNormalEtcd(t *testing.T) { inMemoryMachineWithNodeProvisioned2 := inMemoryMachineWithNodeProvisioned1.DeepCopy() inMemoryMachineWithNodeProvisioned2.Name = "bar2" - manager := cmanager.New(scheme) + manager := inmemoryruntime.NewManager(scheme) host := "127.0.0.1" - wcmux, err := server.NewWorkloadClustersMux(manager, host, server.CustomPorts{ + wcmux, err := inmemoryserver.NewWorkloadClustersMux(manager, host, inmemoryserver.CustomPorts{ // NOTE: make sure to use ports different than other tests, so we can run tests in parallel - MinPort: server.DefaultMinPort + 1200, - MaxPort: server.DefaultMinPort + 1299, - DebugPort: server.DefaultDebugPort + 20, + MinPort: inmemoryserver.DefaultMinPort + 1200, + MaxPort: inmemoryserver.DefaultMinPort + 1299, + DebugPort: inmemoryserver.DefaultDebugPort + 20, }) g.Expect(err).ToNot(HaveOccurred()) _, err = wcmux.InitWorkloadClusterListener(klog.KObj(cluster).String()) 
g.Expect(err).ToNot(HaveOccurred()) r := InMemoryMachineReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(createCASecret(t, cluster, secretutil.EtcdCA)).Build(), - CloudManager: manager, - APIServerMux: wcmux, + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(createCASecret(t, cluster, secretutil.EtcdCA)).Build(), + InMemoryManager: manager, + APIServerMux: wcmux, } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() // first etcd pod gets annotated with clusterID, memberID, and also set as a leader @@ -496,10 +496,10 @@ func TestReconcileNormalApiServer(t *testing.T) { g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := r.reconcileNormalAPIServer(ctx, cluster, cpMachine, inMemoryMachineWithNodeNotYetProvisioned) g.Expect(err).ToNot(HaveOccurred()) @@ -518,26 +518,26 @@ func TestReconcileNormalApiServer(t *testing.T) { t.Run("create pod if Node is ready", func(t *testing.T) { g := NewWithT(t) - manager := cmanager.New(scheme) + manager := inmemoryruntime.NewManager(scheme) host := "127.0.0.1" - wcmux, err := server.NewWorkloadClustersMux(manager, host, server.CustomPorts{ + wcmux, err := inmemoryserver.NewWorkloadClustersMux(manager, host, inmemoryserver.CustomPorts{ // NOTE: make sure to use ports different than other tests, so we can run tests in parallel - MinPort: server.DefaultMinPort + 1100, - MaxPort: server.DefaultMinPort + 1199, - DebugPort: server.DefaultDebugPort + 11, + MinPort: inmemoryserver.DefaultMinPort + 1100, + MaxPort: inmemoryserver.DefaultMinPort + 1199, + DebugPort: inmemoryserver.DefaultDebugPort + 11, }) g.Expect(err).ToNot(HaveOccurred()) _, err = wcmux.InitWorkloadClusterListener(klog.KObj(cluster).String()) g.Expect(err).ToNot(HaveOccurred()) r := InMemoryMachineReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(createCASecret(t, cluster, secretutil.ClusterCA)).Build(), - CloudManager: manager, - APIServerMux: wcmux, + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(createCASecret(t, cluster, secretutil.ClusterCA)).Build(), + InMemoryManager: manager, + APIServerMux: wcmux, } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := r.reconcileNormalAPIServer(ctx, cluster, cpMachine, inMemoryMachineWithNodeProvisioned) g.Expect(err).ToNot(HaveOccurred()) @@ -624,10 +624,10 @@ func testReconcileNormalComponent(t *testing.T, component string, reconcileFunc g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := 
r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := reconcileFunc(r)(ctx, cluster, workerMachine, inMemoryMachineWithAPIServerProvisioned) g.Expect(err).ToNot(HaveOccurred()) @@ -647,10 +647,10 @@ func testReconcileNormalComponent(t *testing.T, component string, reconcileFunc g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := reconcileFunc(r)(ctx, cluster, cpMachine, inMemoryMachineWithAPIServerNotYetProvisioned) g.Expect(err).ToNot(HaveOccurred()) @@ -670,10 +670,10 @@ func testReconcileNormalComponent(t *testing.T, component string, reconcileFunc g := NewWithT(t) r := InMemoryMachineReconciler{ - CloudManager: cmanager.New(scheme), + InMemoryManager: inmemoryruntime.NewManager(scheme), } - r.CloudManager.AddResourceGroup(klog.KObj(cluster).String()) - c := r.CloudManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() + r.InMemoryManager.AddResourceGroup(klog.KObj(cluster).String()) + c := r.InMemoryManager.GetResourceGroup(klog.KObj(cluster).String()).GetClient() res, err := reconcileFunc(r)(ctx, cluster, cpMachine, inMemoryMachineWithAPIServerProvisioned) g.Expect(err).ToNot(HaveOccurred()) diff --git a/test/infrastructure/inmemory/internal/webhooks/inmemorycluster_webhook.go b/test/infrastructure/inmemory/internal/webhooks/inmemorycluster_webhook.go index 1ba0e4c715ad..55096259d06c 100644 --- a/test/infrastructure/inmemory/internal/webhooks/inmemorycluster_webhook.go +++ b/test/infrastructure/inmemory/internal/webhooks/inmemorycluster_webhook.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" ) // InMemoryCluster implements a validating and defaulting webhook for InMemoryCluster. @@ -32,7 +32,7 @@ type InMemoryCluster struct{} func (webhook *InMemoryCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.InMemoryCluster{}). + For(&infrav1.InMemoryCluster{}). WithDefaulter(webhook). WithValidator(webhook). Complete() diff --git a/test/infrastructure/inmemory/internal/webhooks/inmemoryclustertemplate_webhook.go b/test/infrastructure/inmemory/internal/webhooks/inmemoryclustertemplate_webhook.go index 14d7a6f23360..22c5e9a6555f 100644 --- a/test/infrastructure/inmemory/internal/webhooks/inmemoryclustertemplate_webhook.go +++ b/test/infrastructure/inmemory/internal/webhooks/inmemoryclustertemplate_webhook.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" ) // InMemoryClusterTemplate implements a validating and defaulting webhook for InMemoryClusterTemplate. 
@@ -32,7 +32,7 @@ type InMemoryClusterTemplate struct{} func (webhook *InMemoryClusterTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.InMemoryClusterTemplate{}). + For(&infrav1.InMemoryClusterTemplate{}). WithDefaulter(webhook). WithValidator(webhook). Complete() diff --git a/test/infrastructure/inmemory/internal/webhooks/inmemorymachine_webhook.go b/test/infrastructure/inmemory/internal/webhooks/inmemorymachine_webhook.go index 53ef4bcbee5a..386617cd574f 100644 --- a/test/infrastructure/inmemory/internal/webhooks/inmemorymachine_webhook.go +++ b/test/infrastructure/inmemory/internal/webhooks/inmemorymachine_webhook.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" ) // InMemoryMachine implements a validating and defaulting webhook for InMemoryMachine. @@ -32,7 +32,7 @@ type InMemoryMachine struct{} func (webhook *InMemoryMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.InMemoryMachine{}). + For(&infrav1.InMemoryMachine{}). WithDefaulter(webhook). WithValidator(webhook). Complete() diff --git a/test/infrastructure/inmemory/internal/webhooks/inmemorymachinetemplate_webhook.go b/test/infrastructure/inmemory/internal/webhooks/inmemorymachinetemplate_webhook.go index dde90a151040..4439f94b9466 100644 --- a/test/infrastructure/inmemory/internal/webhooks/inmemorymachinetemplate_webhook.go +++ b/test/infrastructure/inmemory/internal/webhooks/inmemorymachinetemplate_webhook.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" ) // InMemoryMachineTemplate implements a validating and defaulting webhook for InMemoryMachineTemplate. @@ -32,7 +32,7 @@ type InMemoryMachineTemplate struct{} func (webhook *InMemoryMachineTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). - For(&v1alpha1.InMemoryMachineTemplate{}). + For(&infrav1.InMemoryMachineTemplate{}). WithDefaulter(webhook). WithValidator(webhook). 
Complete() diff --git a/test/infrastructure/inmemory/main.go b/test/infrastructure/inmemory/main.go index 2c434dc75a4b..4d190738cfea 100644 --- a/test/infrastructure/inmemory/main.go +++ b/test/infrastructure/inmemory/main.go @@ -48,16 +48,16 @@ import ( "sigs.k8s.io/cluster-api/feature" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/controllers" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud" cloudv1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/api/v1alpha1" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/webhooks" "sigs.k8s.io/cluster-api/util/flags" "sigs.k8s.io/cluster-api/version" ) var ( - cloudScheme = runtime.NewScheme() + inmemoryScheme = runtime.NewScheme() scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") controllerName = "cluster-api-inmemory-controller-manager" @@ -92,10 +92,10 @@ func init() { _ = infrav1.AddToScheme(scheme) // scheme used for operating on the cloud resource. - _ = cloudv1.AddToScheme(cloudScheme) - _ = corev1.AddToScheme(cloudScheme) - _ = appsv1.AddToScheme(cloudScheme) - _ = rbacv1.AddToScheme(cloudScheme) + _ = cloudv1.AddToScheme(inmemoryScheme) + _ = corev1.AddToScheme(inmemoryScheme) + _ = appsv1.AddToScheme(inmemoryScheme) + _ = rbacv1.AddToScheme(inmemoryScheme) } // InitFlags initializes the flags. @@ -272,16 +272,16 @@ func setupIndexes(_ context.Context, _ ctrl.Manager) { } func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { - // Start cloud manager - cloudMgr := cloud.NewManager(cloudScheme) - if err := cloudMgr.Start(ctx); err != nil { - setupLog.Error(err, "unable to start a cloud manager") + // Start the in-memory manager + inMemoryManager := inmemoryruntime.NewManager(inmemoryScheme) + if err := inMemoryManager.Start(ctx); err != nil { + setupLog.Error(err, "unable to start the in-memory manager") os.Exit(1) } // Start an http server podIP := os.Getenv("POD_IP") - apiServerMux, err := server.NewWorkloadClustersMux(cloudMgr, podIP) + apiServerMux, err := inmemoryserver.NewWorkloadClustersMux(inMemoryManager, podIP) if err != nil { setupLog.Error(err, "unable to create workload clusters mux") os.Exit(1) } @@ -290,7 +290,7 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) { // Setup reconcilers if err := (&controllers.InMemoryClusterReconciler{ Client: mgr.GetClient(), - CloudManager: cloudMgr, + InMemoryManager: inMemoryManager, APIServerMux: apiServerMux, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, concurrency(clusterConcurrency)); err != nil { @@ -300,7 +300,7 @@ if err := (&controllers.InMemoryMachineReconciler{ Client: mgr.GetClient(), - CloudManager: cloudMgr, + InMemoryManager: inMemoryManager, APIServerMux: apiServerMux, WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr, concurrency(machineConcurrency)); err != nil { diff --git a/test/infrastructure/inmemory/internal/cloud/alias.go b/test/infrastructure/inmemory/pkg/runtime/alias.go similarity index 76% rename from test/infrastructure/inmemory/internal/cloud/alias.go rename to test/infrastructure/inmemory/pkg/runtime/alias.go index 51daceae1f77..f15ad8e70582 100644 ---
a/test/infrastructure/inmemory/internal/cloud/alias.go +++ b/test/infrastructure/inmemory/pkg/runtime/alias.go @@ -14,26 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cloud +package runtime import ( "sigs.k8s.io/controller-runtime/pkg/client" - cclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/client" - cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" + inmemoryclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/client" + inmemorymanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/manager" ) // Client knows how to perform CRUD operations on resources in a resource group. -type Client cclient.Client +type Client inmemoryclient.Client // Object represents an object. type Object client.Object // Manager initializes shared dependencies such as Caches and Clients, and provides them to Runnables. // A Manager is required to create Controllers. -type Manager cmanager.Manager +type Manager inmemorymanager.Manager var ( // NewManager returns a new Manager for creating Controllers. - NewManager = cmanager.New + NewManager = inmemorymanager.New ) diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/cache.go b/test/infrastructure/inmemory/pkg/runtime/cache/cache.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/cache.go rename to test/infrastructure/inmemory/pkg/runtime/cache/cache.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/cache_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/cache_test.go rename to test/infrastructure/inmemory/pkg/runtime/cache/cache_test.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/client.go b/test/infrastructure/inmemory/pkg/runtime/cache/client.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/client.go rename to test/infrastructure/inmemory/pkg/runtime/cache/client.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/client_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/client_test.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/client_test.go rename to test/infrastructure/inmemory/pkg/runtime/cache/client_test.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/doc.go b/test/infrastructure/inmemory/pkg/runtime/cache/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/doc.go rename to test/infrastructure/inmemory/pkg/runtime/cache/doc.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/gc.go b/test/infrastructure/inmemory/pkg/runtime/cache/gc.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/gc.go rename to test/infrastructure/inmemory/pkg/runtime/cache/gc.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/gc_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/gc_test.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/gc_test.go rename to test/infrastructure/inmemory/pkg/runtime/cache/gc_test.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/hooks.go 
b/test/infrastructure/inmemory/pkg/runtime/cache/hooks.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/hooks.go rename to test/infrastructure/inmemory/pkg/runtime/cache/hooks.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/informer.go b/test/infrastructure/inmemory/pkg/runtime/cache/informer.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/informer.go rename to test/infrastructure/inmemory/pkg/runtime/cache/informer.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/sync.go b/test/infrastructure/inmemory/pkg/runtime/cache/sync.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/sync.go rename to test/infrastructure/inmemory/pkg/runtime/cache/sync.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/cache/sync_test.go b/test/infrastructure/inmemory/pkg/runtime/cache/sync_test.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/cache/sync_test.go rename to test/infrastructure/inmemory/pkg/runtime/cache/sync_test.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/client/client.go b/test/infrastructure/inmemory/pkg/runtime/client/client.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/client/client.go rename to test/infrastructure/inmemory/pkg/runtime/client/client.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/client/doc.go b/test/infrastructure/inmemory/pkg/runtime/client/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/client/doc.go rename to test/infrastructure/inmemory/pkg/runtime/client/doc.go diff --git a/test/infrastructure/inmemory/internal/cloud/doc.go b/test/infrastructure/inmemory/pkg/runtime/doc.go similarity index 58% rename from test/infrastructure/inmemory/internal/cloud/doc.go rename to test/infrastructure/inmemory/pkg/runtime/doc.go index a88deec31244..5ae88e52033b 100644 --- a/test/infrastructure/inmemory/internal/cloud/doc.go +++ b/test/infrastructure/inmemory/pkg/runtime/doc.go @@ -15,18 +15,17 @@ limitations under the License. */ /* -Package cloud implements an in memory cloud provider. +Package runtime implements an in memory runtime for handling objects grouped in resource groups, +similarly to resource groups in Azure. -Cloud provider objects are grouped in resource groups, similarly to resource groups in Azure. +In memory objects are defined like Kubernetes objects and they can be operated with +a client inspired by the controller-runtime client; they also have some of the behaviour +of real Kubernetes objects, e.g. garbage collection and owner references, +as well as informers to support watches. -Cloud provider objects are defined like Kubernetes objects and they can be operated with -a client inspired from the controller-runtime client. - -We can't use controller-runtime directly for the following reasons: +NOTE: We can't use controller-runtime directly for the following reasons: * multi-cluster (we have resourceGroups to differentiate resources belonging to different clusters) * data should be stored in-memory -* we would like that objects in memory behave like Kubernetes objects (garbage collection) - -The Manager, is the object responsible for the lifecycle of objects. +* we would like objects in memory to behave like Kubernetes objects (garbage collection).
*/ -package cloud +package runtime diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/manager/doc.go b/test/infrastructure/inmemory/pkg/runtime/manager/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/manager/doc.go rename to test/infrastructure/inmemory/pkg/runtime/manager/doc.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/manager/manager.go b/test/infrastructure/inmemory/pkg/runtime/manager/manager.go similarity index 78% rename from test/infrastructure/inmemory/internal/cloud/runtime/manager/manager.go rename to test/infrastructure/inmemory/pkg/runtime/manager/manager.go index 07dd04614dc5..777636b645d6 100644 --- a/test/infrastructure/inmemory/internal/cloud/runtime/manager/manager.go +++ b/test/infrastructure/inmemory/pkg/runtime/manager/manager.go @@ -23,8 +23,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" - ccache "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/cache" - cresourcegroup "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup" + inmemorycache "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/cache" + inmemoryresoucegroup "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/resourcegroup" ) // Manager initializes shared dependencies such as Caches and Clients. @@ -32,12 +32,12 @@ type Manager interface { // TODO: refactor in resoucegroup.add/delete/get; make delete fail if rs does not exist AddResourceGroup(name string) DeleteResourceGroup(name string) - GetResourceGroup(name string) cresourcegroup.ResourceGroup + GetResourceGroup(name string) inmemoryresoucegroup.ResourceGroup GetScheme() *runtime.Scheme // TODO: expose less (only get informers) - GetCache() ccache.Cache + GetCache() inmemorycache.Cache Start(ctx context.Context) error } @@ -47,7 +47,7 @@ var _ Manager = &manager{} type manager struct { scheme *runtime.Scheme - cache ccache.Cache + cache inmemorycache.Cache started bool } @@ -56,7 +56,7 @@ func New(scheme *runtime.Scheme) Manager { m := &manager{ scheme: scheme, } - m.cache = ccache.NewCache(scheme) + m.cache = inmemorycache.NewCache(scheme) return m } @@ -69,15 +69,15 @@ func (m *manager) DeleteResourceGroup(name string) { } // GetResourceGroup returns a resource group which reads from the cache. 
-func (m *manager) GetResourceGroup(name string) cresourcegroup.ResourceGroup { - return cresourcegroup.NewResourceGroup(name, m.cache) +func (m *manager) GetResourceGroup(name string) inmemoryresoucegroup.ResourceGroup { + return inmemoryresoucegroup.NewResourceGroup(name, m.cache) } func (m *manager) GetScheme() *runtime.Scheme { return m.scheme } -func (m *manager) GetCache() ccache.Cache { +func (m *manager) GetCache() inmemorycache.Cache { return m.cache } diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/cached_resourcegroup.go b/test/infrastructure/inmemory/pkg/runtime/resourcegroup/cached_resourcegroup.go similarity index 81% rename from test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/cached_resourcegroup.go rename to test/infrastructure/inmemory/pkg/runtime/resourcegroup/cached_resourcegroup.go index 14590be3ded2..662ff701b82e 100644 --- a/test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/cached_resourcegroup.go +++ b/test/infrastructure/inmemory/pkg/runtime/resourcegroup/cached_resourcegroup.go @@ -21,37 +21,37 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" - ccache "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/cache" - cclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/client" + inmemorycache "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/cache" + inmemoryclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/client" ) var _ ResourceGroup = &cachedResourceGroup{} type cachedResourceGroup struct { name string - cache ccache.Cache + cache inmemorycache.Cache } // NewResourceGroup returns a new resource group. -func NewResourceGroup(name string, cache ccache.Cache) ResourceGroup { +func NewResourceGroup(name string, cache inmemorycache.Cache) ResourceGroup { return &cachedResourceGroup{ name: name, cache: cache, } } -func (cc *cachedResourceGroup) GetClient() cclient.Client { +func (cc *cachedResourceGroup) GetClient() inmemoryclient.Client { return &cachedClient{ resourceGroup: cc.name, cache: cc.cache, } } -var _ cclient.Client = &cachedClient{} +var _ inmemoryclient.Client = &cachedClient{} type cachedClient struct { resourceGroup string - cache ccache.Cache + cache inmemorycache.Cache } func (c *cachedClient) Get(_ context.Context, key client.ObjectKey, obj client.Object) error { diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/doc.go b/test/infrastructure/inmemory/pkg/runtime/resourcegroup/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/doc.go rename to test/infrastructure/inmemory/pkg/runtime/resourcegroup/doc.go diff --git a/test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/resourcegroup.go b/test/infrastructure/inmemory/pkg/runtime/resourcegroup/resourcegroup.go similarity index 84% rename from test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/resourcegroup.go rename to test/infrastructure/inmemory/pkg/runtime/resourcegroup/resourcegroup.go index e8cfaf973841..8e1449574078 100644 --- a/test/infrastructure/inmemory/internal/cloud/runtime/resourcegroup/resourcegroup.go +++ b/test/infrastructure/inmemory/pkg/runtime/resourcegroup/resourcegroup.go @@ -17,10 +17,10 @@ limitations under the License. 
package resourcegroup import ( - cclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/client" + inmemoryclient "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime/client" ) // ResourceGroup groups resources for a workload cluster. type ResourceGroup interface { - GetClient() cclient.Client + GetClient() inmemoryclient.Client } diff --git a/test/infrastructure/inmemory/internal/server/api/const.go b/test/infrastructure/inmemory/pkg/server/api/const.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/api/const.go rename to test/infrastructure/inmemory/pkg/server/api/const.go diff --git a/test/infrastructure/inmemory/internal/server/api/debug.go b/test/infrastructure/inmemory/pkg/server/api/debug.go similarity index 88% rename from test/infrastructure/inmemory/internal/server/api/debug.go rename to test/infrastructure/inmemory/pkg/server/api/debug.go index 2481047e9a47..5ceb8684b403 100644 --- a/test/infrastructure/inmemory/internal/server/api/debug.go +++ b/test/infrastructure/inmemory/pkg/server/api/debug.go @@ -23,7 +23,7 @@ import ( "github.com/go-logr/logr" "k8s.io/apimachinery/pkg/runtime" - cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" ) // DebugInfoProvider defines the methods the server must implement @@ -33,7 +33,7 @@ type DebugInfoProvider interface { } // NewDebugHandler returns an http.Handler for debugging the server. -func NewDebugHandler(manager cmanager.Manager, log logr.Logger, infoProvider DebugInfoProvider) http.Handler { +func NewDebugHandler(manager inmemoryruntime.Manager, log logr.Logger, infoProvider DebugInfoProvider) http.Handler { debugServer := &debugHandler{ container: restful.NewContainer(), manager: manager, @@ -54,7 +54,7 @@ func NewDebugHandler(manager cmanager.Manager, log logr.Logger, infoProvider Deb type debugHandler struct { container *restful.Container - manager cmanager.Manager + manager inmemoryruntime.Manager log logr.Logger infoProvider DebugInfoProvider } diff --git a/test/infrastructure/inmemory/internal/server/api/doc.go b/test/infrastructure/inmemory/pkg/server/api/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/api/doc.go rename to test/infrastructure/inmemory/pkg/server/api/doc.go diff --git a/test/infrastructure/inmemory/internal/server/api/handler.go b/test/infrastructure/inmemory/pkg/server/api/handler.go similarity index 94% rename from test/infrastructure/inmemory/internal/server/api/handler.go rename to test/infrastructure/inmemory/pkg/server/api/handler.go index 63a07e770472..418d57652247 100644 --- a/test/infrastructure/inmemory/internal/server/api/handler.go +++ b/test/infrastructure/inmemory/pkg/server/api/handler.go @@ -48,8 +48,8 @@ import ( "k8s.io/client-go/tools/portforward" "sigs.k8s.io/controller-runtime/pkg/client" - cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" - gportforward "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server/api/portforward" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryportforward "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server/api/portforward" ) // ResourceGroupResolver defines a func that can identify which workloadCluster/resourceGroup a @@ -57,7 +57,7 @@ import ( type ResourceGroupResolver func(host string) (string, error) // 
NewAPIServerHandler returns an http.Handler for a fake API server. -func NewAPIServerHandler(manager cmanager.Manager, log logr.Logger, resolver ResourceGroupResolver) http.Handler { +func NewAPIServerHandler(manager inmemoryruntime.Manager, log logr.Logger, resolver ResourceGroupResolver) http.Handler { apiServer := &apiServerHandler{ container: restful.NewContainer(), manager: manager, @@ -128,7 +128,7 @@ func NewAPIServerHandler(manager cmanager.Manager, log logr.Logger, resolver Res type apiServerHandler struct { container *restful.Container - manager cmanager.Manager + manager inmemoryruntime.Manager log logr.Logger resourceGroupResolver ResourceGroupResolver requestInfoResolver *request.RequestInfoFactory @@ -243,7 +243,7 @@ func (h *apiServerHandler) apiV1Create(req *restful.Request, resp *restful.Respo } // Gets at client to the resource group. - cloudClient := h.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient() // Maps the requested resource to a gvk. gvk, err := requestToGVK(req) @@ -273,7 +273,7 @@ func (h *apiServerHandler) apiV1Create(req *restful.Request, resp *restful.Respo obj := newObj.(client.Object) // TODO: consider check vs enforce for namespace on the object - namespace on the request path obj.SetNamespace(req.PathParameter("namespace")) - if err := cloudClient.Create(ctx, obj); err != nil { + if err := inmemoryClient.Create(ctx, obj); err != nil { if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) { _ = resp.WriteHeaderAndEntity(int(status.Status().Code), status) return @@ -298,7 +298,7 @@ func (h *apiServerHandler) apiV1List(req *restful.Request, resp *restful.Respons } // Gets at client to the resource group. - cloudClient := h.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient() // Maps the requested resource to a gvk. gvk, err := requestToGVK(req) @@ -327,7 +327,7 @@ func (h *apiServerHandler) apiV1List(req *restful.Request, resp *restful.Respons listOpts = append(listOpts, client.MatchingFieldsSelector{Selector: selector}) } - if err := cloudClient.List(ctx, list, listOpts...); err != nil { + if err := inmemoryClient.List(ctx, list, listOpts...); err != nil { if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) { _ = resp.WriteHeaderAndEntity(int(status.Status().Code), status) return @@ -375,7 +375,7 @@ func (h *apiServerHandler) apiV1Get(req *restful.Request, resp *restful.Response } // Gets at client to the resource group. - cloudClient := h.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient() // Maps the requested resource to a gvk. gvk, err := requestToGVK(req) @@ -391,7 +391,7 @@ func (h *apiServerHandler) apiV1Get(req *restful.Request, resp *restful.Response obj.SetName(req.PathParameter("name")) obj.SetNamespace(req.PathParameter("namespace")) - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) { _ = resp.WriteHeaderAndEntity(int(status.Status().Code), status) return @@ -416,7 +416,7 @@ func (h *apiServerHandler) apiV1Update(req *restful.Request, resp *restful.Respo } // Gets at client to the resource group. 
- cloudClient := h.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient() // Maps the requested resource to a gvk. gvk, err := requestToGVK(req) @@ -446,7 +446,7 @@ func (h *apiServerHandler) apiV1Update(req *restful.Request, resp *restful.Respo obj := newObj.(client.Object) // TODO: consider check vs enforce for namespace on the object - namespace on the request path obj.SetNamespace(req.PathParameter("namespace")) - if err := cloudClient.Update(ctx, obj); err != nil { + if err := inmemoryClient.Update(ctx, obj); err != nil { if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) { _ = resp.WriteHeaderAndEntity(int(status.Status().Code), status) return @@ -471,7 +471,7 @@ func (h *apiServerHandler) apiV1Patch(req *restful.Request, resp *restful.Respon } // Gets at client to the resource group. - cloudClient := h.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient() // Maps the requested resource to a gvk. gvk, err := requestToGVK(req) @@ -495,11 +495,11 @@ func (h *apiServerHandler) apiV1Patch(req *restful.Request, resp *restful.Respon obj.SetName(req.PathParameter("name")) obj.SetNamespace(req.PathParameter("namespace")) - if err := cloudClient.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + if err := inmemoryClient.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { _ = resp.WriteErrorString(http.StatusInternalServerError, err.Error()) return } - if err := cloudClient.Patch(ctx, obj, patch); err != nil { + if err := inmemoryClient.Patch(ctx, obj, patch); err != nil { if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) { _ = resp.WriteHeaderAndEntity(int(status.Status().Code), status) return @@ -524,7 +524,7 @@ func (h *apiServerHandler) apiV1Delete(req *restful.Request, resp *restful.Respo } // Gets at client to the resource group. - cloudClient := h.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := h.manager.GetResourceGroup(resourceGroup).GetClient() // Maps the requested resource to a gvk. gvk, err := requestToGVK(req) @@ -540,7 +540,7 @@ func (h *apiServerHandler) apiV1Delete(req *restful.Request, resp *restful.Respo obj.SetName(req.PathParameter("name")) obj.SetNamespace(req.PathParameter("namespace")) - if err := cloudClient.Delete(ctx, obj); err != nil { + if err := inmemoryClient.Delete(ctx, obj); err != nil { if status, ok := err.(apierrors.APIStatus); ok || errors.As(err, &status) { _ = resp.WriteHeaderAndEntity(int(status.Status().Code), status) return @@ -579,7 +579,7 @@ func (h *apiServerHandler) apiV1PortForward(req *restful.Request, resp *restful. // Upgrade the connection specifying what to do when a new http stream is received. // After being received, the new stream will be published into the stream channel for handling. upgrader := spdy.NewResponseUpgrader() - conn := upgrader.UpgradeResponse(respWriter, request, gportforward.HTTPStreamReceived(streamChan)) + conn := upgrader.UpgradeResponse(respWriter, request, inmemoryportforward.HTTPStreamReceived(streamChan)) if conn == nil { _ = resp.WriteErrorString(http.StatusInternalServerError, "failed to get upgraded connection") return @@ -592,7 +592,7 @@ func (h *apiServerHandler) apiV1PortForward(req *restful.Request, resp *restful. 
// Start the process handling streams that are published in the stream channel, please note that: // - The connection with the target will be established only when the first operation will be executed // - Following operations will re-use the same connection. - streamHandler := gportforward.NewHTTPStreamHandler( + streamHandler := inmemoryportforward.NewHTTPStreamHandler( conn, streamChan, podName, @@ -623,7 +623,7 @@ func (h *apiServerHandler) doPortForward(ctx context.Context, address string, st // Create a tunnel for bidirectional copy of data between the stream // originated from the initiator of the port forward operation and the target. - return gportforward.HTTPStreamTunnel(ctx, stream, dial) + return inmemoryportforward.HTTPStreamTunnel(ctx, stream, dial) } func (h *apiServerHandler) healthz(_ *restful.Request, resp *restful.Response) { diff --git a/test/infrastructure/inmemory/internal/server/api/metrics.go b/test/infrastructure/inmemory/pkg/server/api/metrics.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/api/metrics.go rename to test/infrastructure/inmemory/pkg/server/api/metrics.go diff --git a/test/infrastructure/inmemory/internal/server/api/portforward/doc.go b/test/infrastructure/inmemory/pkg/server/api/portforward/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/api/portforward/doc.go rename to test/infrastructure/inmemory/pkg/server/api/portforward/doc.go diff --git a/test/infrastructure/inmemory/internal/server/api/portforward/httpstreams.go b/test/infrastructure/inmemory/pkg/server/api/portforward/httpstreams.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/api/portforward/httpstreams.go rename to test/infrastructure/inmemory/pkg/server/api/portforward/httpstreams.go diff --git a/test/infrastructure/inmemory/internal/server/api/watch.go b/test/infrastructure/inmemory/pkg/server/api/watch.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/api/watch.go rename to test/infrastructure/inmemory/pkg/server/api/watch.go diff --git a/test/infrastructure/inmemory/internal/server/certs.go b/test/infrastructure/inmemory/pkg/server/certs.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/certs.go rename to test/infrastructure/inmemory/pkg/server/certs.go diff --git a/test/infrastructure/inmemory/internal/server/doc.go b/test/infrastructure/inmemory/pkg/server/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/doc.go rename to test/infrastructure/inmemory/pkg/server/doc.go diff --git a/test/infrastructure/inmemory/internal/server/etcd/doc.go b/test/infrastructure/inmemory/pkg/server/etcd/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/etcd/doc.go rename to test/infrastructure/inmemory/pkg/server/etcd/doc.go diff --git a/test/infrastructure/inmemory/internal/server/etcd/handler.go b/test/infrastructure/inmemory/pkg/server/etcd/handler.go similarity index 89% rename from test/infrastructure/inmemory/internal/server/etcd/handler.go rename to test/infrastructure/inmemory/pkg/server/etcd/handler.go index 9661f7520310..3ed31f8d9c46 100644 --- a/test/infrastructure/inmemory/internal/server/etcd/handler.go +++ b/test/infrastructure/inmemory/pkg/server/etcd/handler.go @@ -34,8 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" cloudv1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/api/v1alpha1" - cclient 
"sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/client" - cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" ) // ResourceGroupResolver defines a func that can identify which workloadCluster/resourceGroup a @@ -43,7 +42,7 @@ import ( type ResourceGroupResolver func(host string) (string, error) // NewEtcdServerHandler returns an http.Handler for fake etcd members. -func NewEtcdServerHandler(manager cmanager.Manager, log logr.Logger, resolver ResourceGroupResolver) http.Handler { +func NewEtcdServerHandler(manager inmemoryruntime.Manager, log logr.Logger, resolver ResourceGroupResolver) http.Handler { svr := grpc.NewServer() baseSvr := &baseServer{ @@ -102,10 +101,10 @@ func (m *maintenanceServer) Status(ctx context.Context, _ *pb.StatusRequest) (*p if err != nil { return nil, err } - cloudClient := m.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := m.manager.GetResourceGroup(resourceGroup).GetClient() m.log.V(4).Info("Etcd: Status", "resourceGroup", resourceGroup, "etcdMember", etcdMember) - _, statusResponse, err := m.inspectEtcd(ctx, cloudClient, etcdMember) + _, statusResponse, err := m.inspectEtcd(ctx, inmemoryClient, etcdMember) if err != nil { return nil, err } @@ -143,8 +142,8 @@ func (m *maintenanceServer) MoveLeader(ctx context.Context, req *pb.MoveLeaderRe return nil, err } etcdPods := &corev1.PodList{} - cloudClient := m.manager.GetResourceGroup(resourceGroup).GetClient() - if err := cloudClient.List(ctx, etcdPods, + inmemoryClient := m.manager.GetResourceGroup(resourceGroup).GetClient() + if err := inmemoryClient.List(ctx, etcdPods, client.InNamespace(metav1.NamespaceSystem), client.MatchingLabels{ "component": "etcd", @@ -167,7 +166,7 @@ func (m *maintenanceServer) MoveLeader(ctx context.Context, req *pb.MoveLeaderRe annotations := updatedPod.GetAnnotations() annotations[cloudv1.EtcdLeaderFromAnnotationName] = time.Now().Format(time.RFC3339) updatedPod.SetAnnotations(annotations) - err := cloudClient.Patch(ctx, updatedPod, client.MergeFrom(pod)) + err := inmemoryClient.Patch(ctx, updatedPod, client.MergeFrom(pod)) if err != nil { return nil, err } @@ -206,11 +205,11 @@ func (c *clusterServerServer) MemberRemove(ctx context.Context, req *pb.MemberRe if err != nil { return nil, err } - cloudClient := c.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := c.manager.GetResourceGroup(resourceGroup).GetClient() etcdPods := &corev1.PodList{} - if err := cloudClient.List(ctx, etcdPods, + if err := inmemoryClient.List(ctx, etcdPods, client.InNamespace(metav1.NamespaceSystem), client.MatchingLabels{ "component": "etcd", @@ -227,7 +226,7 @@ func (c *clusterServerServer) MemberRemove(ctx context.Context, req *pb.MemberRe } updatedPod := pod.DeepCopy() updatedPod.Annotations[cloudv1.EtcdMemberRemoved] = "" - if err := cloudClient.Patch(ctx, updatedPod, client.MergeFrom(&pod)); err != nil { + if err := inmemoryClient.Patch(ctx, updatedPod, client.MergeFrom(&pod)); err != nil { return nil, err } return out, nil @@ -252,10 +251,10 @@ func (c *clusterServerServer) MemberList(ctx context.Context, _ *pb.MemberListRe if err != nil { return nil, err } - cloudClient := c.manager.GetResourceGroup(resourceGroup).GetClient() + inmemoryClient := c.manager.GetResourceGroup(resourceGroup).GetClient() c.log.V(4).Info("Etcd: MemberList", "resourceGroup", resourceGroup, "etcdMember", etcdMember) - memberList, _, 
err := c.inspectEtcd(ctx, cloudClient, etcdMember) + memberList, _, err := c.inspectEtcd(ctx, inmemoryClient, etcdMember) if err != nil { return nil, err } @@ -268,7 +267,7 @@ func (c *clusterServerServer) MemberPromote(_ context.Context, _ *pb.MemberPromo } type baseServer struct { - manager cmanager.Manager + manager inmemoryruntime.Manager log logr.Logger resourceGroupResolver ResourceGroupResolver } @@ -289,9 +288,9 @@ func (b *baseServer) getResourceGroupAndMember(ctx context.Context) (resourceGro return } -func (b *baseServer) inspectEtcd(ctx context.Context, cloudClient cclient.Client, etcdMember string) (*pb.MemberListResponse, *pb.StatusResponse, error) { +func (b *baseServer) inspectEtcd(ctx context.Context, inmemoryClient inmemoryruntime.Client, etcdMember string) (*pb.MemberListResponse, *pb.StatusResponse, error) { etcdPods := &corev1.PodList{} - if err := cloudClient.List(ctx, etcdPods, + if err := inmemoryClient.List(ctx, etcdPods, client.InNamespace(metav1.NamespaceSystem), client.MatchingLabels{ "component": "etcd", diff --git a/test/infrastructure/inmemory/internal/server/etcd/handler_test.go b/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go similarity index 89% rename from test/infrastructure/inmemory/internal/server/etcd/handler_test.go rename to test/infrastructure/inmemory/pkg/server/etcd/handler_test.go index 15336e52bf7d..113dc1f1bd62 100644 --- a/test/infrastructure/inmemory/internal/server/etcd/handler_test.go +++ b/test/infrastructure/inmemory/pkg/server/etcd/handler_test.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" cloudv1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/api/v1alpha1" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" ) func Test_etcd_scalingflow(t *testing.T) { @@ -42,7 +42,7 @@ func Test_etcd_scalingflow(t *testing.T) { // During a scale down event - for example during upgrade - KCP will call `MoveLeader` and `MemberRemove` in sequence. g := NewWithT(t) ctx := metadata.NewIncomingContext(context.Background(), metadata.New(map[string]string{":authority": "etcd-1"})) - manager := manager.New(scheme) + manager := inmemoryruntime.NewManager(scheme) resourceGroupResolver := func(host string) (string, error) { return "group1", nil } c := &clusterServerServer{ baseServer: &baseServer{ @@ -60,7 +60,7 @@ func Test_etcd_scalingflow(t *testing.T) { }, } c.manager.AddResourceGroup("group1") - cloudClient := c.manager.GetResourceGroup("group1").GetClient() + inmemoryClient := c.manager.GetResourceGroup("group1").GetClient() for i := 1; i <= 3; i++ { etcdMember := fmt.Sprintf("etcd-%d", i) @@ -94,7 +94,7 @@ func Test_etcd_scalingflow(t *testing.T) { if i == 1 { etcdPod.Annotations[cloudv1.EtcdLeaderFromAnnotationName] = time.Date(2020, 07, 03, 14, 25, 58, 651387237, time.UTC).Format(time.RFC3339) } - g.Expect(cloudClient.Create(ctx, etcdPod)).To(Succeed()) + g.Expect(inmemoryClient.Create(ctx, etcdPod)).To(Succeed()) } var etcdMemberToRemove uint64 = 2 var etcdMemberToBeLeader uint64 = 3 @@ -107,11 +107,11 @@ func Test_etcd_scalingflow(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) // Expect the inspect call to fail on a member which has been removed. 
- _, _, err = c.inspectEtcd(ctx, cloudClient, fmt.Sprintf("%d", etcdMemberToRemove)) + _, _, err = c.inspectEtcd(ctx, inmemoryClient, fmt.Sprintf("%d", etcdMemberToRemove)) g.Expect(err).To(HaveOccurred()) // inspectEtcd should succeed when calling on a member that has not been removed. - members, status, err := c.inspectEtcd(ctx, cloudClient, fmt.Sprintf("%d", etcdMemberToBeLeader)) + members, status, err := c.inspectEtcd(ctx, inmemoryClient, fmt.Sprintf("%d", etcdMemberToBeLeader)) g.Expect(err).ToNot(HaveOccurred()) g.Expect(status.Leader).To(Equal(etcdMemberToBeLeader)) diff --git a/test/infrastructure/inmemory/internal/server/etcd/metrics.go b/test/infrastructure/inmemory/pkg/server/etcd/metrics.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/etcd/metrics.go rename to test/infrastructure/inmemory/pkg/server/etcd/metrics.go diff --git a/test/infrastructure/inmemory/internal/server/listener.go b/test/infrastructure/inmemory/pkg/server/listener.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/listener.go rename to test/infrastructure/inmemory/pkg/server/listener.go diff --git a/test/infrastructure/inmemory/internal/server/mux.go b/test/infrastructure/inmemory/pkg/server/mux.go similarity index 96% rename from test/infrastructure/inmemory/internal/server/mux.go rename to test/infrastructure/inmemory/pkg/server/mux.go index 361872d82419..173942738bb3 100644 --- a/test/infrastructure/inmemory/internal/server/mux.go +++ b/test/infrastructure/inmemory/pkg/server/mux.go @@ -39,9 +39,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/api/v1alpha1" - cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server/api" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server/etcd" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryapi "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server/api" + inmemoryetcd "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server/etcd" "sigs.k8s.io/cluster-api/util/certs" ) @@ -103,7 +103,7 @@ type WorkloadClustersMux struct { maxPort int portIndex int - manager cmanager.Manager // TODO: figure out if we can have a smaller interface (GetResourceGroup, GetSchema) + manager inmemoryruntime.Manager // TODO: figure out if we can have a smaller interface (GetResourceGroup, GetSchema) debugServer http.Server muxServer http.Server @@ -116,7 +116,7 @@ type WorkloadClustersMux struct { } // NewWorkloadClustersMux returns a WorkloadClustersMux that handles requests for multiple workload clusters. 
-func NewWorkloadClustersMux(manager cmanager.Manager, host string, opts ...WorkloadClustersMuxOption) (*WorkloadClustersMux, error) { +func NewWorkloadClustersMux(manager inmemoryruntime.Manager, host string, opts ...WorkloadClustersMuxOption) (*WorkloadClustersMux, error) { options := WorkloadClustersMuxOptions{ MinPort: DefaultMinPort, MaxPort: DefaultMaxPort, @@ -151,7 +151,7 @@ func NewWorkloadClustersMux(manager cmanager.Manager, host string, opts ...Workl //nolint:gosec // Ignoring the following for now: "G112: Potential Slowloris Attack because ReadHeaderTimeout is not configured in the http.Server (gosec)" m.debugServer = http.Server{ - Handler: api.NewDebugHandler(manager, m.log, m), + Handler: inmemoryapi.NewDebugHandler(manager, m.log, m), } l, err := net.Listen("tcp", net.JoinHostPort(host, fmt.Sprintf("%d", options.DebugPort))) if err != nil { @@ -179,8 +179,8 @@ func (m *WorkloadClustersMux) mixedHandler() http.Handler { } // build the handlers for API server and etcd. - apiHandler := api.NewAPIServerHandler(m.manager, m.log, resourceGroupResolver) - etcdHandler := etcd.NewEtcdServerHandler(m.manager, m.log, resourceGroupResolver) + apiHandler := inmemoryapi.NewAPIServerHandler(m.manager, m.log, resourceGroupResolver) + etcdHandler := inmemoryetcd.NewEtcdServerHandler(m.manager, m.log, resourceGroupResolver) // Creates the mixed handler combining the two above depending on // the type of request being processed diff --git a/test/infrastructure/inmemory/internal/server/mux_test.go b/test/infrastructure/inmemory/pkg/server/mux_test.go similarity index 97% rename from test/infrastructure/inmemory/internal/server/mux_test.go rename to test/infrastructure/inmemory/pkg/server/mux_test.go index 875827dd9553..7aba29c2b524 100644 --- a/test/infrastructure/inmemory/internal/server/mux_test.go +++ b/test/infrastructure/inmemory/pkg/server/mux_test.go @@ -42,8 +42,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" cloudv1 "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/api/v1alpha1" - cmanager "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/cloud/runtime/manager" - "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/internal/server/proxy" + inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" + inmemoryproxy "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server/proxy" "sigs.k8s.io/cluster-api/util/certs" ) @@ -64,7 +64,7 @@ func TestMux(t *testing.T) { t.Parallel() g := NewWithT(t) - manager := cmanager.New(scheme) + manager := inmemoryruntime.NewManager(scheme) wcl := "workload-cluster" host := "127.0.0.1" @@ -251,7 +251,7 @@ func TestAPI_rbacv1_CRUD(t *testing.T) { func TestAPI_PortForward(t *testing.T) { t.Parallel() g := NewWithT(t) - manager := cmanager.New(scheme) + manager := inmemoryruntime.NewManager(scheme) // TODO: deduplicate this setup code with the test above host := "127.0.0.1" @@ -312,14 +312,14 @@ func TestAPI_PortForward(t *testing.T) { restConfig, err := listener.RESTConfig() g.Expect(err).ToNot(HaveOccurred()) - p1 := proxy.Proxy{ + p1 := inmemoryproxy.Proxy{ Kind: "pods", Namespace: metav1.NamespaceSystem, KubeConfig: restConfig, Port: 1234, } - dialer1, err := proxy.NewDialer(p1) + dialer1, err := inmemoryproxy.NewDialer(p1) g.Expect(err).ToNot(HaveOccurred()) rawConn, err := dialer1.DialContextWithAddr(ctx, "kube-apiserver-foo") @@ -343,14 +343,14 @@ func TestAPI_PortForward(t *testing.T) { clientCert, err := tls.X509KeyPair(certs.EncodeCertPEM(cert), 
certs.EncodePrivateKeyPEM(key)) g.Expect(err).ToNot(HaveOccurred()) - p2 := proxy.Proxy{ + p2 := inmemoryproxy.Proxy{ Kind: "pods", Namespace: metav1.NamespaceSystem, KubeConfig: restConfig, Port: 2379, } - dialer2, err := proxy.NewDialer(p2) + dialer2, err := inmemoryproxy.NewDialer(p2) g.Expect(err).ToNot(HaveOccurred()) etcdClient1, err := clientv3.New(clientv3.Config{ @@ -480,7 +480,7 @@ func TestAPI_corev1_Watch(t *testing.T) { } func setupWorkloadClusterListener(g Gomega, ports CustomPorts) (*WorkloadClustersMux, client.WithWatch) { - manager := cmanager.New(scheme) + manager := inmemoryruntime.NewManager(scheme) host := "127.0.0.1" wcmux, err := NewWorkloadClustersMux(manager, host, ports) diff --git a/test/infrastructure/inmemory/internal/server/proxy/addr.go b/test/infrastructure/inmemory/pkg/server/proxy/addr.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/proxy/addr.go rename to test/infrastructure/inmemory/pkg/server/proxy/addr.go diff --git a/test/infrastructure/inmemory/internal/server/proxy/conn.go b/test/infrastructure/inmemory/pkg/server/proxy/conn.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/proxy/conn.go rename to test/infrastructure/inmemory/pkg/server/proxy/conn.go diff --git a/test/infrastructure/inmemory/internal/server/proxy/dial.go b/test/infrastructure/inmemory/pkg/server/proxy/dial.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/proxy/dial.go rename to test/infrastructure/inmemory/pkg/server/proxy/dial.go diff --git a/test/infrastructure/inmemory/internal/server/proxy/doc.go b/test/infrastructure/inmemory/pkg/server/proxy/doc.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/proxy/doc.go rename to test/infrastructure/inmemory/pkg/server/proxy/doc.go diff --git a/test/infrastructure/inmemory/internal/server/proxy/proxy.go b/test/infrastructure/inmemory/pkg/server/proxy/proxy.go similarity index 100% rename from test/infrastructure/inmemory/internal/server/proxy/proxy.go rename to test/infrastructure/inmemory/pkg/server/proxy/proxy.go
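
Since the goal of this change is to make the in memory runtime and server importable from outside the provider, the following is a minimal sketch of how an external consumer might wire the newly exported packages together. It is based only on the constructors visible in this patch (NewManager, Start, AddResourceGroup, GetClient, NewWorkloadClustersMux); the resource group name, the Pod object, and the host address are illustrative placeholders, not values taken from the repository.

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime"
	inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server"
)

func main() {
	ctx := context.Background()

	// Scheme for the objects hosted by the in memory runtime (corev1 only, for brevity).
	scheme := runtime.NewScheme()
	_ = corev1.AddToScheme(scheme)

	// Start the in memory manager and create a resource group, mirroring setupReconcilers in main.go.
	manager := inmemoryruntime.NewManager(scheme)
	if err := manager.Start(ctx); err != nil {
		panic(err)
	}
	manager.AddResourceGroup("cluster-1") // illustrative resource group name

	// Objects are written and read through a controller-runtime style client scoped to the resource group.
	inmemoryClient := manager.GetResourceGroup("cluster-1").GetClient()
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: metav1.NamespaceSystem,
			Name:      "kube-apiserver-cluster-1", // illustrative object
		},
	}
	if err := inmemoryClient.Create(ctx, pod); err != nil {
		panic(err)
	}

	// The mux serves the fake API server and etcd endpoints backed by the manager.
	if _, err := inmemoryserver.NewWorkloadClustersMux(manager, "127.0.0.1"); err != nil {
		panic(err)
	}
}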