diff --git a/pkg/cloudprovider/aws/aws.go b/pkg/cloudprovider/aws/aws.go index 182e724..3d48672 100644 --- a/pkg/cloudprovider/aws/aws.go +++ b/pkg/cloudprovider/aws/aws.go @@ -68,8 +68,8 @@ func verifyIfErrorOccurredWithDefaults(apiErr error, expectedMessage string) (bo } type provider struct { - autoScalingService *autoscaling.AutoScaling - ec2Service *ec2.EC2 + autoScalingService autoscalingiface.AutoScalingAPI + ec2Service ec2iface.EC2API logger logr.Logger } diff --git a/pkg/cloudprovider/aws/builder.go b/pkg/cloudprovider/aws/builder.go index 63c0dec..06a52af 100644 --- a/pkg/cloudprovider/aws/builder.go +++ b/pkg/cloudprovider/aws/builder.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/atlassian-labs/cyclops/pkg/cloudprovider" + fakeaws "github.com/atlassian-labs/cyclops/pkg/cloudprovider/aws/fake" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" @@ -40,3 +41,11 @@ func NewCloudProvider(logger logr.Logger) (cloudprovider.CloudProvider, error) { return p, nil } + +// NewGenericCloudProvider returns a new mock AWS cloud provider +func NewGenericCloudProvider(autoscalingiface *fakeaws.Autoscaling, ec2iface *fakeaws.Ec2) cloudprovider.CloudProvider { + return &provider{ + autoScalingService: autoscalingiface, + ec2Service: ec2iface, + } +} diff --git a/pkg/cloudprovider/aws/fake/fake.go b/pkg/cloudprovider/aws/fake/fake.go new file mode 100644 index 0000000..06aa72c --- /dev/null +++ b/pkg/cloudprovider/aws/fake/fake.go @@ -0,0 +1,142 @@ +package fakeaws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/ec2" + + "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" +) + +var ( + defaultAvailabilityZone = "us-east-1a" +) + +type Instance struct { + InstanceID string + AutoscalingGroupName string + State string +} + +type Ec2 struct { + 
ec2iface.EC2API + + Instances map[string]*Instance +} + +type Autoscaling struct { + autoscalingiface.AutoScalingAPI + + Instances map[string]*Instance +} + +func GenerateProviderID(instanceID string) string { + return fmt.Sprintf("aws:///%s/%s", + defaultAvailabilityZone, + instanceID, + ) +} + +func generateEc2Instance(instance *Instance) *ec2.Instance { + ec2Instance := &ec2.Instance{ + InstanceId: aws.String(instance.InstanceID), + State: &ec2.InstanceState{ + Name: aws.String(instance.State), + }, + } + + return ec2Instance +} + +func generateAutoscalingInstance(instance *Instance) *autoscaling.Instance { + autoscalingInstance := &autoscaling.Instance{ + InstanceId: aws.String(instance.InstanceID), + AvailabilityZone: aws.String(defaultAvailabilityZone), + } + + return autoscalingInstance +} + +// *************** Autoscaling *************** // + +func (m *Autoscaling) DescribeAutoScalingGroups(input *autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { + var asgs = make(map[string]*autoscaling.Group, 0) + + var asgNameLookup = make(map[string]interface{}) + + for _, asgName := range input.AutoScalingGroupNames { + asgNameLookup[*asgName] = nil + } + + for _, instance := range m.Instances { + if instance.AutoscalingGroupName == "" { + continue + } + + if _, exists := asgNameLookup[instance.AutoscalingGroupName]; !exists { + continue + } + + asg, exists := asgs[instance.AutoscalingGroupName] + + if !exists { + asg = &autoscaling.Group{ + AutoScalingGroupName: aws.String(instance.AutoscalingGroupName), + Instances: []*autoscaling.Instance{}, + AvailabilityZones: []*string{ + aws.String(defaultAvailabilityZone), + }, + } + + asgs[instance.AutoscalingGroupName] = asg + } + + asg.Instances = append( + asg.Instances, + generateAutoscalingInstance(instance), + ) + } + + var asgList = make([]*autoscaling.Group, 0) + + for _, asg := range asgs { + asgList = append(asgList, asg) + } + + return 
&autoscaling.DescribeAutoScalingGroupsOutput{ + AutoScalingGroups: asgList, + }, nil +} + +// *************** EC2 *************** // + +func (m *Ec2) DescribeInstances(input *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) { + var instances = make([]*ec2.Instance, 0) + var instanceIds = make(map[string]interface{}) + + for _, instanceId := range input.InstanceIds { + instanceIds[*instanceId] = nil + } + + for _, instance := range m.Instances { + if _, ok := instanceIds[instance.InstanceID]; input.InstanceIds != nil && !ok { + continue + } + + instances = append( + instances, + generateEc2Instance(instance), + ) + } + + return &ec2.DescribeInstancesOutput{ + Reservations: []*ec2.Reservation{ + { + Instances: instances, + }, + }, + }, nil +} diff --git a/pkg/controller/cyclenoderequest/transitioner/test_helpers.go b/pkg/controller/cyclenoderequest/transitioner/test_helpers.go new file mode 100644 index 0000000..660ae16 --- /dev/null +++ b/pkg/controller/cyclenoderequest/transitioner/test_helpers.go @@ -0,0 +1,61 @@ +package transitioner + +import ( + "net/http" + + v1 "github.com/atlassian-labs/cyclops/pkg/apis/atlassian/v1" + "github.com/atlassian-labs/cyclops/pkg/controller" + "github.com/atlassian-labs/cyclops/pkg/mock" +) + +type Option func(t *Transitioner) + +func WithCloudProviderInstances(nodes []*mock.Node) Option { + return func(t *Transitioner) { + t.cloudProviderInstances = append(t.cloudProviderInstances, nodes...) + } +} + +func WithKubeNodes(nodes []*mock.Node) Option { + return func(t *Transitioner) { + t.kubeNodes = append(t.kubeNodes, nodes...) 
+ } +} + +// ************************************************************************** // + +type Transitioner struct { + *CycleNodeRequestTransitioner + *mock.Client + + cloudProviderInstances []*mock.Node + kubeNodes []*mock.Node +} + +func NewFakeTransitioner(cnr *v1.CycleNodeRequest, opts ...Option) *Transitioner { + t := &Transitioner{ + // By default there are no nodes and each test will + // override these as needed + cloudProviderInstances: make([]*mock.Node, 0), + kubeNodes: make([]*mock.Node, 0), + } + + for _, opt := range opts { + opt(t) + } + + t.Client = mock.NewClient(t.kubeNodes, t.cloudProviderInstances, cnr) + + rm := &controller.ResourceManager{ + Client: t.K8sClient, + RawClient: t.RawClient, + HttpClient: http.DefaultClient, + CloudProvider: t.CloudProvider, + } + + t.CycleNodeRequestTransitioner = NewCycleNodeRequestTransitioner( + cnr, rm, Options{}, + ) + + return t +} diff --git a/pkg/controller/cyclenoderequest/transitioner/transitions.go b/pkg/controller/cyclenoderequest/transitioner/transitions.go index f56a455..0cc5d78 100644 --- a/pkg/controller/cyclenoderequest/transitioner/transitions.go +++ b/pkg/controller/cyclenoderequest/transitioner/transitions.go @@ -100,15 +100,23 @@ func (t *CycleNodeRequestTransitioner) transitionPending() (reconcile.Result, er return t.transitionToHealing(fmt.Errorf("no existing nodes in cloud provider matched selector")) } + nodeGroupNames := t.cycleNodeRequest.GetNodeGroupNames() + // Describe the node group for the request - t.rm.LogEvent(t.cycleNodeRequest, "FetchingNodeGroup", "Fetching node group: %v", t.cycleNodeRequest.GetNodeGroupNames()) - nodeGroups, err := t.rm.CloudProvider.GetNodeGroups(t.cycleNodeRequest.GetNodeGroupNames()) + t.rm.LogEvent(t.cycleNodeRequest, "FetchingNodeGroup", "Fetching node group: %v", nodeGroupNames) + + if len(nodeGroupNames) == 0 { + return t.transitionToHealing(fmt.Errorf("must have at least one nodegroup name defined")) + } + + nodeGroups, err := 
t.rm.CloudProvider.GetNodeGroups(nodeGroupNames) if err != nil { return t.transitionToHealing(err) } // get instances inside cloud provider node groups nodeGroupInstances := nodeGroups.Instances() + // Do some sanity checking before we start filtering things // Check the instance count of the node group matches the number of nodes found in Kubernetes if len(kubeNodes) != len(nodeGroupInstances) { diff --git a/pkg/controller/cyclenoderequest/transitioner/transitions_pending_test.go b/pkg/controller/cyclenoderequest/transitioner/transitions_pending_test.go new file mode 100644 index 0000000..9c85f63 --- /dev/null +++ b/pkg/controller/cyclenoderequest/transitioner/transitions_pending_test.go @@ -0,0 +1,368 @@ +package transitioner + +import ( + "testing" + "time" + + v1 "github.com/atlassian-labs/cyclops/pkg/apis/atlassian/v1" + "github.com/atlassian-labs/cyclops/pkg/mock" + "github.com/stretchr/testify/assert" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Basic test to ensure the base functionality of the Pending phase works. A +// predictable set of nodes with matching cloud provider instances attached to +// their nodegroups should get the CNR transitioned to the Initialized phase. 
+func TestPendingSimpleCase(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + NodeGroupsList: []string{"ng-1"}, + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithKubeNodes(nodegroup), + WithCloudProviderInstances(nodegroup), + ) + + result, err := fakeTransitioner.Run() + assert.NoError(t, err) + assert.True(t, result.Requeue) + + // It should move to the Initialised phase and set up the status of the CNR + // in a predictable manner + assert.Equal(t, v1.CycleNodeRequestInitialised, cnr.Status.Phase) + assert.Len(t, cnr.Status.NodesToTerminate, 2) + assert.Equal(t, cnr.Status.ActiveChildren, int64(0)) + assert.Equal(t, cnr.Status.NumNodesCycled, 0) +} + +// Test to ensure the Pending phase will accept a CNR with a correct named node. 
+func TestPendingWithNamedNode(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + NodeGroupsList: []string{"ng-1"}, + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + NodeNames: []string{ + "ng-1-node-0", + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithKubeNodes(nodegroup), + WithCloudProviderInstances(nodegroup), + ) + + result, err := fakeTransitioner.Run() + assert.NoError(t, err) + assert.True(t, result.Requeue) + + // It should move to the Initialised phase and set up the status of the CNR + // in a predictable manner + assert.Equal(t, v1.CycleNodeRequestInitialised, cnr.Status.Phase) + assert.Len(t, cnr.Status.NodesToTerminate, 1) + assert.Equal(t, cnr.Status.ActiveChildren, int64(0)) + assert.Equal(t, cnr.Status.NumNodesCycled, 0) +} + +// Test to ensure the Pending phase will reject a CNR with a named node that +// does not match any of the nodes matching the node selector. It should error +// out immediately. 
+func TestPendingWrongNamedNode(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + NodeGroupsList: []string{"ng-1"}, + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + NodeNames: []string{ + "ng-1-node-0", + "ng-1-node-2", + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithKubeNodes(nodegroup), + WithCloudProviderInstances(nodegroup), + ) + + _, err = fakeTransitioner.Run() + assert.Error(t, err) + assert.Equal(t, v1.CycleNodeRequestHealing, cnr.Status.Phase) +} + +// Test to ensure that if there's a mismatch between the instances found in the +// cloud provider and kube then the CNR will error out immediately rather than +// proceed. Specifically test missing cloud provider instances. 
+func TestPendingNoCloudProviderNodes(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + NodeGroupsList: []string{"ng-1"}, + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithKubeNodes(nodegroup), + ) + + _, err = fakeTransitioner.Run() + assert.Error(t, err) + assert.Equal(t, v1.CycleNodeRequestHealing, cnr.Status.Phase) +} + +// Test to ensure that if there's a mismatch between the instances found in the +// cloud provider and kube then the CNR will error out immediately rather than +// proceed. Specifically test missing kube nodes. +func TestPendingNoKubeNodes(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + NodeGroupsList: []string{"ng-1"}, + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithCloudProviderInstances(nodegroup), + ) + + _, err = fakeTransitioner.Run() + assert.Error(t, err) + assert.Equal(t, v1.CycleNodeRequestHealing, cnr.Status.Phase) +} + +// Test to ensure that Cyclops will not proceed if there is a node detached from +// the nodegroup on the cloud provider. 
It should try to wait for the issue to +// resolve and transition to the Healing phase if it doesn't. +func TestPendingDetachedCloudProviderNode(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + // "detach" one instance from the asg + nodegroup[0].Nodegroup = "" + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + NodeGroupsList: []string{"ng-1"}, + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithKubeNodes(nodegroup), + WithCloudProviderInstances(nodegroup), + ) + + // Should requeue while it tries to wait + _, err = fakeTransitioner.Run() + assert.NoError(t, err) + assert.Equal(t, v1.CycleNodeRequestPending, cnr.Status.Phase) + + // Simulate waiting for 1s more than the wait limit + cnr.Status.EquilibriumWaitStarted = &metav1.Time{ + Time: time.Now().Add(-nodeEquilibriumWaitLimit - time.Second), + } + + // This time should transition to the healing phase + _, err = fakeTransitioner.Run() + assert.Error(t, err) + assert.Equal(t, v1.CycleNodeRequestHealing, cnr.Status.Phase) +} + +// Test that if no nodegroup names are given, the CNR should transition to the +// Healing phase since no nodes will match in the cloud provider. 
+func TestPendingNoNodegroupNamesGiven(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithKubeNodes(nodegroup), + WithCloudProviderInstances(nodegroup), + ) + + _, err = fakeTransitioner.Run() + assert.Error(t, err) + assert.Equal(t, v1.CycleNodeRequestHealing, cnr.Status.Phase) +} + +// Test that if there is a mismatching nodegroup name, the CNR should transition +// to the Healing phase since there will be no nodes matching. +func TestPendingMismatchingNodegroupName(t *testing.T) { + nodegroup, err := mock.NewNodegroup("ng-1", 2) + if err != nil { + assert.NoError(t, err) + } + + cnr := &v1.CycleNodeRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cnr-1", + Namespace: "kube-system", + }, + Spec: v1.CycleNodeRequestSpec{ + NodeGroupsList: []string{"ng-2"}, + CycleSettings: v1.CycleSettings{ + Concurrency: 1, + Method: v1.CycleNodeRequestMethodDrain, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "customer": "kitt", + }, + }, + }, + Status: v1.CycleNodeRequestStatus{ + Phase: v1.CycleNodeRequestPending, + }, + } + + fakeTransitioner := NewFakeTransitioner(cnr, + WithKubeNodes(nodegroup), + WithCloudProviderInstances(nodegroup), + ) + + _, err = fakeTransitioner.Run() + assert.Error(t, err) + assert.Equal(t, v1.CycleNodeRequestHealing, cnr.Status.Phase) +} diff --git a/pkg/controller/resource_manager.go b/pkg/controller/resource_manager.go index 04e8bc6..d2d65f4 100644 --- 
a/pkg/controller/resource_manager.go +++ b/pkg/controller/resource_manager.go @@ -61,10 +61,14 @@ func (rm *ResourceManager) UpdateObject(obj client.Object) error { // LogEvent creates an event on the current object func (rm *ResourceManager) LogEvent(obj runtime.Object, reason, messageFmt string, args ...interface{}) { - rm.Recorder.Eventf(obj, coreV1.EventTypeNormal, reason, messageFmt, args...) + if rm.Recorder != nil { + rm.Recorder.Eventf(obj, coreV1.EventTypeNormal, reason, messageFmt, args...) + } } // LogWarningEvent creates a warning event on the current object func (rm *ResourceManager) LogWarningEvent(obj runtime.Object, reason, messageFmt string, args ...interface{}) { - rm.Recorder.Eventf(obj, coreV1.EventTypeWarning, reason, messageFmt, args...) + if rm.Recorder != nil { + rm.Recorder.Eventf(obj, coreV1.EventTypeWarning, reason, messageFmt, args...) + } } diff --git a/pkg/mock/client.go b/pkg/mock/client.go new file mode 100644 index 0000000..0763af1 --- /dev/null +++ b/pkg/mock/client.go @@ -0,0 +1,188 @@ +package mock + +import ( + "fmt" + "time" + + v1 "github.com/atlassian-labs/cyclops/pkg/apis/atlassian/v1" + "github.com/atlassian-labs/cyclops/pkg/cloudprovider" + "github.com/atlassian-labs/cyclops/pkg/cloudprovider/aws" + fakeaws "github.com/atlassian-labs/cyclops/pkg/cloudprovider/aws/fake" + + "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" + + fakerawclient "k8s.io/client-go/kubernetes/fake" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Node struct { + Name string + LabelKey string + LabelValue string + Creation time.Time + Tainted bool + Nodegroup string + 
InstanceID string + + NodeReady corev1.ConditionStatus + + CPU int64 + Mem int64 + + CloudProviderState string + ProviderID string +} + +type Client struct { + // AWS + Autoscaling autoscalingiface.AutoScalingAPI + Ec2 ec2iface.EC2API + + cloudprovider.CloudProvider + + // KUBE + K8sClient client.Client + RawClient kubernetes.Interface +} + +func NewClient(kubeNodes []*Node, cloudProviderNodes []*Node, extraKubeObjects ...client.Object) *Client { + t := &Client{} + + // Add the providerID to all nodes + for _, node := range kubeNodes { + node.ProviderID = fakeaws.GenerateProviderID(node.InstanceID) + } + + for _, node := range cloudProviderNodes { + node.ProviderID = fakeaws.GenerateProviderID(node.InstanceID) + } + + runtimeNodes, clientNodes := generateKubeNodes(kubeNodes) + + scheme := runtime.NewScheme() + utilruntime.Must(addCustomSchemes(scheme)) + + kubeObjects := clientNodes + kubeObjects = append(kubeObjects, extraKubeObjects...) + + t.K8sClient = fakeclient.NewClientBuilder().WithScheme(scheme).WithObjects(kubeObjects...).Build() + t.RawClient = fakerawclient.NewSimpleClientset(runtimeNodes...) 
+ + cloudProviderInstances := generateFakeInstances(cloudProviderNodes) + + autoscalingiface := &fakeaws.Autoscaling{ + Instances: cloudProviderInstances, + } + + ec2iface := &fakeaws.Ec2{ + Instances: cloudProviderInstances, + } + + t.Autoscaling = autoscalingiface + t.Ec2 = ec2iface + t.CloudProvider = aws.NewGenericCloudProvider(autoscalingiface, ec2iface) + + return t +} + +func addCustomSchemes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.CycleNodeRequest{}) + scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.CycleNodeRequestList{}) + scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.CycleNodeStatus{}) + scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.CycleNodeStatusList{}) + scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.NodeGroup{}) + scheme.AddKnownTypes(v1.SchemeGroupVersion, &v1.NodeGroupList{}) + scheme.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.Node{}) + scheme.AddKnownTypes(corev1.SchemeGroupVersion, &corev1.NodeList{}) + return nil +} + +func generateFakeInstances(nodes []*Node) map[string]*fakeaws.Instance { + var instances = make(map[string]*fakeaws.Instance, 0) + + for _, node := range nodes { + instances[node.InstanceID] = &fakeaws.Instance{ + InstanceID: node.InstanceID, + AutoscalingGroupName: node.Nodegroup, + State: node.CloudProviderState, + } + } + + return instances +} + +func generateKubeNodes(nodes []*Node) ([]runtime.Object, []client.Object) { + runtimeNodes := make([]runtime.Object, 0) + clientNodes := make([]client.Object, 0) + + for _, node := range nodes { + kubeNode := buildKubeNode(node) + runtimeNodes = append(runtimeNodes, kubeNode) + clientNodes = append(clientNodes, kubeNode) + } + + return runtimeNodes, clientNodes +} + +func buildKubeNode(node *Node) *corev1.Node { + var taints []corev1.Taint + + if node.Tainted { + taints = append(taints, corev1.Taint{ + Key: "atlassian.com/cyclops", + Value: fmt.Sprint(time.Now().Unix()), + Effect: corev1.TaintEffectNoSchedule, + }) + } + + kubeNode 
:= &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node.Name, + SelfLink: fmt.Sprintf("/api/v1/nodes/%s", node.Name), + Labels: map[string]string{ + node.LabelKey: node.LabelValue, + }, + CreationTimestamp: metav1.NewTime(node.Creation), + }, + Spec: corev1.NodeSpec{ + ProviderID: node.ProviderID, + Taints: taints, + }, + Status: corev1.NodeStatus{ + Capacity: corev1.ResourceList{ + corev1.ResourcePods: *resource.NewQuantity(100, resource.DecimalSI), + }, + Conditions: []corev1.NodeCondition{ + { + Type: corev1.NodeReady, + Status: node.NodeReady, + }, + }, + }, + } + + if node.CPU >= 0 { + kubeNode.Status.Capacity[corev1.ResourceCPU] = *resource.NewMilliQuantity(node.CPU, resource.DecimalSI) + } + + if node.Mem >= 0 { + kubeNode.Status.Capacity[corev1.ResourceMemory] = *resource.NewQuantity(node.Mem, resource.DecimalSI) + } + + kubeNode.Status.Allocatable = corev1.ResourceList{} + + for k, v := range kubeNode.Status.Capacity { + kubeNode.Status.Allocatable[k] = v + } + + return kubeNode +} diff --git a/pkg/mock/test_helpers.go b/pkg/mock/test_helpers.go new file mode 100644 index 0000000..decbd91 --- /dev/null +++ b/pkg/mock/test_helpers.go @@ -0,0 +1,50 @@ +package mock + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" +) + +func GenerateRandomInstanceId() (string, error) { + numBytes := 9 + randomBytes := make([]byte, numBytes) + + if _, err := rand.Read(randomBytes); err != nil { + return "", err + } + + hexString := hex.EncodeToString(randomBytes) + hexString = hexString[:17] + return "i-" + hexString, nil +} + +func NewNodegroup(name string, num int) ([]*Node, error) { + nodes := make([]*Node, 0) + + for i := 0; i < num; i++ { + instanceID, err := GenerateRandomInstanceId() + if err != nil { + return nil, err + } + + node := &Node{ + Name: fmt.Sprintf("%s-node-%d", name, i), + LabelKey: "customer", + LabelValue: "kitt", + Creation: time.Now(), + Tainted: false, + NodeReady: corev1.ConditionTrue, + Nodegroup: 
name, + InstanceID: instanceID, + CloudProviderState: "running", + } + + nodes = append(nodes, node) + } + + return nodes, nil +} diff --git a/pkg/test/aws.go b/pkg/test/aws.go deleted file mode 100644 index 669910f..0000000 --- a/pkg/test/aws.go +++ /dev/null @@ -1,53 +0,0 @@ -package test - -import ( - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" -) - -// MockAutoscalingService is a mock implementation of a cloud provider interface -type MockAutoscalingService struct { - autoscalingiface.AutoScalingAPI - *client.Client - - DescribeAutoScalingGroupsOutput *autoscaling.DescribeAutoScalingGroupsOutput - DescribeAutoScalingGroupsErr error - - SetDesiredCapacityOutput *autoscaling.SetDesiredCapacityOutput - SetDesiredCapacityErr error - - TerminateInstanceInAutoScalingGroupOutput *autoscaling.TerminateInstanceInAutoScalingGroupOutput - TerminateInstanceInAutoScalingGroupErr error -} - -// DescribeAutoScalingGroups mock implementation for MockAutoscalingService -func (m MockAutoscalingService) DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { - return m.DescribeAutoScalingGroupsOutput, m.DescribeAutoScalingGroupsErr -} - -// SetDesiredCapacity mock implementation for MockAutoscalingService -func (m MockAutoscalingService) SetDesiredCapacity(*autoscaling.SetDesiredCapacityInput) (*autoscaling.SetDesiredCapacityOutput, error) { - return m.SetDesiredCapacityOutput, m.SetDesiredCapacityErr -} - -// TerminateInstanceInAutoScalingGroup mock implementation for MockAutoscalingService -func (m MockAutoscalingService) TerminateInstanceInAutoScalingGroup(*autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) { - return 
m.TerminateInstanceInAutoScalingGroupOutput, m.TerminateInstanceInAutoScalingGroupErr -} - -// MockEc2Service mocks the EC2API for DescribeInstances -type MockEc2Service struct { - ec2iface.EC2API - *client.Client - - DescribeInstancesOutput *ec2.DescribeInstancesOutput - DescribeInstancesErr error -} - -// DescribeInstances mock implementation for MockAutoscalingService -func (m MockEc2Service) DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) { - return m.DescribeInstancesOutput, m.DescribeInstancesErr -}