Skip to content

Commit

Permalink
integration test foundational framework
Browse files Browse the repository at this point in the history
  • Loading branch information
AxiomSamarth committed Feb 3, 2022
1 parent 764b305 commit 9362125
Show file tree
Hide file tree
Showing 8 changed files with 548 additions and 1 deletion.
5 changes: 5 additions & 0 deletions .ci/local_integration_test
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Runs the Ginkgo-based integration test suite.
# Expected to be invoked from the cluster-autoscaler directory
# (as done by `make test-integration`).

# Fail fast if the integration directory is missing; otherwise ginkgo
# would silently run against whatever the current directory contains.
cd ./integration/ || exit 1

echo "Starting integration tests..."

ginkgo -v
2 changes: 1 addition & 1 deletion .ci/test
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ fi

##############################################################################

go test $(go list ./cluster-autoscaler/... | grep -v cloudprovider | grep -v vendor)
go test $(go list ./cluster-autoscaler/... | grep -v cloudprovider | grep -v vendor | grep -v integration)
go test $(go list ./cluster-autoscaler/cloudprovider/mcm/... | grep -v vendor)

#TODO: Return success failure properly
Expand Down
25 changes: 25 additions & 0 deletions cluster-autoscaler/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -109,3 +109,28 @@ test-in-docker: clean docker-builder
bash -c 'cd /gopath/src/k8s.io/autoscaler/cluster-autoscaler && GO111MODULE=off go test -race ./... ${TAGS_FLAG}'

.PHONY: all build test-unit clean format execute-release dev-release docker-builder build-in-docker release generate push-image push-manifest

# start runs the cluster autoscaler locally against the cluster reached via
# TARGET_KUBECONFIG. The three MACHINE_DEPLOYMENT_ZONE_* variables name the
# node groups in <min>:<max>:<id> form; they are supplied at runtime by
# integration/framework.go (runAutoscaler). Short scale-down delays and
# lease timings keep the integration tests fast.
.PHONY: start
start:
	@GO111MODULE=on go run main.go \
	--kubeconfig=$(TARGET_KUBECONFIG) \
	--cloud-provider=mcm \
	--nodes=1:1:$(MACHINE_DEPLOYMENT_ZONE_1) \
	--nodes=0:1:$(MACHINE_DEPLOYMENT_ZONE_2) \
	--nodes=0:1:$(MACHINE_DEPLOYMENT_ZONE_3) \
	--skip-nodes-with-system-pods=false \
	--skip-nodes-with-local-storage=false \
	--scale-down-delay-after-add=10s \
	--scale-down-delay-after-failure=2s \
	--v=4 \
	--expander=least-waste \
	--scale-down-unneeded-time=5s \
	--balance-similar-node-groups=true \
	--max-node-provision-time=4m \
	--leader-elect-retry-period="20s" \
	--leader-elect-renew-deadline="30s" \
	--leader-elect-lease-duration="40s"

# test-integration delegates to the CI helper script, which cd's into
# ./integration and runs `ginkgo -v`.
.PHONY: test-integration
test-integration:
	../.ci/local_integration_test
122 changes: 122 additions & 0 deletions cluster-autoscaler/integration/factory.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
package integration

import (
"fmt"
"os"
"path/filepath"

MCMClientset "github.com/gardener/machine-controller-manager/pkg/client/clientset/versioned"
"github.com/onsi/gomega/gexec"

"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)

var (
	// targetDir is the directory holding log files of the locally started
	// autoscaler process.
	targetDir = filepath.Join("logs")

	// CALogFile is the autoscaler's log file, rotated by rotateLogFile.
	// NOTE(review): "processs" is a typo, but the filename is runtime
	// behavior and is left unchanged here.
	CALogFile = filepath.Join(targetDir, "autoscaler_processs.log")

	// autoscalerSession tracks the process/session started by gexec.
	// Available only if the controllers are running in a local setup;
	// updated during runtime by runAutoscaler.
	autoscalerSession *gexec.Session

	// controlClusterNamespace is the Shoot namespace in the Seed; falls
	// back to the target cluster's name when CONTROL_NAMESPACE is unset
	// (see NewDriver).
	controlClusterNamespace = os.Getenv("CONTROL_NAMESPACE")
)

// Cluster holds the clients of a Cluster (like Control Cluster and Target
// Cluster) together with the kubeconfig path they were built from.
type Cluster struct {
	restConfig *rest.Config            // REST config loaded from KubeConfigFilePath
	Clientset  *kubernetes.Clientset   // core Kubernetes API client
	MCMClient  *MCMClientset.Clientset // machine-controller-manager API client
	// KubeConfigFilePath is the path of the kubeconfig used to build the
	// clients above.
	KubeConfigFilePath string
}

// ClusterName retrieves the cluster name from the kubeconfig: the Cluster
// field of the context selected as current-context. Returns an empty name
// (and nil error) when the current context is not present in the file.
func (c *Cluster) ClusterName() (string, error) {
	cfg, err := clientcmd.LoadFromFile(c.KubeConfigFilePath)
	if err != nil {
		return "", err
	}
	if kubeContext, ok := cfg.Contexts[cfg.CurrentContext]; ok {
		return kubeContext.Cluster, nil
	}
	return "", nil
}

// Driver is the driver used for executing various integration tests and
// utils, interacting with both control and target clusters.
type Driver struct {
	// controlCluster holds the ClientSets for accessing kubernetes resources
	// of the control cluster, and the kubeconfig file path for that cluster.
	controlCluster *Cluster

	// targetCluster holds the ClientSets for accessing kubernetes resources
	// of the target cluster, and the kubeconfig file path for that cluster.
	targetCluster *Cluster
}

// NewDriver is the constructor for the Driver type. It builds clients for
// both the control and target clusters and, when CONTROL_NAMESPACE is not
// already set, derives the control cluster namespace from the target
// cluster's name, updating both the package-level variable and the
// environment. Returns nil if any step fails.
func NewDriver(controlKubeconfig, targetKubeconfig string) *Driver {
	driver := &Driver{}

	var err error
	driver.controlCluster, err = NewCluster(controlKubeconfig)
	if err != nil {
		fmt.Printf("%s", err.Error())
		return nil
	}

	driver.targetCluster, err = NewCluster(targetKubeconfig)
	if err != nil {
		fmt.Printf("%s", err.Error())
		return nil
	}

	if controlClusterNamespace == "" {
		// Use plain '=' so the package-level controlClusterNamespace is
		// updated; the original used ':=' which shadowed it with a local,
		// leaving the package-level variable empty for all later API calls.
		controlClusterNamespace, err = driver.targetCluster.ClusterName()
		if err != nil {
			fmt.Printf("%s", err.Error())
			return nil
		}

		if err = os.Setenv("CONTROL_NAMESPACE", controlClusterNamespace); err != nil {
			fmt.Printf("%s", err.Error())
			return nil
		}
	}

	return driver
}

// NewCluster returns a Cluster whose clients are built from the kubeconfig
// at kubeConfigPath. It fails fast with a non-nil error (and nil Cluster)
// if the kubeconfig cannot be loaded or any client cannot be constructed.
func NewCluster(kubeConfigPath string) (*Cluster, error) {
	config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
	if err != nil {
		// The original continued with a nil restConfig here, which panics
		// inside client-go's NewForConfig; return the error instead.
		fmt.Printf("%s", err.Error())
		return nil, err
	}

	c := &Cluster{
		KubeConfigFilePath: kubeConfigPath,
		restConfig:         config,
	}

	clientset, err := kubernetes.NewForConfig(c.restConfig)
	if err != nil {
		return nil, err
	}
	c.Clientset = clientset

	// The original could mask an earlier client error with a later success;
	// each construction is now checked individually.
	mcmClient, err := MCMClientset.NewForConfig(c.restConfig)
	if err != nil {
		return nil, err
	}
	c.MCMClient = mcmClient

	return c, nil
}
195 changes: 195 additions & 0 deletions cluster-autoscaler/integration/framework.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,195 @@
package integration

import (
"context"
"fmt"
"os"
"os/exec"
"strings"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"

appv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)

// rotateLogFile takes file name as input and returns a file object obtained by os.Create
// If the file exists already then it renames it so that a new file can be created
func rotateLogFile(fileName string) (*os.File, error) {

if _, err := os.Stat(fileName); err == nil { // !strings.Contains(err.Error(), "no such file or directory") {
for i := 9; i > 0; i-- {
os.Rename(fmt.Sprintf("%s.%d", fileName, i), fmt.Sprintf("%s.%d", fileName, i+1))
}
os.Rename(fileName, fmt.Sprintf("%s.%d", fileName, 1))
}

return os.Create(fileName)
}

// adjustNodeGroups scales every machine deployment except the first down to
// zero replicas, so that the target cluster is back to a single-node state
// before the next test. A no-op when exactly one node is already Ready.
func (driver *Driver) adjustNodeGroups() error {
	if driver.targetCluster.getNumberOfReadyNodes() == 1 {
		return nil
	}

	mdClient := driver.controlCluster.MCMClient.MachineV1alpha1().MachineDeployments(controlClusterNamespace)
	machineDeployments, err := mdClient.List(context.Background(), metav1.ListOptions{})
	if err != nil {
		return err
	}

	for i := range machineDeployments.Items {
		// The first machine deployment keeps its replicas; already-zero
		// deployments need no update.
		if i == 0 || machineDeployments.Items[i].Spec.Replicas == 0 {
			continue
		}
		scaledDown := machineDeployments.Items[i].DeepCopy()
		scaledDown.Spec.Replicas = 0
		if _, err := mdClient.Update(context.Background(), scaledDown, metav1.UpdateOptions{}); err != nil {
			return err
		}
	}
	return nil
}

// getNumberOfReadyNodes retrieves the list of node objects in the cluster
// and returns the number whose Ready condition is True. Returns 0 if the
// node list cannot be retrieved.
func (c *Cluster) getNumberOfReadyNodes() int16 {
	nodes, err := c.Clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		// The original discarded this error and would nil-dereference on
		// nodes.Items when the List call failed.
		fmt.Printf("%s", err.Error())
		return 0
	}
	count := 0
	for _, n := range nodes.Items {
		for _, nodeCondition := range n.Status.Conditions {
			if nodeCondition.Type == "Ready" && nodeCondition.Status == "True" {
				count++
			}
		}
	}
	return int16(count)
}

// scaleAutoscaler scales the "cluster-autoscaler" deployment in the control
// cluster namespace to the given replica count, capped at 1 (the autoscaler
// is leader-elected and never needs more). The update is skipped when the
// deployment already has the desired replica count.
func (driver *Driver) scaleAutoscaler(replicas int32) error {
	autoScalerDeployment, err := driver.controlCluster.Clientset.AppsV1().Deployments(controlClusterNamespace).Get(context.Background(), "cluster-autoscaler", metav1.GetOptions{})
	if err != nil {
		return err
	}

	if replicas > 1 {
		replicas = 1
	}

	// Compare the dereferenced value. The original compared two freshly
	// allocated *int32 pointers (`!= pointer.Int32Ptr(replicas)`), which
	// are never equal, so the deployment was updated on every call.
	if autoScalerDeployment.Spec.Replicas == nil || *autoScalerDeployment.Spec.Replicas != replicas {
		autoScalerDeployment.Spec.Replicas = pointer.Int32Ptr(replicas)
		fmt.Printf("Scaling Cluster Autoscaler to %d replicas\n", replicas)
		_, err = driver.controlCluster.Clientset.AppsV1().Deployments(controlClusterNamespace).Update(context.Background(), autoScalerDeployment, metav1.UpdateOptions{})
		if err != nil {
			return err
		}
	}

	return nil
}

// runAutoscaler starts the cluster autoscaler locally via the Makefile's
// `start` target, wiring the control cluster's three machine deployments in
// as node groups (format <namespace>.<name>). Output is captured in a
// rotated CALogFile and the started process is stored in the package-level
// autoscalerSession.
func (c *Driver) runAutoscaler() {

	machineDeployments, err := c.controlCluster.MCMClient.MachineV1alpha1().MachineDeployments(controlClusterNamespace).List(context.Background(), metav1.ListOptions{})
	if err != nil {
		// Surface the failure instead of returning silently as before.
		fmt.Printf("%s", err.Error())
		return
	}

	// The start target expects exactly three machine deployments (one per
	// zone). The original only rejected more than three and would panic
	// below with an index-out-of-range when fewer were present.
	if len(machineDeployments.Items) != 3 {
		fmt.Printf("Cluster node group configuration is improper. Setup Before Suite might not have successfully run. Please check!")
		return
	}

	ginkgo.By("Starting Cluster Autoscaler....")
	args := strings.Fields(
		fmt.Sprintf(
			"make --directory=%s start TARGET_KUBECONFIG=%s MACHINE_DEPLOYMENT_ZONE_1=%s MACHINE_DEPLOYMENT_ZONE_2=%s MACHINE_DEPLOYMENT_ZONE_3=%s",
			"../",
			c.targetCluster.KubeConfigFilePath,
			fmt.Sprintf("%s.%s", controlClusterNamespace, machineDeployments.Items[0].Name),
			fmt.Sprintf("%s.%s", controlClusterNamespace, machineDeployments.Items[1].Name),
			fmt.Sprintf("%s.%s", controlClusterNamespace, machineDeployments.Items[2].Name)),
	)

	outputFile, err := rotateLogFile(CALogFile)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	autoscalerSession, err = gexec.Start(exec.Command(args[0], args[1:]...), outputFile, outputFile)
	gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
	// ExitCode is -1 while the session is still running.
	gomega.Expect(autoscalerSession.ExitCode()).Should(gomega.Equal(-1))
}

// getDeploymentObject builds the nginx Deployment used as the scale-up test
// workload: `replicas` pods in the default namespace. Each pod requests
// 3 CPU / 150Mi — presumably sized so that enough replicas exceed node
// capacity and force a scale-up; TODO confirm against the test machine type.
func getDeploymentObject(replicas int32) *appv1.Deployment {
	deployment := &appv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			// scaleUpWorkload is the shared workload name defined elsewhere
			// in this package.
			Name:      scaleUpWorkload,
			Namespace: "default",
		},
		Spec: appv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"app": "nginx",
				},
			},
			Replicas: pointer.Int32Ptr(replicas),
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{
						"app": "nginx",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							// NOTE(review): "ngnix" is a typo of "nginx", but the
							// container name is runtime behavior and left unchanged.
							Name:  "ngnix-container",
							Image: "nginx:latest",
							Ports: []v1.ContainerPort{
								{
									ContainerPort: 80,
								},
							},
							// TODO: this is the object to be dyamically changed based on the machine type
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceCPU:    resource.MustParse("3"),
									v1.ResourceMemory: resource.MustParse("150Mi"),
								},
							},
						},
					},
				},
			},
		},
	}
	return deployment
}

// deployWorkload creates the scale-up test deployment (see
// getDeploymentObject) in the default namespace of the target cluster with
// the given replica count.
func (driver *Driver) deployWorkload(replicas int32) error {
	workload := getDeploymentObject(replicas)
	_, err := driver.targetCluster.Clientset.AppsV1().Deployments("default").Create(context.Background(), workload, metav1.CreateOptions{})
	return err
}

// scaleWorkload updates the named deployment in the target cluster's
// default namespace to the given replica count.
func (driver *Driver) scaleWorkload(workloadName string, replicas int32) error {
	deployments := driver.targetCluster.Clientset.AppsV1().Deployments("default")

	workload, err := deployments.Get(context.Background(), workloadName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	workload.Spec.Replicas = pointer.Int32Ptr(replicas)
	_, err = deployments.Update(context.Background(), workload, metav1.UpdateOptions{})
	return err
}
13 changes: 13 additions & 0 deletions cluster-autoscaler/integration/integration_suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
package integration

import (
"testing"

ginkgo "github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)

// TestIntegration wires Ginkgo into the standard `go test` runner and runs
// the "Integration Suite" spec tree defined in this package.
func TestIntegration(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Integration Suite")
}
Loading

0 comments on commit 9362125

Please sign in to comment.