Skip to content

Commit

Permalink
Enable Calico on ARM64 and add configurable flags for Calico install…
Browse files Browse the repository at this point in the history
…ation (#2004)

* Enable Calico on ARM64 and add configurable flags for Calico
installation

* Add v to Calico version in release test script
  • Loading branch information
haouc authored Jun 11, 2022
1 parent 6416a0e commit 10f3aed
Show file tree
Hide file tree
Showing 5 changed files with 70 additions and 51 deletions.
31 changes: 22 additions & 9 deletions scripts/lib/integration.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,17 +36,30 @@ function run_calico_test() {
echo "Starting Helm installing Tigera operator and running Calico STAR tests"
pushd ./test
VPC_ID=$(eksctl get cluster $CLUSTER_NAME -oyaml | grep vpc | cut -d ":" -f 2 | awk '{$1=$1};1')
# we can automatically use latest version in Calico repo, or use the known highest version (currently v3.22.0)

calico_version=$CALICO_VERSION
if [[ $RUN_LATEST_CALICO_VERSION == true ]]; then
version_tag=$(curl -i https://api.github.com/repos/projectcalico/calico/releases/latest | grep "tag_name") || true
if [[ -n $version_tag ]]; then
calico_version=$(echo $version_tag | cut -d ":" -f 2 | cut -d '"' -f 2 )
else
echo "Getting Calico latest version failed, will fall back to default/set version $calico_version instead"
if [[ $1 == "true" ]]; then
# we can automatically use latest version in Calico repo, or use the known highest version (currently v3.23.0)
if [[ $RUN_LATEST_CALICO_VERSION == true ]]; then
version_tag=$(curl -i https://api.github.com/repos/projectcalico/calico/releases/latest | grep "tag_name") || true
if [[ -n $version_tag ]]; then
calico_version=$(echo $version_tag | cut -d ":" -f 2 | cut -d '"' -f 2 )
else
echo "Getting Calico latest version failed, will fall back to default/set version $calico_version instead"
fi
else echo "Using default Calico version"
fi
echo "Using Calico version $calico_version to test"
else
version=$(kubectl describe ds -n calico-system calico-node | grep "calico/node:" | cut -d ':' -f3)
echo "Calico has been installed in testing cluster, keep using the version $version"
fi
echo "Using Calico version $calico_version to test"
ginkgo -v e2e/calico -- --cluster-kubeconfig=$KUBECONFIG --cluster-name=$CLUSTER_NAME --aws-region=$AWS_DEFAULT_REGION --aws-vpc-id=$VPC_ID --calico-version=$calico_version

echo "Testing amd64"
instance_type="amd64"
ginkgo -v e2e/calico -- --cluster-kubeconfig=$KUBECONFIG --cluster-name=$CLUSTER_NAME --aws-region=$AWS_DEFAULT_REGION --aws-vpc-id=$VPC_ID --calico-version=$calico_version --instance-type=$instance_type --install-calico=$1
echo "Testing arm64"
instance_type="arm64"
ginkgo -v e2e/calico -- --cluster-kubeconfig=$KUBECONFIG --cluster-name=$CLUSTER_NAME --aws-region=$AWS_DEFAULT_REGION --aws-vpc-id=$VPC_ID --calico-version=$calico_version --instance-type=$instance_type --install-calico=false
popd
}
2 changes: 1 addition & 1 deletion scripts/run-cni-release-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ function run_integration_test() {

function run_calico_tests(){
# get version from run-integration-tests.sh
: "${CALICO_VERSION:=3.22.0}"
: "${CALICO_VERSION:=v3.23.0}"
echo "Running calico tests, version $CALICO_VERSION"
START=$SECONDS
TEST_RESULT=success
Expand Down
10 changes: 7 additions & 3 deletions scripts/run-integration-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ ARCH=$(go env GOARCH)
: "${RUNNING_PERFORMANCE:=false}"
: "${RUN_CALICO_TEST:=false}"
: "${RUN_LATEST_CALICO_VERSION:=false}"
: "${CALICO_VERSION:=3.22.0}"
: "${CALICO_VERSION:=v3.23.0}"
: "${RUN_CALICO_TEST_WITH_PD:=true}"


Expand Down Expand Up @@ -272,16 +272,20 @@ if [[ $TEST_PASS -eq 0 ]]; then
fi

if [[ $RUN_CALICO_TEST == true ]]; then
run_calico_test
# need to install Calico
run_calico_test "true"
if [[ "$RUN_CALICO_TEST_WITH_PD" == true ]]; then
# if we run prefix delegation tests as well, we need update CNI env and terminate all nodes to restore iptables rules for following tests
echo "Run Calico tests with Prefix Delegation enabled"
$KUBECTL_PATH set env daemonset aws-node -n kube-system ENABLE_PREFIX_DELEGATION=true
# we shouldn't rely on other tests to set this required ENV
$KUBECTL_PATH set env ds aws-node -n kube-system WARM_PREFIX_TARGET=1
ids=( $(aws ec2 describe-instances --filters Name=vpc-id,Values=$VPC_ID --query 'Reservations[*].Instances[*].InstanceId' --output text) )
aws ec2 terminate-instances --instance-ids $ids
echo "Waiting 15 minutes for new nodes being ready"
sleep 900
run_calico_test
# no longer need to install Calico again for PD
run_calico_test "false"
fi

emit_cloudwatch_metric "calico_test_status" "1"
Expand Down
74 changes: 37 additions & 37 deletions test/e2e/calico/calico_suite_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ package calico
import (
"context"
"testing"
"time"

"github.com/aws/amazon-vpc-cni-k8s/test/framework"
"github.com/aws/amazon-vpc-cni-k8s/test/framework/resources/k8s/manifest"
Expand All @@ -13,22 +14,21 @@ import (
)

var (
f *framework.Framework
err error
uiNamespace = "management-ui"
clientNamespace = "client"
starsNamespace = "stars"
uiLabel = map[string]string{"role": "management-ui"}
clientLabel = map[string]string{"role": "client"}
feLabel = map[string]string{"role": "frontend"}
beLabel = map[string]string{"role": "backend"}
nodeArchKey = "kubernetes.io/arch"
nodeArchARMValue = "arm64"
nodeArchAMDValue = "amd64"
uiPod v1.Pod
clientPod v1.Pod
fePod v1.Pod
bePod v1.Pod
f *framework.Framework
err error
uiNamespace = "management-ui"
clientNamespace = "client"
starsNamespace = "stars"
uiLabel = map[string]string{"role": "management-ui"}
clientLabel = map[string]string{"role": "client"}
feLabel = map[string]string{"role": "frontend"}
beLabel = map[string]string{"role": "backend"}
nodeArchKey = "kubernetes.io/arch"
starImage = "calico/star-probe:multiarch"
uiPod v1.Pod
clientPod v1.Pod
fePod v1.Pod
bePod v1.Pod
)

func TestCalicoPoliciesWithVPCCNI(t *testing.T) {
Expand All @@ -38,15 +38,16 @@ func TestCalicoPoliciesWithVPCCNI(t *testing.T) {

var _ = BeforeSuite(func() {
f = framework.New(framework.GlobalOptions)
By("installing Calico operator")

tigeraVersion := f.Options.CalicoVersion
err := f.InstallationManager.InstallTigeraOperator(tigeraVersion)
Expect(err).ToNot(HaveOccurred())

By("Patching ARM64 node unschedulable")
err = updateNodesSchedulability(nodeArchKey, nodeArchARMValue, true)
Expect(err).ToNot(HaveOccurred())
if f.Options.InstallCalico {
By("installing Calico operator")
tigeraVersion := f.Options.CalicoVersion
err := f.InstallationManager.InstallTigeraOperator(tigeraVersion)
// wait for Calico resources being provisioned and setup.
// we don't have control on Calico pods metadata thus we may not poll the pods for waiting wisely.
time.Sleep(utils.DefaultDeploymentReadyTimeout / 2)
Expect(err).ToNot(HaveOccurred())
}

By("installing Calico Start Policy Tests Resources")
err = f.K8sResourceManagers.NamespaceManager().CreateNamespaceWithLabels(uiNamespace, map[string]string{"role": "management-ui"})
Expand All @@ -58,7 +59,7 @@ var _ = BeforeSuite(func() {

uiContainer := manifest.NewBaseContainer().
Name("management-ui").
Image("calico/star-collect:v0.1.0").
Image(starImage).
ImagePullPolicy(v1.PullAlways).
Port(v1.ContainerPort{ContainerPort: 9001}).
Build()
Expand All @@ -68,15 +69,15 @@ var _ = BeforeSuite(func() {
Container(uiContainer).
Replicas(1).
PodLabel("role", "management-ui").
NodeSelector(nodeArchKey, nodeArchAMDValue).
NodeSelector(nodeArchKey, f.Options.InstanceType).
Labels(map[string]string{"role": "management-ui"}).
Build()
_, err = f.K8sResourceManagers.DeploymentManager().CreateAndWaitTillDeploymentIsReady(uiDeployment, utils.DefaultDeploymentReadyTimeout)
Expect(err).ToNot(HaveOccurred())

clientContainer := manifest.NewBaseContainer().
Name("client").
Image("calico/star-probe:v0.1.0").
Image(starImage).
ImagePullPolicy(v1.PullAlways).
Command([]string{"probe", "--urls=http://frontend.stars:80/status,http://backend.stars:6379/status"}).
Port(v1.ContainerPort{ContainerPort: 9000}).
Expand All @@ -87,15 +88,15 @@ var _ = BeforeSuite(func() {
Container(clientContainer).
Replicas(1).
PodLabel("role", "client").
NodeSelector(nodeArchKey, nodeArchAMDValue).
NodeSelector(nodeArchKey, f.Options.InstanceType).
Labels(map[string]string{"role": "client"}).
Build()
_, err = f.K8sResourceManagers.DeploymentManager().CreateAndWaitTillDeploymentIsReady(clientDeployment, utils.DefaultDeploymentReadyTimeout)
Expect(err).ToNot(HaveOccurred())

feContainer := manifest.NewBaseContainer().
Name("frontend").
Image("calico/star-probe:v0.1.0").
Image(starImage).
ImagePullPolicy(v1.PullAlways).
Command([]string{
"probe",
Expand All @@ -110,15 +111,15 @@ var _ = BeforeSuite(func() {
Container(feContainer).
Replicas(1).
PodLabel("role", "frontend").
NodeSelector(nodeArchKey, nodeArchAMDValue).
NodeSelector(nodeArchKey, f.Options.InstanceType).
Labels(map[string]string{"role": "frontend"}).
Build()
_, err = f.K8sResourceManagers.DeploymentManager().CreateAndWaitTillDeploymentIsReady(feDeployment, utils.DefaultDeploymentReadyTimeout)
Expect(err).ToNot(HaveOccurred())

beContainer := manifest.NewBaseContainer().
Name("backend").
Image("calico/star-probe:v0.1.0").
Image(starImage).
ImagePullPolicy(v1.PullAlways).
Command([]string{
"probe",
Expand All @@ -133,7 +134,7 @@ var _ = BeforeSuite(func() {
Container(beContainer).
Replicas(1).
PodLabel("role", "backend").
NodeSelector(nodeArchKey, nodeArchAMDValue).
NodeSelector(nodeArchKey, f.Options.InstanceType).
Labels(map[string]string{"role": "backend"}).
Build()
_, err = f.K8sResourceManagers.DeploymentManager().CreateAndWaitTillDeploymentIsReady(beDeployment, utils.DefaultDeploymentReadyTimeout)
Expand Down Expand Up @@ -215,11 +216,10 @@ var _ = AfterSuite(func() {
f.K8sResourceManagers.NetworkPolicyManager().DeleteNetworkPolicy(&networkPolicyAllowFE)
f.K8sResourceManagers.NetworkPolicyManager().DeleteNetworkPolicy(&networkPolicyAllowClient)

By("Helm Uninstall Calico Installation")
f.InstallationManager.UninstallTigeraOperator()

By("Restore ARM64 Nodes Schedulability")
updateNodesSchedulability(nodeArchKey, nodeArchARMValue, false)
// TODO: disable Calico uninstallation for now. We can add this back after a number of successful test runs.
////we are using dynamic cluster to run the test. Not uninstalling Calico is fine.
//By("Helm Uninstall Calico Installation")
//f.InstallationManager.UninstallTigeraOperator()
})

func installNetcatToolInContainer(name string, namespace string) error {
Expand Down
4 changes: 3 additions & 1 deletion test/framework/options.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ type Options struct {
TargetAddon string
InitialManifest string
TargetManifest string
InstallCalico bool
}

func (options *Options) BindFlags() {
Expand All @@ -55,9 +56,10 @@ func (options *Options) BindFlags() {
flag.StringVar(&options.TargetAddon, "target-addon-version", "", "Target CNI addon version after upgrade applied")
flag.StringVar(&options.InitialManifest, "initial-manifest-file", "", "Initial CNI manifest, can be local file path or remote Url")
flag.StringVar(&options.TargetManifest, "target-manifest-file", "", "Target CNI manifest, can be local file path or remote Url")
flag.StringVar(&options.CalicoVersion, "calico-version", "3.22.0", "calico version to be tested")
flag.StringVar(&options.CalicoVersion, "calico-version", "v3.23.0", "calico version to be tested")
flag.StringVar(&options.ContainerRuntime, "container-runtime", "", "Optionally can specify it as 'containerd' for the test nodes")
flag.StringVar(&options.InstanceType, "instance-type", "amd64", "Optionally specify instance type as arm64 for the test nodes")
flag.BoolVar(&options.InstallCalico, "install-calico", true, "Install Calico operator before running e2e tests")
}

func (options *Options) Validate() error {
Expand Down

0 comments on commit 10f3aed

Please sign in to comment.