Abuse deployments with extended test

smarterclayton committed Apr 13, 2016
1 parent ced29a2 commit 6b5fd75
Showing 3 changed files with 317 additions and 41 deletions.
322 changes: 286 additions & 36 deletions test/extended/deployments/deployments.go
@@ -2,29 +2,140 @@ package deployments

import (
"fmt"
"math/rand"
"sort"
"strings"
"time"

"k8s.io/kubernetes/pkg/util/wait"

g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"

kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e"

deployapi "github.com/openshift/origin/pkg/deploy/api"
deployutil "github.com/openshift/origin/pkg/deploy/util"
exutil "github.com/openshift/origin/test/extended/util"
)

const deploymentRunTimeout = 5 * time.Minute

var _ = g.Describe("deploymentconfigs", func() {
defer g.GinkgoRecover()
	var (
		deploymentFixture       = exutil.FixturePath("..", "extended", "fixtures", "test-deployment-test.yaml")
		simpleDeploymentFixture = exutil.FixturePath("..", "extended", "fixtures", "deployment-simple.yaml")
		oc                      = exutil.NewCLI("cli-deployment", exutil.KubeConfigPath())
	)

	g.Describe("when run iteratively", func() {
		g.It("should only deploy the last deployment [Conformance]", func() {
			// print some debugging output if the deployment fails
defer func() {
if !g.CurrentGinkgoTestDescription().Failed {
return
}
if dc, rcs, pods, err := deploymentInfo(oc, "deployment-simple"); err == nil {
e2e.Logf("DC: %#v", dc)
e2e.Logf(" RCs: %#v", rcs)
p, _ := deploymentPods(pods)
e2e.Logf(" Deployers: %#v", p)
}
}()

_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
o.Expect(err).NotTo(o.HaveOccurred())

iterations := 15
for i := 0; i < iterations; i++ {
if rand.Float32() < 0.2 {
time.Sleep(time.Duration(rand.Float32() * rand.Float32() * float32(time.Second)))
}
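				// pick one random action per iteration: ~40% trigger a new
				// deployment, ~30% cancel the running one, otherwise wait for
				// the current deployment to start running (deleting deployer
				// pods is disabled; see the n < 0.0 case below)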
switch n := rand.Float32(); {

case n < 0.4:
// trigger a new deployment
e2e.Logf("%02d: triggering a new deployment with config change", i)
out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("updated"))

case n < 0.7:
// cancel any running deployment
e2e.Logf("%02d: cancelling deployment", i)
if out, err := oc.Run("deploy").Args("dc/deployment-simple", "--cancel").Output(); err != nil {
// TODO: we should fix this
if !strings.Contains(out, "the object has been modified") {
o.Expect(err).NotTo(o.HaveOccurred())
}
e2e.Logf("--cancel deployment failed due to conflict: %v", err)
}

case n < 0.0:
// delete the deployer pod - disabled because it forces the system to wait for the sync loop
e2e.Logf("%02d: deleting one or more deployer pods", i)
_, rcs, pods, err := deploymentInfo(oc, "deployment-simple")
if err != nil {
e2e.Logf("%02d: unable to get deployment info: %v", i, err)
continue
}
all, err := deploymentPods(pods)
if err != nil {
e2e.Logf("%02d: unable to get deployment pods: %v", i, err)
continue
}
if len(all) == 0 {
e2e.Logf("%02d: no deployer pods", i)
continue
}
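				// target the deployer pods of the two most recent deployments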
top := len(rcs) - 1
				for j := top; j >= top-1 && j >= 0; j-- {
pod, ok := all[rcs[j].Name]
if !ok {
e2e.Logf("%02d: no deployer pod for rc %q", i, rcs[j].Name)
continue
}
e2e.Logf("%02d: deleting deployer pod %s", i, pod.Name)
options := kapi.NewDeleteOptions(0)
if rand.Float32() < 0.5 {
options = nil
}
if err := oc.KubeREST().Pods(oc.Namespace()).Delete(pod.Name, options); err != nil {
e2e.Logf("%02d: unable to delete deployer pod %q: %v", i, pod.Name, err)
}
}
e2e.Logf("%02d: triggering a new deployment with config change", i)
out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("updated"))

default:
// wait for the deployment to be running
e2e.Logf("%02d: waiting for current deployment to start running", i)
o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred())
}
}

			// trigger one more deployment, just in case we cancelled the latest one
out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", iterations)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("updated"))

			g.By("verifying that the latest deployment is marked complete")
o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred())
})
})

g.Describe("with test deployments", func() {
g.It("should run a deployment to completion and then scale to zero [Conformance]", func() {
out, err := oc.Run("create").Args("-f", deploymentFixture).Output()
o.Expect(err).NotTo(o.HaveOccurred())

o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentRunning)).NotTo(o.HaveOccurred())

out, err = oc.Run("logs").Args("-f", "dc/deployment-test").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("checking the logs for substrings\n%s", out))
@@ -33,22 +144,7 @@ var _ = g.Describe("deployments: parallel: test deployment", func() {
o.Expect(out).To(o.ContainSubstring("Deployment deployment-test-1 successfully made active"))

g.By("verifying the deployment is marked complete and scaled to zero")
			o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred())

g.By("verifying that scaling does not result in new pods")
out, err = oc.Run("scale").Args("dc/deployment-test", "--replicas=1").Output()
@@ -80,22 +176,176 @@ var _ = g.Describe("deployments: parallel: test deployment", func() {
o.Expect(out).To(o.ContainSubstring("Pre hook finished"))

g.By("verifying the second deployment is marked complete and scaled to zero")
			o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentReachedCompletion)).NotTo(o.HaveOccurred())
})
})
})

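// deploymentStatuses returns the deployment status annotation of each RC as a
// printable list of strings.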
func deploymentStatuses(rcs []kapi.ReplicationController) []string {
statuses := []string{}
for _, rc := range rcs {
statuses = append(statuses, string(deployutil.DeploymentStatusFor(&rc)))
}
return statuses
}

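// deploymentPods indexes the deployer pods in the namespace by the name of
// the deployment (RC) they act on, and errors if a deployment has more than
// one deployer pod.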
func deploymentPods(pods []kapi.Pod) (map[string]*kapi.Pod, error) {
deployers := make(map[string]*kapi.Pod)
for i := range pods {
name, ok := pods[i].Labels[deployapi.DeployerPodForDeploymentLabel]
if !ok {
continue
}
if existing, ok := deployers[name]; ok {
return nil, fmt.Errorf("multiple deployer pods exist for %q - %s and %s", name, existing.Name, pods[i].Name)
}
deployers[name] = &pods[i]
}
return deployers, nil
}

var completedStatuses = sets.NewString(string(deployapi.DeploymentStatusComplete), string(deployapi.DeploymentStatusFailed))

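// checkDeploymentInvariants verifies properties that must hold at every
// observation: no more deployer pods than deployments, at most one deployer
// pod running at a time, and status ordering across RCs - earlier deployments
// must be complete or failed before a later one runs or completes, and no two
// deployments may be running or pending at once.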
func checkDeploymentInvariants(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController, pods []kapi.Pod) error {
deployers, err := deploymentPods(pods)
if err != nil {
return err
}
if len(deployers) > len(rcs) {
return fmt.Errorf("more deployer pods found than deployments: %#v %#v", deployers, rcs)
}
running := sets.NewString()
completed := 0
for k, v := range deployers {
switch v.Status.Phase {
case kapi.PodSucceeded, kapi.PodFailed:
completed++
default:
running.Insert(k)
}
}
if running.Len() > 1 {
return fmt.Errorf("found multiple running deployments: %v", running.List())
}
sawStatus := sets.NewString()
statuses := []string{}
for _, rc := range rcs {
status := deployutil.DeploymentStatusFor(&rc)
		if sawStatus.Len() != 0 {
			switch status {
			case deployapi.DeploymentStatusComplete, deployapi.DeploymentStatusFailed:
				if sawStatus.Difference(completedStatuses).Len() != 0 {
					return fmt.Errorf("rc %s was %s, but earlier RCs were not completed: %v", rc.Name, status, statuses)
				}
			case deployapi.DeploymentStatusRunning, deployapi.DeploymentStatusPending:
				if sawStatus.Has(string(status)) {
					return fmt.Errorf("rc %s was %s, but so was an earlier RC: %v", rc.Name, status, statuses)
				}
				if sawStatus.Difference(completedStatuses).Len() != 0 {
					return fmt.Errorf("rc %s was %s, but earlier RCs were not completed: %v", rc.Name, status, statuses)
				}
			case deployapi.DeploymentStatusNew:
			default:
				return fmt.Errorf("rc %s has unexpected status %s: %v", rc.Name, status, statuses)
			}
		}
sawStatus.Insert(string(status))
statuses = append(statuses, string(status))
}
return nil
}

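// deploymentReachedCompletion reports true once the latest RC matches the
// config's latest version, is marked complete, and is scaled to the expected
// replica count (zero for test deployments).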
func deploymentReachedCompletion(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error) {
if len(rcs) == 0 {
return false, nil
}
rc := rcs[len(rcs)-1]
version := deployutil.DeploymentVersionFor(&rc)
if version != dc.Status.LatestVersion {
return false, nil
}

status := rc.Annotations[deployapi.DeploymentStatusAnnotation]
if deployapi.DeploymentStatus(status) != deployapi.DeploymentStatusComplete {
return false, nil
}
expectedReplicas := dc.Spec.Replicas
if dc.Spec.Test {
expectedReplicas = 0
}
if rc.Spec.Replicas != expectedReplicas {
return false, fmt.Errorf("deployment is complete but doesn't have expected spec replicas: %d %d", rc.Spec.Replicas, expectedReplicas)
}
if rc.Status.Replicas != expectedReplicas {
e2e.Logf("deployment is complete but doesn't have expected status replicas: %d %d", rc.Status.Replicas, expectedReplicas)
return false, nil
}
return true, nil
}

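// deploymentRunning reports true once the latest RC is running or complete;
// cancelled or deployer-less failures also end the wait, while any other
// failure is treated as an error.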
func deploymentRunning(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error) {
if len(rcs) == 0 {
return false, nil
}
rc := rcs[len(rcs)-1]
version := deployutil.DeploymentVersionFor(&rc)
if version != dc.Status.LatestVersion {
//e2e.Logf("deployment %s is not the latest version on DC: %d", rc.Name, version)
return false, nil
}

status := rc.Annotations[deployapi.DeploymentStatusAnnotation]
switch deployapi.DeploymentStatus(status) {
case deployapi.DeploymentStatusFailed:
if deployutil.IsDeploymentCancelled(&rc) {
return true, nil
}
reason := deployutil.DeploymentStatusReasonFor(&rc)
if reason == "deployer pod no longer exists" {
return true, nil
}
return false, fmt.Errorf("deployment failed: %v", deployutil.DeploymentStatusReasonFor(&rc))
case deployapi.DeploymentStatusRunning, deployapi.DeploymentStatusComplete:
return true, nil
default:
return false, nil
}
}

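// deploymentInfo fetches the deployment config, its replication controllers
// (sorted oldest to newest by version), and all pods in the test namespace.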
func deploymentInfo(oc *exutil.CLI, name string) (*deployapi.DeploymentConfig, []kapi.ReplicationController, []kapi.Pod, error) {
dc, err := oc.REST().DeploymentConfigs(oc.Namespace()).Get(name)
if err != nil {
return nil, nil, nil, err
}
//e2e.Logf("dc: %#v", dc.Status.LatestVersion)
rcs, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).List(kapi.ListOptions{
LabelSelector: deployutil.ConfigSelector(name),
})
if err != nil {
return nil, nil, nil, err
}
sort.Sort(deployutil.ByLatestVersionAsc(rcs.Items))
//e2e.Logf("rcs: %d", len(rcs.Items))
//e2e.Logf("Current deployment status for %s: %v", name, deploymentStatuses(rcs.Items))
pods, err := oc.KubeREST().Pods(oc.Namespace()).List(kapi.ListOptions{})
if err != nil {
return nil, nil, nil, err
}
return dc, rcs.Items, pods.Items, nil
}

type deploymentConditionFunc func(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error)

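// waitForLatestCondition polls the deployment config until the given
// condition is met, failing fast if any deployment invariant is violated.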
func waitForLatestCondition(oc *exutil.CLI, name string, timeout time.Duration, fn deploymentConditionFunc) error {
return wait.Poll(200*time.Millisecond, timeout, func() (bool, error) {
dc, rcs, pods, err := deploymentInfo(oc, name)
if err != nil {
return false, err
}
if err := checkDeploymentInvariants(dc, rcs, pods); err != nil {
return false, err
}
return fn(dc, rcs)
})
}
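
Any predicate with the deploymentConditionFunc shape can be polled through
waitForLatestCondition. As a minimal sketch (hypothetical, not part of this
commit), a deploymentFailed condition could be built from the same helpers:

// deploymentFailed is a hypothetical condition (not in this commit) that
// reports true once the latest deployment has been marked failed.
func deploymentFailed(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error) {
	if len(rcs) == 0 {
		return false, nil
	}
	rc := rcs[len(rcs)-1]
	if deployutil.DeploymentVersionFor(&rc) != dc.Status.LatestVersion {
		return false, nil
	}
	return deployutil.DeploymentStatusFor(&rc) == deployapi.DeploymentStatusFailed, nil
}

// usage: err := waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentFailed)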
27 changes: 27 additions & 0 deletions test/extended/fixtures/deployment-simple.yaml
@@ -0,0 +1,27 @@
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: deployment-simple
spec:
  replicas: 2
  selector:
    name: deployment-simple
  strategy:
    type: Rolling
    rollingParams:
  template:
    metadata:
      labels:
        name: deployment-simple
    spec:
      containers:
      - image: "docker.io/openshift/deployment-example:v1"
        imagePullPolicy: IfNotPresent
        name: myapp
        readinessProbe:
          httpGet:
            path: /
            port: 8080
            scheme: HTTP
  triggers:
  - type: ConfigChange