Skip to content

Commit

Permalink
Abuse deployments with extended test
Browse files Browse the repository at this point in the history
  • Loading branch information
smarterclayton committed Apr 12, 2016
1 parent ced29a2 commit 83b8a06
Show file tree
Hide file tree
Showing 3 changed files with 259 additions and 10 deletions.
233 changes: 228 additions & 5 deletions test/extended/deployments/deployments.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,29 +2,93 @@ package deployments

import (
"fmt"
"math/rand"
"sort"
"time"

"k8s.io/kubernetes/pkg/util/wait"

g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"

kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e"

deployapi "github.com/openshift/origin/pkg/deploy/api"
deployutil "github.com/openshift/origin/pkg/deploy/util"
exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("deployments: parallel: test deployment", func() {
const deploymentRunTimeout = 5 * time.Minute

var _ = g.Describe("deploymentconfigs", func() {
defer g.GinkgoRecover()
var (
deploymentFixture = exutil.FixturePath("..", "extended", "fixtures", "test-deployment-test.yaml")
oc = exutil.NewCLI("cli-deployment", exutil.KubeConfigPath())
deploymentFixture = exutil.FixturePath("..", "extended", "fixtures", "test-deployment-test.yaml")
simpleDeploymentFixture = exutil.FixturePath("..", "extended", "fixtures", "deployment-simple.yaml")
oc = exutil.NewCLI("cli-deployment", exutil.KubeConfigPath())
)

g.Describe("test deployment", func() {
g.Describe("when run iteratively", func() {
g.It("should only deploy the last deployment", func() {
// print some debugging output if the deployment fails
defer func() {
if !g.CurrentGinkgoTestDescription().Failed {
return
}
if dc, rcs, pods, err := deploymentInfo(oc, "deployment-simple"); err == nil {
e2e.Logf("DC: %#v", dc)
e2e.Logf(" RCs: %#v", rcs)
p, _ := deploymentPods(pods)
e2e.Logf(" Deployers: %#v", p)
}
}()

_, err := oc.Run("create").Args("-f", simpleDeploymentFixture).Output()
o.Expect(err).NotTo(o.HaveOccurred())

iterations := 15
for i := 0; i < iterations; i++ {
if rand.Float32() < 0.5 {
time.Sleep(1 * time.Second)
}
switch n := rand.Float32(); {
case n < 0.4:
// trigger a new deployment
e2e.Logf("%d: triggering a new deployment with config change", i)
out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", i)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("updated"))
case n < 0.6:
// cancel any running deployment
out, err := oc.Run("deploy").Args("dc/deployment-simple", "--cancel").Output()
o.Expect(err).NotTo(o.HaveOccurred())
e2e.Logf("%02d: cancelling deployment: %s", i, out)
default:
// wait for the deployment to be running
e2e.Logf("%02d: waiting for current deployment to start running", i)
o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentRunning)).To(o.BeNil())
}
}

// trigger one more deployment, just in case we cancelled the latest output
out, err := oc.Run("set", "env").Args("dc/deployment-simple", fmt.Sprintf("A=%d", iterations)).Output()
o.Expect(err).NotTo(o.HaveOccurred())
o.Expect(out).To(o.ContainSubstring("updated"))

g.By("verifying all but terminal deployment is marked complete")
o.Expect(waitForLatestCondition(oc, "deployment-simple", deploymentRunTimeout, deploymentReachedCompletion)).To(o.BeNil())
})
})

g.Describe("with test deployments", func() {
g.It("should run a deployment to completion and then scale to zero", func() {
out, err := oc.Run("create").Args("-f", deploymentFixture).Output()
o.Expect(err).NotTo(o.HaveOccurred())

o.Expect(waitForLatestCondition(oc, "deployment-test", deploymentRunTimeout, deploymentRunning)).To(o.BeNil())

out, err = oc.Run("logs").Args("-f", "dc/deployment-test").Output()
o.Expect(err).NotTo(o.HaveOccurred())
g.By(fmt.Sprintf("checking the logs for substrings\n%s", out))
Expand All @@ -33,7 +97,7 @@ var _ = g.Describe("deployments: parallel: test deployment", func() {
o.Expect(out).To(o.ContainSubstring("Deployment deployment-test-1 successfully made active"))

g.By("verifying the deployment is marked complete and scaled to zero")
err = wait.Poll(100*time.Millisecond, 1*time.Minute, func() (bool, error) {
err = wait.Poll(100*time.Millisecond, 5*time.Minute, func() (bool, error) {
rc, err := oc.KubeREST().ReplicationControllers(oc.Namespace()).Get("deployment-test-1")
o.Expect(err).NotTo(o.HaveOccurred())
status := rc.Annotations[deployapi.DeploymentStatusAnnotation]
Expand Down Expand Up @@ -99,3 +163,162 @@ var _ = g.Describe("deployments: parallel: test deployment", func() {
})
})
})

// deploymentStatuses returns the deployment status annotation value for each
// replication controller in rcs, in the same order as the input.
func deploymentStatuses(rcs []kapi.ReplicationController) []string {
	result := make([]string, 0, len(rcs))
	for i := range rcs {
		result = append(result, string(deployutil.DeploymentStatusFor(&rcs[i])))
	}
	return result
}

// deploymentPods indexes deployer pods by the deployment (RC) name recorded in
// their deployer-pod-for annotation. Pods without the annotation are skipped.
// Returns an error if two deployer pods reference the same deployment, since
// at most one deployer pod may exist per deployment.
func deploymentPods(pods []kapi.Pod) (map[string]*kapi.Pod, error) {
	byDeployment := make(map[string]*kapi.Pod)
	for i := range pods {
		pod := &pods[i]
		target, ok := pod.Annotations[deployapi.DeployerPodForDeploymentLabel]
		if !ok {
			continue
		}
		if prior, dup := byDeployment[target]; dup {
			return nil, fmt.Errorf("multiple deployer pods exist for %q - %s and %s", target, prior.Name, pod.Name)
		}
		byDeployment[target] = pod
	}
	return byDeployment, nil
}

// completedStatuses is the set of terminal deployment statuses; earlier RCs
// must reach one of these before a newer deployment may be in progress.
var completedStatuses = sets.NewString(string(deployapi.DeploymentStatusComplete), string(deployapi.DeploymentStatusFailed))

// checkDeploymentInvariants verifies properties that must hold for a
// deployment config at any point in time:
//   - no more deployer pods exist than deployments (RCs)
//   - at most one deployment is actively running
//   - once any RC exists, older RCs must be terminal before a newer one may be
//     pending/running, and no two RCs may share a non-terminal status
func checkDeploymentInvariants(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController, pods []kapi.Pod) error {
	deployers, err := deploymentPods(pods)
	if err != nil {
		return err
	}
	if len(deployers) > len(rcs) {
		return fmt.Errorf("more deployer pods found than deployments: %#v %#v", deployers, rcs)
	}
	running := sets.NewString()
	completed := 0
	for k, v := range deployers {
		switch v.Status.Phase {
		case kapi.PodSucceeded, kapi.PodFailed:
			completed++
		default:
			running.Insert(k)
		}
	}
	if running.Len() > 1 {
		return fmt.Errorf("found multiple running deployments: %v", running.List())
	}
	sawStatus := sets.NewString()
	statuses := []string{}
	for _, rc := range rcs {
		status := deployutil.DeploymentStatusFor(&rc)
		// Only the ordering of statuses relative to earlier RCs is checked,
		// so the first RC is always accepted.
		if sawStatus.Len() != 0 {
			switch status {
			case deployapi.DeploymentStatusComplete, deployapi.DeploymentStatusFailed:
				if sawStatus.Difference(completedStatuses).Len() != 0 {
					return fmt.Errorf("rc %s was %s, but earlier RCs were not completed: %v", rc.Name, status, statuses)
				}
			case deployapi.DeploymentStatusRunning, deployapi.DeploymentStatusPending:
				if sawStatus.Has(string(status)) {
					return fmt.Errorf("rc %s was %s, but so was an earlier RC: %v", rc.Name, status, statuses)
				}
				if sawStatus.Difference(completedStatuses).Len() != 0 {
					return fmt.Errorf("rc %s was %s, but earlier RCs were not completed: %v", rc.Name, status, statuses)
				}
			case deployapi.DeploymentStatusNew:
			default:
				// Fixed: the original format string had two verbs for three
				// arguments, producing a %!(EXTRA ...) suffix in the message.
				return fmt.Errorf("rc %s has unexpected status %s: %v", rc.Name, status, statuses)
			}
		}
		sawStatus.Insert(string(status))
		statuses = append(statuses, string(status))
	}
	return nil
}

// deploymentReachedCompletion is a deploymentConditionFunc that reports
// whether the latest RC corresponds to dc's latest version, has completed,
// and is scaled to the configured replica count. A complete deployment with
// an unexpected replica count is a hard error.
func deploymentReachedCompletion(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error) {
	if len(rcs) == 0 {
		return false, nil
	}
	rc := rcs[len(rcs)-1]
	version := deployutil.DeploymentVersionFor(&rc)
	if version != dc.Status.LatestVersion {
		// A newer deployment has been requested but its RC doesn't exist yet.
		return false, nil
	}

	status := rc.Annotations[deployapi.DeploymentStatusAnnotation]
	if deployapi.DeploymentStatus(status) != deployapi.DeploymentStatusComplete {
		return false, nil
	}
	if rc.Spec.Replicas != dc.Spec.Replicas {
		return false, fmt.Errorf("deployment is complete but doesn't have expected spec replicas: %d", rc.Spec.Replicas)
	}
	if rc.Status.Replicas != dc.Spec.Replicas {
		// Fixed: the original message printed rc.Spec.Replicas here, masking
		// the actual (status) replica count that failed the check.
		return false, fmt.Errorf("deployment is complete but doesn't have expected status replicas: %d", rc.Status.Replicas)
	}
	return true, nil
}

// deploymentRunning is a deploymentConditionFunc that reports whether the
// latest deployment for dc is at least running (or already complete).
// A failed deployment returns an error unless it was cancelled, in which
// case it counts as done.
func deploymentRunning(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error) {
	if len(rcs) == 0 {
		return false, nil
	}
	latest := rcs[len(rcs)-1]
	if deployutil.DeploymentVersionFor(&latest) != dc.Status.LatestVersion {
		// A newer deployment has been requested but its RC doesn't exist yet.
		return false, nil
	}

	switch deployapi.DeploymentStatus(latest.Annotations[deployapi.DeploymentStatusAnnotation]) {
	case deployapi.DeploymentStatusFailed:
		if deployutil.IsDeploymentCancelled(&latest) {
			// Cancellation is an expected terminal state in these tests.
			return true, nil
		}
		return false, fmt.Errorf("deployment failed: %v", deployutil.DeploymentStatusReasonFor(&latest))
	case deployapi.DeploymentStatusRunning, deployapi.DeploymentStatusComplete:
		return true, nil
	default:
		return false, nil
	}
}

// deploymentInfo fetches the named deployment config together with its
// replication controllers (sorted by ascending deployment version) and all
// pods in the test namespace.
func deploymentInfo(oc *exutil.CLI, name string) (*deployapi.DeploymentConfig, []kapi.ReplicationController, []kapi.Pod, error) {
	ns := oc.Namespace()
	dc, err := oc.REST().DeploymentConfigs(ns).Get(name)
	if err != nil {
		return nil, nil, nil, err
	}
	rcList, err := oc.KubeREST().ReplicationControllers(ns).List(kapi.ListOptions{
		LabelSelector: deployutil.ConfigSelector(name),
	})
	if err != nil {
		return nil, nil, nil, err
	}
	sort.Sort(deployutil.ByLatestVersionAsc(rcList.Items))
	podList, err := oc.KubeREST().Pods(ns).List(kapi.ListOptions{})
	if err != nil {
		return nil, nil, nil, err
	}
	return dc, rcList.Items, podList.Items, nil
}

// deploymentConditionFunc evaluates a condition over a deployment config and
// its replication controllers, returning true once the condition holds.
type deploymentConditionFunc func(dc *deployapi.DeploymentConfig, rcs []kapi.ReplicationController) (bool, error)

// waitForLatestCondition polls until fn reports true for the named deployment
// config or timeout elapses, failing immediately if any deployment invariant
// is violated along the way.
func waitForLatestCondition(oc *exutil.CLI, name string, timeout time.Duration, fn deploymentConditionFunc) error {
	check := func() (bool, error) {
		dc, rcs, pods, err := deploymentInfo(oc, name)
		if err != nil {
			return false, err
		}
		if err := checkDeploymentInvariants(dc, rcs, pods); err != nil {
			return false, err
		}
		return fn(dc, rcs)
	}
	return wait.Poll(200*time.Millisecond, timeout, check)
}
27 changes: 27 additions & 0 deletions test/extended/fixtures/deployment-simple.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# A minimal two-replica DeploymentConfig with a rolling strategy and a
# config-change trigger, used by the iterative deployment extended tests.
# NOTE(review): indentation was lost in the page scrape; reconstructed here
# to the canonical DeploymentConfig layout (template/triggers under spec).
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: deployment-simple
spec:
  replicas: 2
  selector:
    name: deployment-simple
  strategy:
    type: Rolling
    rollingParams:
  template:
    metadata:
      labels:
        name: deployment-simple
    spec:
      containers:
      - image: "docker.io/openshift/deployment-example:v1"
        imagePullPolicy: IfNotPresent
        name: myapp
        readinessProbe:
          httpGet:
            path: /
            port: 8080
            scheme: HTTP
  triggers:
  - type: ConfigChange
9 changes: 4 additions & 5 deletions test/extended/util/cli.go
Original file line number Diff line number Diff line change
Expand Up @@ -214,21 +214,20 @@ func (c *CLI) setOutput(out io.Writer) *CLI {
// Run executes given OpenShift CLI command verb (iow. "oc <verb>").
// This function also override the default 'stdout' to redirect all output
// to a buffer and prepare the global flags such as namespace and config path.
func (c *CLI) Run(verb string) *CLI {
func (c *CLI) Run(commands ...string) *CLI {
in, out, errout := &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}
nc := &CLI{
execPath: c.execPath,
verb: verb,
verb: commands[0],
kubeFramework: c.KubeFramework(),
adminConfigPath: c.adminConfigPath,
configPath: c.configPath,
username: c.username,
outputDir: c.outputDir,
globalArgs: []string{
verb,
globalArgs: append(commands, []string{
fmt.Sprintf("--namespace=%s", c.Namespace()),
fmt.Sprintf("--config=%s", c.configPath),
},
}...),
}
nc.stdin, nc.stdout, nc.stderr = in, out, errout
return nc.setOutput(c.stdout)
Expand Down

0 comments on commit 83b8a06

Please sign in to comment.