Factor up adding pod logs to the build log
Two of the E2E tests include the pod logs in the build log in case
of failure. Factor that logic up into tearDown in init_test so that
it is available to all existing and future E2E tests.
afrittoli authored and tekton-robot committed Jun 7, 2019
1 parent b6b9050 commit e130dd1
Showing 4 changed files with 11 additions and 19 deletions.
test/artifact_bucket_test.go (9 changes: 0 additions & 9 deletions)

@@ -174,15 +174,6 @@ func TestStorageBucketPipelineRun(t *testing.T) {
 	// Verify status of PipelineRun (wait for it)
 	if err := WaitForPipelineRunState(c, bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), "PipelineRunCompleted"); err != nil {
 		t.Errorf("Error waiting for PipelineRun %s to finish: %s", bucketTestPipelineRunName, err)
-		taskruns, err := c.TaskRunClient.List(metav1.ListOptions{})
-		if err != nil {
-			t.Errorf("Error getting TaskRun list for PipelineRun %s %s", bucketTestPipelineRunName, err)
-		}
-		for _, tr := range taskruns.Items {
-			if tr.Status.PodName != "" {
-				CollectPodLogs(c, tr.Status.PodName, namespace, t.Logf)
-			}
-		}
 		t.Fatalf("PipelineRun execution failed")
 	}
 }
test/build_logs.go (2 changes: 1 addition & 1 deletion)

@@ -31,7 +31,7 @@ import (
 func CollectPodLogs(c *clients, podName, namespace string, logf logging.FormatLogger) {
 	logs, err := getContainerLogsFromPod(c.KubeClient.Kube, podName, namespace)
 	if err != nil {
-		logf("Expected there to be logs for pod %s: %s", podName, err)
+		logf("Could not get logs for pod %s: %s", podName, err)
 	}
 	logf("build logs %s", logs)
 }
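CollectPodLogs delegates the retrieval itself to getContainerLogsFromPod, which this diff does not touch. For orientation, here is a minimal sketch of what such a helper could look like against the client-go API of that era; the function name and call site come from the diff above, but the body, imports, and per-container formatting are assumptions, not the repository's actual code:

// Sketch only: one plausible implementation of the helper called by
// CollectPodLogs. It fetches the Pod, then streams and concatenates
// the logs of each of its containers. Uses the pre-context client-go
// API (circa 2019); the real implementation may differ.
package test

import (
	"fmt"
	"io/ioutil"
	"strings"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func getContainerLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) {
	p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	var sb strings.Builder
	for _, container := range p.Spec.Containers {
		sb.WriteString(fmt.Sprintf("\n>>> Container %s:\n", container.Name))
		// GetLogs returns a *rest.Request; Stream opens the log stream.
		rc, err := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Container: container.Name}).Stream()
		if err != nil {
			return "", err
		}
		bs, err := ioutil.ReadAll(rc)
		rc.Close()
		if err != nil {
			return "", err
		}
		sb.Write(bs)
	}
	return sb.String(), nil
}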
test/helm_task_test.go (9 changes: 0 additions & 9 deletions)

@@ -92,15 +92,6 @@ func TestHelmDeployPipelineRun(t *testing.T) {
 	// Verify status of PipelineRun (wait for it)
 	if err := WaitForPipelineRunState(c, helmDeployPipelineRunName, timeout, PipelineRunSucceed(helmDeployPipelineRunName), "PipelineRunCompleted"); err != nil {
 		t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err)
-		taskruns, err := c.TaskRunClient.List(metav1.ListOptions{})
-		if err != nil {
-			t.Errorf("Error getting TaskRun list for PipelineRun %s %s", helmDeployPipelineRunName, err)
-		}
-		for _, tr := range taskruns.Items {
-			if tr.Status.PodName != "" {
-				CollectPodLogs(c, tr.Status.PodName, namespace, t.Logf)
-			}
-		}
 		t.Fatalf("PipelineRun execution failed; helm may or may not have been installed :(")
 	}
 
test/init_test.go (10 changes: 10 additions & 0 deletions)

@@ -77,6 +77,16 @@ func tearDown(t *testing.T, cs *clients, namespace string) {
 		} else {
 			t.Log(string(bs))
 		}
+		header(t.Logf, fmt.Sprintf("Dumping logs from Pods in the %s namespace", namespace))
+		taskruns, err := cs.TaskRunClient.List(metav1.ListOptions{})
+		if err != nil {
+			t.Errorf("Error getting TaskRun list %s", err)
+		}
+		for _, tr := range taskruns.Items {
+			if tr.Status.PodName != "" {
+				CollectPodLogs(cs, tr.Status.PodName, namespace, t.Logf)
+			}
+		}
 	}
 
 	t.Logf("Deleting namespace %s", namespace)
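With the collection factored into tearDown, every E2E test that registers the standard cleanup gets pod logs in its build log on failure with no extra code. A hypothetical sketch of the resulting pattern (the test name and body are illustrative; setup and tearDown are the helpers from init_test.go):

// Hypothetical E2E test: nothing log-related remains in the test itself.
func TestExamplePipelineRun(t *testing.T) {
	c, namespace := setup(t)        // assumed helper from init_test.go
	defer tearDown(t, c, namespace) // dumps TaskRun pod logs on failure

	// ... create resources and wait for the PipelineRun; on error, just
	// t.Fatalf: tearDown lists the TaskRuns in the namespace and calls
	// CollectPodLogs for each one that has a Pod.
}

This removes the duplicated collection loops deleted from the two tests above and keeps the failure-diagnostics logic in one place.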
