From 133db1cf169ae29f893fd86dff9a3c6a41bf8880 Mon Sep 17 00:00:00 2001 From: Andrew Lock Date: Fri, 20 Oct 2023 10:36:47 +0100 Subject: [PATCH] Always upload throughput results with the same name This solves problems around retrying throughput tests --- .azure-pipelines/ultimate-pipeline.yml | 40 +++++++++++++------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/.azure-pipelines/ultimate-pipeline.yml b/.azure-pipelines/ultimate-pipeline.yml index cc7f6eb8a90e..bd6f468432a9 100644 --- a/.azure-pipelines/ultimate-pipeline.yml +++ b/.azure-pipelines/ultimate-pipeline.yml @@ -3813,14 +3813,14 @@ stages: - script: | cp $(CrankDir)/*.json $(CrankDir)/results displayName: Copy the results to results dir - condition: succeededOrFailed() - continueOnError: true - publish: "$(CrankDir)/results" displayName: Publish results - artifact: crank_linux_x64_$(System.JobAttempt) - condition: succeededOrFailed() - continueOnError: true + # We don't include the JobAttempt in this case, because we rely on a specific name + # and an error in the throughput tests probably means no usable data, so don't + # bother trying to upload these in case of failure, which means we can retry the + # stages without issue + artifact: crank_linux_x64_1 - job: Windows64 timeoutInMinutes: 60 @@ -3857,14 +3857,14 @@ stages: - script: | cp $(CrankDir)/*.json $(CrankDir)/results displayName: Copy the results to results dir - condition: succeededOrFailed() - continueOnError: true - publish: "$(CrankDir)/results" displayName: Publish results - artifact: crank_windows_x64_$(System.JobAttempt) - condition: succeededOrFailed() - continueOnError: true + # We don't include the JobAttempt in this case, because we rely on a specific name + # and an error in the throughput tests probably means no usable data, so don't + # bother trying to upload these in case of failure, which means we can retry the + # stages without issue + artifact: crank_windows_x64_1 - job: LinuxArm64 timeoutInMinutes: 60 @@ 
-3900,14 +3900,14 @@ stages: - script: | cp $(CrankDir)/*.json $(CrankDir)/results displayName: Copy the results to results dir - condition: succeededOrFailed() - continueOnError: true - publish: "$(CrankDir)/results" displayName: Publish results - artifact: crank_linux_arm64_$(System.JobAttempt) - condition: succeededOrFailed() - continueOnError: true + # We don't include the JobAttempt in this case, because we rely on a specific name + # and an error in the throughput tests probably means no usable data, so don't + # bother trying to upload these in case of failure, which means we can retry the + # stages without issue + artifact: crank_linux_arm64_1 - stage: throughput_profiler condition: > @@ -4062,14 +4062,14 @@ stages: - script: | cp $(CrankDir)/*.json $(CrankDir)/results displayName: Copy the results to results dir - condition: succeededOrFailed() - continueOnError: true - publish: "$(CrankDir)/results" displayName: Publish results - artifact: crank_linux_x64_asm_$(System.JobAttempt) - condition: succeededOrFailed() - continueOnError: true + # We don't include the JobAttempt in this case, because we rely on a specific name + # and an error in the throughput tests probably means no usable data, so don't
 + # bother trying to upload these in case of failure, which means we can retry the + # stages without issue + artifact: crank_linux_x64_asm_1 - stage: coverage condition: and(succeeded(), eq(variables['isBenchmarksOnlyBuild'], 'False'), eq(variables['runCodeCoverage'], 'True'))