diff --git a/.github/workflows/image-reuse.yaml b/.github/workflows/image-reuse.yaml index 067f017484..85a337b988 100644 --- a/.github/workflows/image-reuse.yaml +++ b/.github/workflows/image-reuse.yaml @@ -79,7 +79,7 @@ jobs: cosign-release: 'v2.2.0' - uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - - uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 + - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Setup tags for container image as a CSV type run: | diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a71f20f40f..863e7da737 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -98,7 +98,7 @@ jobs: uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Generate release artifacts run: | diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index d9862e5726..7c4282bea7 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -72,7 +72,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: coverage-output-unit - path: coverage-output-unit/coverage.out + path: coverage-output-unit test-e2e: strategy: @@ -123,8 +123,17 @@ jobs: uses: mxschmitt/action-tmate@v3 if: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled == 'true'}} - name: Run e2e tests - run: make test-e2e + run: | + make test-e2e if: ${{ !(github.event_name == 'workflow_dispatch' && github.event.inputs.debug_enabled == 'true') }} + - name: Stop e2e tests controller + run: | + pgrep -f go-build -a + pkill -f go-build + sleep 5 + echo "done stopping process" + ls -lah coverage-output-e2e/ + if: ${{ !(github.event_name == 
'workflow_dispatch' && github.event.inputs.debug_enabled == 'true' && matrix.kubernetes.latest)}} - name: Output Rerun Overview run: | [[ -f rerunreport.txt ]] && cat rerunreport.txt || echo "No rerun report found" @@ -160,6 +169,7 @@ jobs: with: go-version: ${{ env.GOLANG_VERSION }} id: go + - uses: actions/checkout@v4 - name: Get e2e code coverage uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 with: @@ -172,11 +182,14 @@ jobs: path: coverage-output-unit - name: combine-go-coverage run: | - go tool covdata percent -i=coverage-output-unit/,coverage-output-e2e/ -o full-coverage.out + go tool covdata textfmt -i=coverage-output-unit/,coverage-output-e2e/ -o full-coverage.out - name: Upload code coverage information to codecov.io uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0 with: file: full-coverage.out fail_ci_if_error: false + codecov_yml_path: .codecov.yml + disable_search: true + verbose: true env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} \ No newline at end of file diff --git a/Makefile b/Makefile index 5f4ae3ded2..55b21af87c 100644 --- a/Makefile +++ b/Makefile @@ -245,7 +245,7 @@ test-e2e: install-devtools-local .PHONY: test-unit test-unit: install-devtools-local ## run unit tests mkdir -p coverage-output-unit - ${DIST_DIR}/gotestsum --junitfile=junit-unit-test.xml --format=testname -- -covermode=count -coverprofile=coverage-output-unit/coverage.out `go list ./... | grep -v ./test/cmd/metrics-plugin-sample` + ${DIST_DIR}/gotestsum --junitfile=junit-unit-test.xml --format=testname -- `go list ./... | grep -v ./test/cmd/metrics-plugin-sample` -cover -test.gocoverdir=$(CURDIR)/coverage-output-unit .PHONY: coverage diff --git a/docs/FAQ.md b/docs/FAQ.md index 861e4b3a80..caa9df475f 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -1,5 +1,7 @@ # FAQ +Be sure to read the [Best practices page](../best-practices) as well. + ## General ### Does Argo Rollouts depend on Argo CD or any other Argo project? 
diff --git a/docs/best-practices.md b/docs/best-practices.md index c268bb5d92..0ed32efbec 100644 --- a/docs/best-practices.md +++ b/docs/best-practices.md @@ -29,7 +29,23 @@ You should *NOT* use Argo Rollouts for preview/ephemeral environments. For that The recommended way to use Argo Rollouts is for brief deployments that take 15-20 minutes or maximum 1-2 hours. If you want to run new versions for days or weeks before deciding to promote, then Argo Rollouts is probably not the best solution for you. -Also, if you want to run a wave of multiple versions at the same time (i.e. have 1.1 and 1.2 and 1.3 running at the same time), know that Argo Rollouts was not designed for this scenario. +Keeping parallel releases for long periods complicates the deployment process a lot and opens several questions where different people have different views on how Argo Rollouts should work. + +For example, let's say that you have been testing version 1.3 as stable and 1.4 as preview for a week. +Then somebody deploys 1.5. + +1. Some people believe that the new state should be 1.3 stable and 1.5 as preview +1. Some people believe that the new state should be 1.4 stable and 1.5 as preview + +Currently Argo Rollouts follows the first approach, under the assumption that something was really wrong with 1.4 and 1.5 is the hotfix. + +And then let's say that 1.5 has an issue. Some people believe that Argo Rollouts should "roll back" to 1.3 while other people think it should roll back to 1.4. + +Currently Argo Rollouts assumes that the version to roll back to is always 1.3 regardless of how many "hotfixes" have been previewed in-between. + +All these problems are not present if you make the assumption that each release stays active only for a minimal time and you always create one new version when the previous one has finished. + +Also, if you want to run a wave of multiple versions at the same time (i.e. 
have 1.1 and 1.2 and 1.3 running at the same time), know that Argo Rollouts was not designed for this scenario. Argo Rollouts always works with the assumption that there is one stable/previous version and one preview/next version. A version that has just been promoted is assumed to be ready for production and has already passed all your tests (either manual or automated). @@ -41,6 +57,8 @@ While Argo Rollouts supports manual promotions and other manual pauses, these ar Ideally you should have proper metrics that tell you in 5-15 minutes if a deployment is successful or not. If you don't have those metrics, then you will miss a lot of value from Argo Rollouts. +If you are doing a deployment right now and then have an actual human looking at logs/metrics/traces for the next 2 hours, adopting Argo Rollouts is not going to help you much with automated deployments. + Get your [metrics](../features/analysis) in place first and test them with dry-runs before applying them to production deployments.