frontera-app prod:v0.0.72 deployed #5

name: Vitest Tests
on:
  workflow_dispatch:
  repository_dispatch:
    types:
      - frontera-app * deployed
      - customer-os-api * deployed
concurrency:
  group: ${{ github.workflow }}-global
  cancel-in-progress: false
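# The deploy pipelines are assumed to trigger this workflow through GitHub's
# repository_dispatch API, sending the version pair in client_payload. A hypothetical
# dispatch call (OWNER/REPO and the token are placeholders, not from this file):
#
#   curl -X POST \
#     -H "Authorization: Bearer $GITHUB_TOKEN" \
#     -H "Accept: application/vnd.github+json" \
#     https://api.github.com/repos/OWNER/REPO/dispatches \
#     -d '{"event_type": "frontera-app prod:v0.0.72 deployed",
#          "client_payload": {"version": "v0.0.72", "old_version": "v0.0.71"}}'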
jobs:
  test:
    runs-on: ubicloud-standard-2
    steps:
      - uses: actions/checkout@v4
      - name: Use Node.js
        uses: ubicloud/setup-node@v4
        with:
          node-version: '18'
      - name: Determine trigger source and versions
        id: trigger
        run: |
          if [[ "${{ github.event_name }}" == "repository_dispatch" ]]; then
            # github.event.action carries the dispatch type, e.g. "frontera-app prod:v0.0.72 deployed";
            # its first word names the service that deployed.
            TRIGGER_SOURCE=$(echo "${{ github.event.action }}" | cut -d ' ' -f 1)
            NEW_VERSION="${{ github.event.client_payload.version }}"
            OLD_VERSION="${{ github.event.client_payload.old_version }}"
            echo "trigger_source=$TRIGGER_SOURCE" >> $GITHUB_ENV
            echo "new_version=$NEW_VERSION" >> $GITHUB_ENV
            echo "old_version=$OLD_VERSION" >> $GITHUB_ENV
          else
            echo "trigger_source=manual" >> $GITHUB_ENV
          fi
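      # The install guard below checks steps.npm-cache.outputs.cache-hit, but no step with
      # id "npm-cache" existed in this file; a minimal sketch using actions/cache is restored
      # here (the path and key are assumptions, adjust to the project's layout):
      - name: Cache node_modules
        id: npm-cache
        uses: actions/cache@v4
        with:
          path: node_modules
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}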
      - name: Install dependencies
        if: steps.npm-cache.outputs.cache-hit != 'true'
        run: npm ci
        working-directory: .
        env:
          VITEST_SKIP_INSTALL_CHECKS: 1
      - name: Run Vitest tests
        working-directory: .
        env:
          VITE_TEST_API_URL: ${{ secrets.PROD_VITE_TEST_API_URL }}
          VITE_TEST_API_KEY: ${{ secrets.PROD_VITE_TEST_API_KEY }}
          VITE_TEST_TENANT: ${{ secrets.PROD_VITE_TEST_TENANT }}
          VITE_TEST_USERNAME: ${{ secrets.PROD_VITE_TEST_USERNAME }}
          VITE_REALTIME_WS_API_KEY: ${{ secrets.PROD_VITE_REALTIME_WS_API_KEY }}
          VITE_REALTIME_WS_PATH: ${{ secrets.PROD_VITE_REALTIME_WS_PATH }}
        run: |
          set -o pipefail # Ensure the npm exit status propagates through the tee pipe
          # The default shell runs with -e, so capture a non-zero status via || before it aborts the step
          TEST_EXIT_CODE=0
          npm run test:integration 2>&1 | tee test-output.txt || TEST_EXIT_CODE=$?
          echo "Test run completed with exit code $TEST_EXIT_CODE"
          exit $TEST_EXIT_CODE
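      # The parser below keys off Vitest's default-reporter summary line, which looks like
      # "Tests  2 failed | 38 passed (40)" on failures or "Tests  40 passed (40)" when green.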
      - name: Parse Vitest results
        working-directory: .
        id: parse_results
        if: always() # Run this step even if tests fail
        run: |
          # Ensure the test-output.txt file exists
          if [ ! -f "test-output.txt" ]; then
            echo "Error: test-output.txt not found."
            exit 1
          fi
          # Look for the final test summary line
          SUMMARY_LINE=$(grep -E "Tests.*\([0-9]+\)" test-output.txt | tail -n 1)
          if [ -z "$SUMMARY_LINE" ]; then
            echo "Error: Could not find test summary line in output."
            exit 1
          fi
          echo "Found summary line: $SUMMARY_LINE"
          # Initialize variables
          FAILED_TESTS=0
          PASSED_TESTS=0
          # Extract the total tests (number in parentheses); "|| true" keeps the -e shell
          # alive on a non-match so the validation below can report the failure
          TOTAL_TESTS=$(echo "$SUMMARY_LINE" | grep -oP '\((\d+)\)' | grep -oP '\d+' || true)
          # Check if there are failed tests
          if echo "$SUMMARY_LINE" | grep -q "failed"; then
            FAILED_COUNT=$(echo "$SUMMARY_LINE" | grep -oP '\d+(?=\sfailed)' || true)
            FAILED_TESTS=${FAILED_COUNT:-0}
            # Check if there are also passed tests
            if echo "$SUMMARY_LINE" | grep -q "passed"; then
              PASSED_COUNT=$(echo "$SUMMARY_LINE" | grep -oP '\d+(?=\spassed)' || true)
              PASSED_TESTS=${PASSED_COUNT:-0}
            else
              # All tests failed
              PASSED_TESTS=0
            fi
          else
            # Check if there are passed tests
            if echo "$SUMMARY_LINE" | grep -q "passed"; then
              PASSED_COUNT=$(echo "$SUMMARY_LINE" | grep -oP '\d+(?=\spassed)' || true)
              PASSED_TESTS=${PASSED_COUNT:-0}
              FAILED_TESTS=0
            else
              echo "Error: Cannot determine pass/fail counts from summary line: $SUMMARY_LINE"
              exit 1
            fi
          fi
          # Validate the numbers
          if [ -z "$TOTAL_TESTS" ]; then
            echo "Error: Failed to parse total test count from summary line: $SUMMARY_LINE"
            exit 1
          fi
          # Verify that passed + failed equals total
          CALCULATED_TOTAL=$((PASSED_TESTS + FAILED_TESTS))
          if [ "$CALCULATED_TOTAL" -ne "$TOTAL_TESTS" ]; then
            echo "Warning: Mismatch in test counts. Passed ($PASSED_TESTS) + Failed ($FAILED_TESTS) = $CALCULATED_TOTAL, but reported total was $TOTAL_TESTS"
            # Use calculated total for consistency
            TOTAL_TESTS=$CALCULATED_TOTAL
          fi
          echo "Tests detected - Total: $TOTAL_TESTS, Passed: $PASSED_TESTS, Failed: $FAILED_TESTS"
          # Output results for further use
          echo "total_tests=$TOTAL_TESTS" >> $GITHUB_ENV
          echo "passed_tests=$PASSED_TESTS" >> $GITHUB_ENV
          echo "failed_tests=$FAILED_TESTS" >> $GITHUB_ENV
      - name: Send test results to Slack
        if: always()
        env:
          TOTAL_TESTS: ${{ env.total_tests }}
          PASSED_TESTS: ${{ env.passed_tests }}
          FAILED_TESTS: ${{ env.failed_tests }}
          TRIGGER_SOURCE: ${{ env.trigger_source }}
          NEW_VERSION: ${{ env.new_version }}
          OLD_VERSION: ${{ env.old_version }}
        run: |
          BUILD_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          # Create the trigger message with version information
          TRIGGER_MSG=""
          if [ "$TRIGGER_SOURCE" != "manual" ]; then
            VERSION_INFO=""
            if [ -n "$NEW_VERSION" ] && [ -n "$OLD_VERSION" ]; then
VERSION_INFO=" - new version: $NEW_VERSION / old version: v$OLD_VERSION"
            fi
            TRIGGER_MSG="\nTriggered by \`$TRIGGER_SOURCE\`$VERSION_INFO"
          fi
          SUMMARY="<$BUILD_URL|Vitest Test Results>:\n✅ $PASSED_TESTS passed, ❌ $FAILED_TESTS failed ($TOTAL_TESTS total)$TRIGGER_MSG"
          curl --fail -X POST -H "Content-type: application/json" --data "{
            \"text\": \"$SUMMARY\"
          }" ${{ secrets.SLACK_FE_TESTS_WEBHOOK_URL }}