From c89704a410f76786de216b15149d67017e013000 Mon Sep 17 00:00:00 2001
From: Jozef Izso
Date: Sun, 14 Dec 2025 15:11:36 +0100
Subject: [PATCH 1/3] Add integration tests for fail-on-error and
 fail-on-empty scenarios (#217)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add workflow and fixtures to test the behavior of the fail-on-error and
fail-on-empty parameters across different scenarios:

- Passing tests with fail-on-error true/false
- Failing tests with fail-on-error true/false
- Empty results with fail-on-empty true/false

Scenario 4 (failing tests + fail-on-error=false) is expected to fail
until issue #217 is fixed, documenting the bug where the check
conclusion shows 'success' even when tests fail.

The workflow outputs a GitHub Actions summary with a markdown table
showing all test results.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
---
 .../workflows/integration-tests-issue-217.yml    | 298 ++++++++++++++++++
 __tests__/fixtures/integration/empty-tests.xml   |   5 +
 __tests__/fixtures/integration/failing-tests.xml |  14 +
 __tests__/fixtures/integration/passing-tests.xml |   8 +
 4 files changed, 325 insertions(+)
 create mode 100644 .github/workflows/integration-tests-issue-217.yml
 create mode 100644 __tests__/fixtures/integration/empty-tests.xml
 create mode 100644 __tests__/fixtures/integration/failing-tests.xml
 create mode 100644 __tests__/fixtures/integration/passing-tests.xml

diff --git a/.github/workflows/integration-tests-issue-217.yml b/.github/workflows/integration-tests-issue-217.yml
new file mode 100644
index 0000000..c02af56
--- /dev/null
+++ b/.github/workflows/integration-tests-issue-217.yml
@@ -0,0 +1,298 @@
+name: Integration Tests (#217) - fail-on-error/fail-on-empty
+
+on:
+  workflow_dispatch:
+  push:
+  pull_request:
+    paths:
+      - 'src/**'
+      - 'dist/**'
+      - 'action.yml'
+      - '.github/workflows/integration-tests-issue-217.yml'
+      - '__tests__/fixtures/integration/**'
+
+jobs:
+  # ============================================
+  # Scenario 1: Passing tests, fail-on-error=true
+  # Expected: Step passes, conclusion=success
+  # ============================================
+  test-passing-fail-on-error-true:
+    name: "Passing tests | fail-on-error=true"
+    runs-on: ubuntu-slim
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Run test reporter
+        id: report
+        uses: ./
+        with:
+          name: 'Integration Test - Passing (fail-on-error=true)'
+          path: '__tests__/fixtures/integration/passing-tests.xml'
+          reporter: java-junit
+          fail-on-error: 'true'
+          fail-on-empty: 'true'
+
+      - name: Validate results
+        run: |
+          echo "=== Test Results ==="
+          echo "Step outcome: success (would have failed otherwise)"
+          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+          echo "Passed: ${{ steps.report.outputs.passed }}"
+          echo "Failed: ${{ steps.report.outputs.failed }}"
+
+          if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
+            echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
+            exit 1
+          fi
+          echo "PASS: All validations passed"
+
+  # ============================================
+  # Scenario 2: Passing tests, fail-on-error=false
+  # Expected: Step passes, conclusion=success
+  # ============================================
+  test-passing-fail-on-error-false:
+    name: "Passing tests | fail-on-error=false"
+    runs-on: ubuntu-slim
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Run test reporter
+        id: report
+        uses: ./
+        with:
+          name: 'Integration Test - Passing (fail-on-error=false)'
+          path: '__tests__/fixtures/integration/passing-tests.xml'
+          reporter: java-junit
+          fail-on-error: 'false'
+          fail-on-empty: 'true'
+
+      - name: Validate results
+        run: |
+          echo "=== Test Results ==="
+          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+
+          if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
+            echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
+            exit 1
+          fi
+          echo "PASS: All validations passed"
+
+  # ============================================
+  # Scenario 3: Failing tests, fail-on-error=true
+  # Expected: Step FAILS, conclusion=failure
+  # ============================================
+  test-failing-fail-on-error-true:
+    name: "Failing tests | fail-on-error=true"
+    runs-on: ubuntu-slim
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Run test reporter
+        id: report
+        continue-on-error: true
+        uses: ./
+        with:
+          name: 'Integration Test - Failing (fail-on-error=true)'
+          path: '__tests__/fixtures/integration/failing-tests.xml'
+          reporter: java-junit
+          fail-on-error: 'true'
+          fail-on-empty: 'true'
+
+      - name: Validate results
+        run: |
+          echo "=== Test Results ==="
+          echo "Step outcome: ${{ steps.report.outcome }}"
+          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+          echo "Failed count: ${{ steps.report.outputs.failed }}"
+
+          # Step should fail
+          if [ "${{ steps.report.outcome }}" != "failure" ]; then
+            echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
+            exit 1
+          fi
+
+          # Conclusion should be failure
+          if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
+            echo "FAIL: Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
+            exit 1
+          fi
+          echo "PASS: All validations passed"
+
+  # ============================================
+  # Scenario 4: Failing tests, fail-on-error=false
+  # Expected: Step passes, conclusion=failure
+  # BUG: Currently conclusion=success (issue #217)
+  # ============================================
+  test-failing-fail-on-error-false:
+    name: "Failing tests | fail-on-error=false [BUG #217]"
+    runs-on: ubuntu-slim
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Run test reporter
+        id: report
+        continue-on-error: true
+        uses: ./
+        with:
+          name: 'Integration Test - Failing (fail-on-error=false)'
+          path: '__tests__/fixtures/integration/failing-tests.xml'
+          reporter: java-junit
+          fail-on-error: 'false'
+          fail-on-empty: 'true'
+
+      - name: Validate results
+        run: |
+          echo "=== Test Results ==="
+          echo "Step outcome: ${{ steps.report.outcome }}"
+          echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+          echo "Failed count: ${{ steps.report.outputs.failed }}"
+
+          # Step should pass (fail-on-error is false)
+          if [ "${{ steps.report.outcome }}" != "success" ]; then
+            echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
+            exit 1
+          fi
+
+          # Conclusion SHOULD be 'failure' because tests failed
+          # BUG: Currently returns 'success' - see issue #217
+          if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
+            echo "========================================"
+            echo "BUG DETECTED (Issue #217)"
+            echo "========================================"
+            echo "Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
+            echo "The check conclusion should reflect test results,"
+            echo "independent of the fail-on-error setting."
+            echo "========================================"
+            exit 1
+          fi
+          echo "PASS: All validations passed (bug is fixed!)"
+
+  # ============================================
+  # Scenario 5: Empty results, fail-on-empty=true
+  # Expected: Step FAILS
+  # ============================================
+  test-empty-fail-on-empty-true:
+    name: "Empty results | fail-on-empty=true"
+    runs-on: ubuntu-slim
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Run test reporter
+        id: report
+        continue-on-error: true
+        uses: ./
+        with:
+          name: 'Integration Test - Empty (fail-on-empty=true)'
+          path: '__tests__/fixtures/integration/nonexistent-*.xml'
+          reporter: java-junit
+          fail-on-error: 'true'
+          fail-on-empty: 'true'
+
+      - name: Validate results
+        run: |
+          echo "=== Test Results ==="
+          echo "Step outcome: ${{ steps.report.outcome }}"
+
+          # Step should fail (no files found)
+          if [ "${{ steps.report.outcome }}" != "failure" ]; then
+            echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
+            exit 1
+          fi
+          echo "PASS: Step correctly failed on empty results"
+
+  # ============================================
+  # Scenario 6: Empty results, fail-on-empty=false
+  # Expected: Step passes, conclusion=success
+  # ============================================
+  test-empty-fail-on-empty-false:
+    name: "Empty results | fail-on-empty=false"
+    runs-on: ubuntu-slim
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Run test reporter
+        id: report
+        continue-on-error: true
+        uses: ./
+        with:
+          name: 'Integration Test - Empty (fail-on-empty=false)'
+          path: '__tests__/fixtures/integration/nonexistent-*.xml'
+          reporter: java-junit
+          fail-on-error: 'true'
+          fail-on-empty: 'false'
+
+      - name: Validate results
+        run: |
+          echo "=== Test Results ==="
+          echo "Step outcome: ${{ steps.report.outcome }}"
+
+          # Step should pass (fail-on-empty is false)
+          if [ "${{ steps.report.outcome }}" != "success" ]; then
+            echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
+            exit 1
+          fi
+          echo "PASS: Step correctly passed with empty results"
+
+  # ============================================
+  # Summary job to report overall status
+  # ============================================
+  summary:
+    name: "Test Summary"
+    needs:
+      - test-passing-fail-on-error-true
+      - test-passing-fail-on-error-false
+      - test-failing-fail-on-error-true
+      - test-failing-fail-on-error-false
+      - test-empty-fail-on-empty-true
+      - test-empty-fail-on-empty-false
+    runs-on: ubuntu-slim
+    if: always()
+    steps:
+      - name: Generate summary
+        run: |
+          # Helper function to convert result to emoji
+          result_to_emoji() {
+            case "$1" in
+              success) echo "✅ Pass" ;;
+              failure) echo "❌ Fail" ;;
+              cancelled) echo "⚪ Cancelled" ;;
+              skipped) echo "⏭️ Skipped" ;;
+              *) echo "❓ Unknown" ;;
+            esac
+          }
+
+          # Generate markdown summary
+          cat >> $GITHUB_STEP_SUMMARY << 'EOF'
+          # Integration Test Results
+
+          ## fail-on-error / fail-on-empty Scenarios
+
+          | Scenario | Test Results | fail-on-error | fail-on-empty | Expected | Result |
+          |----------|--------------|---------------|---------------|----------|--------|
+          EOF
+
+          echo "| 1 | All pass | \`true\` | \`true\` | Step: pass, Check: success | $(result_to_emoji "${{ needs.test-passing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 2 | All pass | \`false\` | \`true\` | Step: pass, Check: success | $(result_to_emoji "${{ needs.test-passing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 3 | Some fail | \`true\` | \`true\` | Step: fail, Check: failure | $(result_to_emoji "${{ needs.test-failing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 4 ⚠️ | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 5 | Empty | \`true\` | \`true\` | Step: fail | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 6 | Empty | \`true\` | \`false\` | Step: pass | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+
+          cat >> $GITHUB_STEP_SUMMARY << 'EOF'
+
+          ---
+
+          > ⚠️ **Scenario 4** is expected to fail until [issue #217](https://github.com/dorny/test-reporter/issues/217) is fixed.
+          > The bug: when `fail-on-error=false`, the check conclusion shows `success` even when tests fail.
+          > Expected behavior: check conclusion should reflect actual test results, independent of `fail-on-error`.
+
+          EOF
+
+          # Also print to console
+          echo "=== Integration Test Summary ==="
+          echo "Scenario 1 (pass, fail-on-error=true): ${{ needs.test-passing-fail-on-error-true.result }}"
+          echo "Scenario 2 (pass, fail-on-error=false): ${{ needs.test-passing-fail-on-error-false.result }}"
+          echo "Scenario 3 (fail, fail-on-error=true): ${{ needs.test-failing-fail-on-error-true.result }}"
+          echo "Scenario 4 (fail, fail-on-error=false): ${{ needs.test-failing-fail-on-error-false.result }} (expected to fail until #217 fixed)"
+          echo "Scenario 5 (empty, fail-on-error=true): ${{ needs.test-empty-fail-on-empty-true.result }}"
+          echo "Scenario 6 (empty, fail-on-error=false): ${{ needs.test-empty-fail-on-empty-false.result }}"
diff --git a/__tests__/fixtures/integration/empty-tests.xml b/__tests__/fixtures/integration/empty-tests.xml
new file mode 100644
index 0000000..7384052
--- /dev/null
+++ b/__tests__/fixtures/integration/empty-tests.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="0" failures="0" errors="0">
+  <testsuite name="integration" tests="0" failures="0" errors="0">
+  </testsuite>
+</testsuites>
diff --git a/__tests__/fixtures/integration/failing-tests.xml b/__tests__/fixtures/integration/failing-tests.xml
new file mode 100644
index 0000000..7d4a3bf
--- /dev/null
+++ b/__tests__/fixtures/integration/failing-tests.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="3" failures="1" errors="0">
+  <testsuite name="integration" tests="3" failures="1" errors="0">
+    <testcase name="passing test one" classname="example"/>
+    <testcase name="failing test" classname="example">
+      <failure>
+          Expected: true
+          Received: false
+          at Object.test (/test/example.test.js:10:5)
+      </failure>
+    </testcase>
+    <testcase name="passing test two" classname="example"/>
+  </testsuite>
+</testsuites>
diff --git a/__tests__/fixtures/integration/passing-tests.xml b/__tests__/fixtures/integration/passing-tests.xml
new file mode 100644
index 0000000..f537f80
--- /dev/null
+++ b/__tests__/fixtures/integration/passing-tests.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="3" failures="0" errors="0">
+  <testsuite name="integration" tests="3" failures="0" errors="0">
+    <testcase name="passing test one" classname="example"/>
+    <testcase name="passing test two" classname="example"/>
+    <testcase name="passing test three" classname="example"/>
+  </testsuite>
+</testsuites>

From 3b5ad0231b156603f1810009e34b86087f2ed759 Mon Sep 17 00:00:00 2001
From: Jozef Izso
Date: Sun, 14 Dec 2025 12:46:07 +0100
Subject: [PATCH 2/3] Update scenario 4 to be a regression test for issue #217
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The bug has been fixed - the conclusion output now correctly reflects
test failures independent of the fail-on-error setting.

Updated comments and summary to indicate this is now a regression test.
🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
---
 .../workflows/integration-tests-issue-217.yml | 24 +++++++++----------
 1 file changed, 12 insertions(+), 12 deletions(-)

diff --git a/.github/workflows/integration-tests-issue-217.yml b/.github/workflows/integration-tests-issue-217.yml
index c02af56..17f7ae4 100644
--- a/.github/workflows/integration-tests-issue-217.yml
+++ b/.github/workflows/integration-tests-issue-217.yml
@@ -121,10 +121,10 @@ jobs:
   # ============================================
   # Scenario 4: Failing tests, fail-on-error=false
   # Expected: Step passes, conclusion=failure
-  # BUG: Currently conclusion=success (issue #217)
+  # Regression test for issue #217
   # ============================================
   test-failing-fail-on-error-false:
-    name: "Failing tests | fail-on-error=false [BUG #217]"
+    name: "Failing tests | fail-on-error=false [#217]"
     runs-on: ubuntu-slim
     steps:
       - uses: actions/checkout@v6
@@ -154,10 +154,10 @@ jobs:
           fi
 
           # Conclusion SHOULD be 'failure' because tests failed
-          # BUG: Currently returns 'success' - see issue #217
+          # Regression test for issue #217
           if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
             echo "========================================"
-            echo "BUG DETECTED (Issue #217)"
+            echo "REGRESSION DETECTED (Issue #217)"
             echo "========================================"
             echo "Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
             echo "The check conclusion should reflect test results,"
@@ -165,7 +165,7 @@ jobs:
             echo "========================================"
             exit 1
           fi
-          echo "PASS: All validations passed (bug is fixed!)"
+          echo "PASS: All validations passed"
 
   # ============================================
   # Scenario 5: Empty results, fail-on-empty=true
@@ -274,7 +274,7 @@ jobs:
           echo "| 1 | All pass | \`true\` | \`true\` | Step: pass, Check: success | $(result_to_emoji "${{ needs.test-passing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
           echo "| 2 | All pass | \`false\` | \`true\` | Step: pass, Check: success | $(result_to_emoji "${{ needs.test-passing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
           echo "| 3 | Some fail | \`true\` | \`true\` | Step: fail, Check: failure | $(result_to_emoji "${{ needs.test-failing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
-          echo "| 4 ⚠️ | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 4 | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
           echo "| 5 | Empty | \`true\` | \`true\` | Step: fail | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-true.result }}") |" >> $GITHUB_STEP_SUMMARY
           echo "| 6 | Empty | \`true\` | \`false\` | Step: pass | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-false.result }}") |" >> $GITHUB_STEP_SUMMARY
@@ -282,9 +282,9 @@ jobs:
 
           ---
 
-          > ⚠️ **Scenario 4** is expected to fail until [issue #217](https://github.com/dorny/test-reporter/issues/217) is fixed.
-          > The bug: when `fail-on-error=false`, the check conclusion shows `success` even when tests fail.
-          > Expected behavior: check conclusion should reflect actual test results, independent of `fail-on-error`.
+          > **Scenario 4** is a regression test for [issue #217](https://github.com/dorny/test-reporter/issues/217).
+          > It verifies that the `conclusion` output correctly reflects test failures, independent of the `fail-on-error` setting.
+          > When `fail-on-error=false`, the step should pass but `conclusion` should still be `failure` if tests failed.
 
           EOF
 
@@ -293,6 +293,6 @@ jobs:
           echo "Scenario 1 (pass, fail-on-error=true): ${{ needs.test-passing-fail-on-error-true.result }}"
           echo "Scenario 2 (pass, fail-on-error=false): ${{ needs.test-passing-fail-on-error-false.result }}"
           echo "Scenario 3 (fail, fail-on-error=true): ${{ needs.test-failing-fail-on-error-true.result }}"
-          echo "Scenario 4 (fail, fail-on-error=false): ${{ needs.test-failing-fail-on-error-false.result }} (expected to fail until #217 fixed)"
-          echo "Scenario 5 (empty, fail-on-error=true): ${{ needs.test-empty-fail-on-empty-true.result }}"
-          echo "Scenario 6 (empty, fail-on-error=false): ${{ needs.test-empty-fail-on-empty-false.result }}"
+          echo "Scenario 4 (fail, fail-on-error=false): ${{ needs.test-failing-fail-on-error-false.result }} (regression test for #217)"
+          echo "Scenario 5 (empty, fail-on-empty=true): ${{ needs.test-empty-fail-on-empty-true.result }}"
+          echo "Scenario 6 (empty, fail-on-empty=false): ${{ needs.test-empty-fail-on-empty-false.result }}"

From 12c7abe9ab003a99726d9da7f5b4597f9f8ad86d Mon Sep 17 00:00:00 2001
From: Jozef Izso
Date: Sun, 14 Dec 2025 13:08:12 +0100
Subject: [PATCH 3/3] Add conclusion output column to integration test summary
 table
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Added job outputs to expose conclusion from each test scenario
- Added new "Conclusion" column to summary table with colored badges
- Shows actual conclusion output (🟢 success / 🔴 failure / ⚫ N/A)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
---
 .../workflows/integration-tests-issue-217.yml | 38 +++++++++++++++----
 1 file changed, 30 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/integration-tests-issue-217.yml b/.github/workflows/integration-tests-issue-217.yml
index 17f7ae4..3ed5095 100644
--- a/.github/workflows/integration-tests-issue-217.yml
+++ b/.github/workflows/integration-tests-issue-217.yml
@@ -19,6 +19,8 @@ jobs:
   test-passing-fail-on-error-true:
     name: "Passing tests | fail-on-error=true"
     runs-on: ubuntu-slim
+    outputs:
+      conclusion: ${{ steps.report.outputs.conclusion }}
     steps:
       - uses: actions/checkout@v6
 
@@ -53,6 +55,8 @@ jobs:
   test-passing-fail-on-error-false:
     name: "Passing tests | fail-on-error=false"
     runs-on: ubuntu-slim
+    outputs:
+      conclusion: ${{ steps.report.outputs.conclusion }}
     steps:
       - uses: actions/checkout@v6
 
@@ -84,6 +88,8 @@ jobs:
   test-failing-fail-on-error-true:
     name: "Failing tests | fail-on-error=true"
     runs-on: ubuntu-slim
+    outputs:
+      conclusion: ${{ steps.report.outputs.conclusion }}
     steps:
       - uses: actions/checkout@v6
 
@@ -126,6 +132,8 @@ jobs:
   test-failing-fail-on-error-false:
     name: "Failing tests | fail-on-error=false [#217]"
     runs-on: ubuntu-slim
+    outputs:
+      conclusion: ${{ steps.report.outputs.conclusion }}
     steps:
       - uses: actions/checkout@v6
 
@@ -174,6 +182,8 @@ jobs:
   test-empty-fail-on-empty-true:
     name: "Empty results | fail-on-empty=true"
    runs-on: ubuntu-slim
+    outputs:
+      conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
     steps:
       - uses: actions/checkout@v6
 
@@ -207,6 +217,8 @@ jobs:
   test-empty-fail-on-empty-false:
     name: "Empty results | fail-on-empty=false"
     runs-on: ubuntu-slim
+    outputs:
+      conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
     steps:
       - uses: actions/checkout@v6
 
@@ -261,22 +273,32 @@ jobs:
             esac
           }
 
+          # Helper function to format conclusion
+          conclusion_to_badge() {
+            case "$1" in
+              success) echo "🟢 success" ;;
+              failure) echo "🔴 failure" ;;
+              N/A) echo "⚫ N/A" ;;
+              *) echo "⚪ $1" ;;
+            esac
+          }
+
           # Generate markdown summary
           cat >> $GITHUB_STEP_SUMMARY << 'EOF'
           # Integration Test Results
 
           ## fail-on-error / fail-on-empty Scenarios
 
-          | Scenario | Test Results | fail-on-error | fail-on-empty | Expected | Result |
-          |----------|--------------|---------------|---------------|----------|--------|
+          | Scenario | Test Results | fail-on-error | fail-on-empty | Expected | Conclusion | Result |
+          |----------|--------------|---------------|---------------|----------|------------|--------|
           EOF
 
-          echo "| 1 | All pass | \`true\` | \`true\` | Step: pass, Check: success | $(result_to_emoji "${{ needs.test-passing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
-          echo "| 2 | All pass | \`false\` | \`true\` | Step: pass, Check: success | $(result_to_emoji "${{ needs.test-passing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
-          echo "| 3 | Some fail | \`true\` | \`true\` | Step: fail, Check: failure | $(result_to_emoji "${{ needs.test-failing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
-          echo "| 4 | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
-          echo "| 5 | Empty | \`true\` | \`true\` | Step: fail | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-true.result }}") |" >> $GITHUB_STEP_SUMMARY
-          echo "| 6 | Empty | \`true\` | \`false\` | Step: pass | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 1 | All pass | \`true\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 2 | All pass | \`false\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 3 | Some fail | \`true\` | \`true\` | Step: fail, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 4 | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 5 | Empty | \`true\` | \`true\` | Step: fail | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+          echo "| 6 | Empty | \`true\` | \`false\` | Step: pass | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-false.result }}") |" >> $GITHUB_STEP_SUMMARY
 
           cat >> $GITHUB_STEP_SUMMARY << 'EOF'