diff --git a/.github/workflows/integration-tests-issue-217.yml b/.github/workflows/integration-tests-issue-217.yml
new file mode 100644
index 0000000..3ed5095
--- /dev/null
+++ b/.github/workflows/integration-tests-issue-217.yml
@@ -0,0 +1,320 @@
+name: Integration Tests (#217) - fail-on-error/fail-on-empty
+
+on:
+ workflow_dispatch:
+ push:
+ pull_request:
+ paths:
+ - 'src/**'
+ - 'dist/**'
+ - 'action.yml'
+      - '.github/workflows/integration-tests-issue-217.yml'
+ - '__tests__/fixtures/integration/**'
+
+jobs:
+ # ============================================
+ # Scenario 1: Passing tests, fail-on-error=true
+ # Expected: Step passes, conclusion=success
+ # ============================================
+ test-passing-fail-on-error-true:
+ name: "Passing tests | fail-on-error=true"
+ runs-on: ubuntu-slim
+ outputs:
+ conclusion: ${{ steps.report.outputs.conclusion }}
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Run test reporter
+ id: report
+ uses: ./
+ with:
+ name: 'Integration Test - Passing (fail-on-error=true)'
+ path: '__tests__/fixtures/integration/passing-tests.xml'
+ reporter: java-junit
+ fail-on-error: 'true'
+ fail-on-empty: 'true'
+
+ - name: Validate results
+ run: |
+ echo "=== Test Results ==="
+ echo "Step outcome: success (would have failed otherwise)"
+ echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+ echo "Passed: ${{ steps.report.outputs.passed }}"
+ echo "Failed: ${{ steps.report.outputs.failed }}"
+
+ if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
+ echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
+ exit 1
+ fi
+ echo "PASS: All validations passed"
+
+ # ============================================
+ # Scenario 2: Passing tests, fail-on-error=false
+ # Expected: Step passes, conclusion=success
+ # ============================================
+ test-passing-fail-on-error-false:
+ name: "Passing tests | fail-on-error=false"
+ runs-on: ubuntu-slim
+ outputs:
+ conclusion: ${{ steps.report.outputs.conclusion }}
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Run test reporter
+ id: report
+ uses: ./
+ with:
+ name: 'Integration Test - Passing (fail-on-error=false)'
+ path: '__tests__/fixtures/integration/passing-tests.xml'
+ reporter: java-junit
+ fail-on-error: 'false'
+ fail-on-empty: 'true'
+
+ - name: Validate results
+ run: |
+ echo "=== Test Results ==="
+ echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+
+ if [ "${{ steps.report.outputs.conclusion }}" != "success" ]; then
+ echo "FAIL: Expected conclusion 'success' but got '${{ steps.report.outputs.conclusion }}'"
+ exit 1
+ fi
+ echo "PASS: All validations passed"
+
+ # ============================================
+ # Scenario 3: Failing tests, fail-on-error=true
+ # Expected: Step FAILS, conclusion=failure
+ # ============================================
+ test-failing-fail-on-error-true:
+ name: "Failing tests | fail-on-error=true"
+ runs-on: ubuntu-slim
+ outputs:
+ conclusion: ${{ steps.report.outputs.conclusion }}
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Run test reporter
+ id: report
+ continue-on-error: true
+ uses: ./
+ with:
+ name: 'Integration Test - Failing (fail-on-error=true)'
+ path: '__tests__/fixtures/integration/failing-tests.xml'
+ reporter: java-junit
+ fail-on-error: 'true'
+ fail-on-empty: 'true'
+
+ - name: Validate results
+ run: |
+ echo "=== Test Results ==="
+ echo "Step outcome: ${{ steps.report.outcome }}"
+ echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+ echo "Failed count: ${{ steps.report.outputs.failed }}"
+
+ # Step should fail
+ if [ "${{ steps.report.outcome }}" != "failure" ]; then
+ echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
+ exit 1
+ fi
+
+ # Conclusion should be failure
+ if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
+ echo "FAIL: Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
+ exit 1
+ fi
+ echo "PASS: All validations passed"
+
+ # ============================================
+ # Scenario 4: Failing tests, fail-on-error=false
+ # Expected: Step passes, conclusion=failure
+ # Regression test for issue #217
+ # ============================================
+ test-failing-fail-on-error-false:
+ name: "Failing tests | fail-on-error=false [#217]"
+ runs-on: ubuntu-slim
+ outputs:
+ conclusion: ${{ steps.report.outputs.conclusion }}
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Run test reporter
+ id: report
+ continue-on-error: true
+ uses: ./
+ with:
+ name: 'Integration Test - Failing (fail-on-error=false)'
+ path: '__tests__/fixtures/integration/failing-tests.xml'
+ reporter: java-junit
+ fail-on-error: 'false'
+ fail-on-empty: 'true'
+
+ - name: Validate results
+ run: |
+ echo "=== Test Results ==="
+ echo "Step outcome: ${{ steps.report.outcome }}"
+ echo "Conclusion: ${{ steps.report.outputs.conclusion }}"
+ echo "Failed count: ${{ steps.report.outputs.failed }}"
+
+ # Step should pass (fail-on-error is false)
+ if [ "${{ steps.report.outcome }}" != "success" ]; then
+ echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
+ exit 1
+ fi
+
+ # Conclusion SHOULD be 'failure' because tests failed
+ # Regression test for issue #217
+ if [ "${{ steps.report.outputs.conclusion }}" != "failure" ]; then
+ echo "========================================"
+ echo "REGRESSION DETECTED (Issue #217)"
+ echo "========================================"
+ echo "Expected conclusion 'failure' but got '${{ steps.report.outputs.conclusion }}'"
+ echo "The check conclusion should reflect test results,"
+ echo "independent of the fail-on-error setting."
+ echo "========================================"
+ exit 1
+ fi
+ echo "PASS: All validations passed"
+
+ # ============================================
+ # Scenario 5: Empty results, fail-on-empty=true
+ # Expected: Step FAILS
+ # ============================================
+ test-empty-fail-on-empty-true:
+ name: "Empty results | fail-on-empty=true"
+ runs-on: ubuntu-slim
+ outputs:
+ conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Run test reporter
+ id: report
+ continue-on-error: true
+ uses: ./
+ with:
+ name: 'Integration Test - Empty (fail-on-empty=true)'
+ path: '__tests__/fixtures/integration/nonexistent-*.xml'
+ reporter: java-junit
+ fail-on-error: 'true'
+ fail-on-empty: 'true'
+
+ - name: Validate results
+ run: |
+ echo "=== Test Results ==="
+ echo "Step outcome: ${{ steps.report.outcome }}"
+
+ # Step should fail (no files found)
+ if [ "${{ steps.report.outcome }}" != "failure" ]; then
+ echo "FAIL: Expected step to fail but got '${{ steps.report.outcome }}'"
+ exit 1
+ fi
+ echo "PASS: Step correctly failed on empty results"
+
+ # ============================================
+ # Scenario 6: Empty results, fail-on-empty=false
+ # Expected: Step passes, conclusion=success
+ # ============================================
+ test-empty-fail-on-empty-false:
+ name: "Empty results | fail-on-empty=false"
+ runs-on: ubuntu-slim
+ outputs:
+ conclusion: ${{ steps.report.outputs.conclusion || 'N/A' }}
+ steps:
+ - uses: actions/checkout@v6
+
+ - name: Run test reporter
+ id: report
+ continue-on-error: true
+ uses: ./
+ with:
+ name: 'Integration Test - Empty (fail-on-empty=false)'
+ path: '__tests__/fixtures/integration/nonexistent-*.xml'
+ reporter: java-junit
+ fail-on-error: 'true'
+ fail-on-empty: 'false'
+
+ - name: Validate results
+ run: |
+ echo "=== Test Results ==="
+ echo "Step outcome: ${{ steps.report.outcome }}"
+
+ # Step should pass (fail-on-empty is false)
+ if [ "${{ steps.report.outcome }}" != "success" ]; then
+ echo "FAIL: Expected step to pass but got '${{ steps.report.outcome }}'"
+ exit 1
+ fi
+ echo "PASS: Step correctly passed with empty results"
+
+ # ============================================
+ # Summary job to report overall status
+ # ============================================
+ summary:
+ name: "Test Summary"
+ needs:
+ - test-passing-fail-on-error-true
+ - test-passing-fail-on-error-false
+ - test-failing-fail-on-error-true
+ - test-failing-fail-on-error-false
+ - test-empty-fail-on-empty-true
+ - test-empty-fail-on-empty-false
+ runs-on: ubuntu-slim
+ if: always()
+ steps:
+ - name: Generate summary
+ run: |
+ # Helper function to convert result to emoji
+ result_to_emoji() {
+ case "$1" in
+ success) echo "✅ Pass" ;;
+ failure) echo "❌ Fail" ;;
+ cancelled) echo "⚪ Cancelled" ;;
+ skipped) echo "⏭️ Skipped" ;;
+ *) echo "❓ Unknown" ;;
+ esac
+ }
+
+ # Helper function to format conclusion
+ conclusion_to_badge() {
+ case "$1" in
+ success) echo "🟢 success" ;;
+ failure) echo "🔴 failure" ;;
+ N/A) echo "⚫ N/A" ;;
+ *) echo "⚪ $1" ;;
+ esac
+ }
+
+ # Generate markdown summary
+ cat >> $GITHUB_STEP_SUMMARY << 'EOF'
+ # Integration Test Results
+
+ ## fail-on-error / fail-on-empty Scenarios
+
+ | Scenario | Test Results | fail-on-error | fail-on-empty | Expected | Conclusion | Result |
+ |----------|--------------|---------------|---------------|----------|------------|--------|
+ EOF
+
+ echo "| 1 | All pass | \`true\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+ echo "| 2 | All pass | \`false\` | \`true\` | Step: pass, Check: success | $(conclusion_to_badge "${{ needs.test-passing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-passing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+ echo "| 3 | Some fail | \`true\` | \`true\` | Step: fail, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+ echo "| 4 | Some fail | \`false\` | \`true\` | Step: pass, Check: failure | $(conclusion_to_badge "${{ needs.test-failing-fail-on-error-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-failing-fail-on-error-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+ echo "| 5 | Empty | \`true\` | \`true\` | Step: fail | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-true.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-true.result }}") |" >> $GITHUB_STEP_SUMMARY
+ echo "| 6 | Empty | \`true\` | \`false\` | Step: pass | $(conclusion_to_badge "${{ needs.test-empty-fail-on-empty-false.outputs.conclusion }}") | $(result_to_emoji "${{ needs.test-empty-fail-on-empty-false.result }}") |" >> $GITHUB_STEP_SUMMARY
+
+ cat >> $GITHUB_STEP_SUMMARY << 'EOF'
+
+ ---
+
+ > **Scenario 4** is a regression test for [issue #217](https://github.com/dorny/test-reporter/issues/217).
+ > It verifies that `conclusion` output correctly reflects test failures, independent of `fail-on-error` setting.
+ > When `fail-on-error=false`, the step should pass but `conclusion` should still be `failure` if tests failed.
+
+ EOF
+
+ # Also print to console
+ echo "=== Integration Test Summary ==="
+ echo "Scenario 1 (pass, fail-on-error=true): ${{ needs.test-passing-fail-on-error-true.result }}"
+ echo "Scenario 2 (pass, fail-on-error=false): ${{ needs.test-passing-fail-on-error-false.result }}"
+ echo "Scenario 3 (fail, fail-on-error=true): ${{ needs.test-failing-fail-on-error-true.result }}"
+ echo "Scenario 4 (fail, fail-on-error=false): ${{ needs.test-failing-fail-on-error-false.result }} (regression test for #217)"
+ echo "Scenario 5 (empty, fail-on-empty=true): ${{ needs.test-empty-fail-on-empty-true.result }}"
+ echo "Scenario 6 (empty, fail-on-empty=false): ${{ needs.test-empty-fail-on-empty-false.result }}"
diff --git a/__tests__/fixtures/integration/empty-tests.xml b/__tests__/fixtures/integration/empty-tests.xml
new file mode 100644
index 0000000..7384052
--- /dev/null
+++ b/__tests__/fixtures/integration/empty-tests.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites>
+  <testsuite name="Integration Empty Suite" tests="0" failures="0" errors="0" time="0">
+  </testsuite>
+</testsuites>
diff --git a/__tests__/fixtures/integration/failing-tests.xml b/__tests__/fixtures/integration/failing-tests.xml
new file mode 100644
index 0000000..7d4a3bf
--- /dev/null
+++ b/__tests__/fixtures/integration/failing-tests.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites>
+  <testsuite name="Integration Failing Suite" tests="3" failures="1" errors="0" time="0.25">
+    <testcase name="should pass first check" classname="example" time="0.05"/>
+    <testcase name="should fail assertion" classname="example" time="0.10">
+      <failure message="Expected true but received false" type="AssertionError">
+Expected: true
+Received: false
+    at Object.test (/test/example.test.js:10:5)
+      </failure>
+    </testcase>
+    <testcase name="should pass last check" classname="example" time="0.10"/>
+  </testsuite>
+</testsuites>
diff --git a/__tests__/fixtures/integration/passing-tests.xml b/__tests__/fixtures/integration/passing-tests.xml
new file mode 100644
index 0000000..f537f80
--- /dev/null
+++ b/__tests__/fixtures/integration/passing-tests.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites>
+  <testsuite name="Integration Passing Suite" tests="3" failures="0" errors="0" time="0.15">
+    <testcase name="should pass first check" classname="example" time="0.05"/>
+    <testcase name="should pass second check" classname="example" time="0.05"/>
+    <testcase name="should pass third check" classname="example" time="0.05"/>
+  </testsuite>
+</testsuites>