mirror of https://github.com/dorny/test-reporter.git synced 2026-02-04 05:27:55 +01:00

Add new parameter list-files for when there are multiple files in a report

Felix Chapman 2025-12-23 18:25:29 +00:00
parent ee446707ff
commit 20f3d370e2
5 changed files with 180 additions and 6 deletions

View file

@@ -182,6 +182,12 @@ jobs:
# none
list-tests: 'all'
# Limits which test result files are listed:
# all
# failed
# none
list-files: 'all'
# Limits number of created annotations with error message and stack trace captured during test execution.
# Must be less or equal to 50.
max-annotations: '10'
@@ -413,7 +419,7 @@ Support for Swift test results in xUnit format is experimental - should work but
Unfortunately, there are some known issues and limitations caused by GitHub API:
- Test report (i.e. build summary) is Markdown text. No custom styling or HTML is possible.
- Maximum report size is 65535 bytes. Input parameters `list-suites`, `list-tests`, and `list-files` will be automatically adjusted if max size is exceeded.
- Test report can't reference any additional files (e.g. screenshots). You can use `actions/upload-artifact@v4` to upload them and inspect them manually.
- Check Runs are created for specific commit SHA. It's not possible to specify under which workflow test report should belong if more
workflows are running for the same SHA. Thanks to this GitHub "feature" it's possible your test report will appear in an unexpected place in GitHub UI.

View file

@@ -1,4 +1,5 @@
import {DEFAULT_OPTIONS, getBadge, getReport, ReportOptions} from '../../src/report/get-report'
import {TestCaseResult, TestGroupResult, TestRunResult, TestSuiteResult} from '../../src/test-results'
describe('getBadge', () => {
describe('URI encoding with special characters', () => {
@@ -118,3 +119,144 @@ describe('getBadge', () => {
})
})
describe('getReport', () => {
// Helper function to create test results
function createTestResult(path: string, passed: number, failed: number, skipped: number): TestRunResult {
const tests: TestCaseResult[] = []
for (let i = 0; i < passed; i++) {
tests.push(new TestCaseResult(`passed-test-${i}`, 'success', 100))
}
for (let i = 0; i < failed; i++) {
tests.push(new TestCaseResult(`failed-test-${i}`, 'failed', 100, {
details: 'Test failed',
message: 'Assertion error'
}))
}
for (let i = 0; i < skipped; i++) {
tests.push(new TestCaseResult(`skipped-test-${i}`, 'skipped', 0))
}
const group = new TestGroupResult('test-group', tests)
const suite = new TestSuiteResult('test-suite', [group])
return new TestRunResult(path, [suite])
}
describe('list-files parameter', () => {
const results = [
createTestResult('passing-file.spec.ts', 5, 0, 0),
createTestResult('failing-file.spec.ts', 3, 2, 1),
createTestResult('passing-with-skipped-file.spec.ts', 8, 0, 2)
]
it('shows all files when list-files is "all"', () => {
const report = getReport(results, {
...DEFAULT_OPTIONS,
listFiles: 'all',
listSuites: 'none',
listTests: 'none'
})
expect(report).toContain('passing-file.spec.ts')
expect(report).toContain('failing-file.spec.ts')
expect(report).toContain('passing-with-skipped-file.spec.ts')
})
it('shows only failed files when list-files is "failed"', () => {
const report = getReport(results, {
...DEFAULT_OPTIONS,
listFiles: 'failed',
listSuites: 'none',
listTests: 'none'
})
expect(report).not.toContain('passing-file.spec.ts')
expect(report).toContain('failing-file.spec.ts')
expect(report).not.toContain('passing-with-skipped-file.spec.ts')
})
it('shows no file details when list-files is "none"', () => {
const report = getReport(results, {
...DEFAULT_OPTIONS,
listFiles: 'none',
listSuites: 'none',
listTests: 'none'
})
// Should still have badge
expect(report).toContain('![')
// Should not have file names in detail sections
expect(report).not.toContain('passing-file.spec.ts')
expect(report).not.toContain('failing-file.spec.ts')
expect(report).not.toContain('passing-with-skipped-file.spec.ts')
})
it('includes summary table even with list-files "none"', () => {
const report = getReport(results, {
...DEFAULT_OPTIONS,
listFiles: 'none',
listSuites: 'all',
listTests: 'none'
})
// Badge should still be present
expect(report).toContain('![')
expect(report).toContain('badge')
// File names should not be present
expect(report).not.toContain('passing-file.spec.ts')
expect(report).not.toContain('failing-file.spec.ts')
expect(report).not.toContain('passing-with-skipped-file.spec.ts')
})
it('works correctly with list-suites and list-tests when list-files is "failed"', () => {
const report = getReport(results, {
...DEFAULT_OPTIONS,
listFiles: 'failed',
listSuites: 'all',
listTests: 'all'
})
expect(report).not.toContain('passing-file.spec.ts')
expect(report).toContain('failing-file.spec.ts')
expect(report).not.toContain('passing-with-skipped-file.spec.ts')
// Should show suite details for the failed file
expect(report).toContain('test-suite')
})
it('filters correctly when all files pass and list-files is "failed"', () => {
const allPassingResults = [
createTestResult('passing-file-1.spec.ts', 5, 0, 0),
createTestResult('passing-file-2.spec.ts', 8, 0, 2)
]
const report = getReport(allPassingResults, {
...DEFAULT_OPTIONS,
listFiles: 'failed',
listSuites: 'all',
listTests: 'none'
})
expect(report).not.toContain('passing-file-1.spec.ts')
expect(report).not.toContain('passing-file-2.spec.ts')
// Badge should still be present
expect(report).toContain('![')
expect(report).toContain('badge')
})
it('filters correctly when all files fail and list-files is "failed"', () => {
const allFailingResults = [
createTestResult('failing-file-1.spec.ts', 0, 5, 0),
createTestResult('failing-file-2.spec.ts', 1, 2, 1)
]
const report = getReport(allFailingResults, {
...DEFAULT_OPTIONS,
listFiles: 'failed',
listSuites: 'all',
listTests: 'none'
})
expect(report).toContain('failing-file-1.spec.ts')
expect(report).toContain('failing-file-2.spec.ts')
})
})
})

View file

@@ -52,6 +52,14 @@ inputs:
- none
required: false
default: 'all'
list-files:
description: |
Limits which test result files are listed. Supported options:
- all
- failed
- none
required: false
default: 'all'
max-annotations:
description: |
Limits number of created annotations with error message and stack trace captured during test execution.

View file

@@ -41,6 +41,7 @@ class TestReporter {
readonly reporter = core.getInput('reporter', {required: true})
readonly listSuites = core.getInput('list-suites', {required: true}) as 'all' | 'failed' | 'none'
readonly listTests = core.getInput('list-tests', {required: true}) as 'all' | 'failed' | 'none'
readonly listFiles = core.getInput('list-files', {required: true}) as 'all' | 'failed' | 'none'
readonly maxAnnotations = parseInt(core.getInput('max-annotations', {required: true}))
readonly failOnError = core.getInput('fail-on-error', {required: true}) === 'true'
readonly failOnEmpty = core.getInput('fail-on-empty', {required: true}) === 'true'
@@ -67,6 +68,11 @@ class TestReporter {
return
}
if (this.listFiles !== 'all' && this.listFiles !== 'failed' && this.listFiles !== 'none') {
core.setFailed(`Input parameter 'list-files' has invalid value`)
return
}
if (this.collapsed !== 'auto' && this.collapsed !== 'always' && this.collapsed !== 'never') {
core.setFailed(`Input parameter 'collapsed' has invalid value`)
return
@@ -172,7 +178,7 @@ class TestReporter {
}
}
const {listSuites, listTests, listFiles, onlySummary, useActionsSummary, badgeTitle, reportTitle, collapsed} = this
const passed = results.reduce((sum, tr) => sum + tr.passed, 0)
const failed = results.reduce((sum, tr) => sum + tr.failed, 0)
@@ -186,6 +192,7 @@ class TestReporter {
{
listSuites,
listTests,
listFiles,
baseUrl,
onlySummary,
useActionsSummary,
@@ -217,6 +224,7 @@ class TestReporter {
const summary = getReport(results, {
listSuites,
listTests,
listFiles,
baseUrl,
onlySummary,
useActionsSummary,
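
The diff above validates `list-files` with the same inline check already used for `collapsed` before passing it through to `getReport`. For illustration only, the repeated validate-then-narrow pattern could be factored into a small helper; this is a hypothetical sketch (the `readListInput` name and `ListOption` alias are not part of this commit), not the action's actual code:

```typescript
import * as core from '@actions/core'

type ListOption = 'all' | 'failed' | 'none'

// Reads an enum-like input and fails the workflow step when the value is not
// one of the supported options, mirroring the manual checks in TestReporter.
function readListInput(name: string): ListOption | undefined {
  const value = core.getInput(name, {required: true})
  if (value === 'all' || value === 'failed' || value === 'none') {
    return value
  }
  core.setFailed(`Input parameter '${name}' has invalid value`)
  return undefined
}

// Hypothetical usage: const listFiles = readListInput('list-files')
```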

View file

@@ -11,6 +11,7 @@ const MAX_ACTIONS_SUMMARY_LENGTH = 1048576
export interface ReportOptions {
listSuites: 'all' | 'failed' | 'none'
listTests: 'all' | 'failed' | 'none'
listFiles: 'all' | 'failed' | 'none'
baseUrl: string
onlySummary: boolean
useActionsSummary: boolean
@@ -22,6 +23,7 @@ export interface ReportOptions {
export const DEFAULT_OPTIONS: ReportOptions = {
listSuites: 'all',
listTests: 'all',
listFiles: 'all',
baseUrl: '',
onlySummary: false,
useActionsSummary: true,
@@ -171,8 +173,16 @@ function getTestRunsReport(testRuns: TestRunResult[], options: ReportOptions): s
sections.push(` `)
}
// Filter test runs based on list-files option
const filteredTestRuns =
options.listFiles === 'failed'
? testRuns.filter(tr => tr.result === 'failed')
: options.listFiles === 'none'
? []
: testRuns
if (filteredTestRuns.length > 0 || options.onlySummary) {
const tableData = filteredTestRuns
.map((tr, originalIndex) => ({tr, originalIndex}))
.filter(({tr}) => tr.passed > 0 || tr.failed > 0 || tr.skipped > 0)
.map(({tr, originalIndex}) => {
@@ -195,7 +205,7 @@ function getTestRunsReport(testRuns: TestRunResult[], options: ReportOptions): s
}
if (options.onlySummary === false) {
const suitesReports = filteredTestRuns.map((tr, i) => getSuitesReport(tr, i, options)).flat()
sections.push(...suitesReports)
}
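
Taken together, the change to getTestRunsReport reduces to a simple rule: 'failed' keeps only failing result files, 'none' drops the per-file detail sections entirely (the badge remains, as the tests above assert), and 'all' keeps everything. A minimal standalone sketch of that rule, using invented names (`ListOption`, `RunLike`, `filterRuns`) rather than the project's actual types:

```typescript
type ListOption = 'all' | 'failed' | 'none'

// Just the fields of a test run that the filter looks at.
interface RunLike {
  path: string
  result: 'success' | 'failed' | 'skipped'
}

// Mirrors the nested ternary added to getTestRunsReport above.
function filterRuns<T extends RunLike>(runs: T[], listFiles: ListOption): T[] {
  if (listFiles === 'failed') return runs.filter(r => r.result === 'failed')
  if (listFiles === 'none') return []
  return runs
}

// With 'failed', only the failing file is listed in the report body.
const runs: RunLike[] = [
  {path: 'passing-file.spec.ts', result: 'success'},
  {path: 'failing-file.spec.ts', result: 'failed'}
]
console.log(filterRuns(runs, 'failed').map(r => r.path)) // ['failing-file.spec.ts']
```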