Add list-suites and list-tests options to limit report size

Michal Dorner 2021-01-25 12:53:45 +01:00
parent 0919385c06
commit 3744805866
No known key found for this signature in database
GPG key ID: 9EEE04B48DA36786
20 changed files with 28593 additions and 18534 deletions


@ -61,6 +61,17 @@ jobs:
# jest-junit
reporter: ''
# Limits which test suites are listed:
# all
# only-failed
list-suites: 'all'
# Limits which test cases are listed:
# all
# failed
# none
list-tests: 'all'
# Limits the number of created annotations with error message and stack trace captured during test execution.
# Must be less than or equal to 50.
max-annotations: '10'


@ -6,12 +6,12 @@
| Result | Suite | Tests | Time | Passed ✔️ | Skipped ✖️ | Failed ❌ |
| :---: | :--- | ---: | ---: | ---: | ---: | ---: |
| ❌ | [test\main_test.dart](#ts-0-test-maintest-dart) | 4 | 74ms | 1 | 0 | 3 |
| ❌ | [test\second_test.dart](#ts-1-test-secondtest-dart) | 2 | 51ms | 0 | 1 | 1 |
| ❌ | [test\main_test.dart](#r0s0-test-maintest-dart) | 4 | 74ms | 1 | 0 | 3 |
| ❌ | [test\second_test.dart](#r0s1-test-secondtest-dart) | 2 | 51ms | 0 | 1 | 1 |
# Test Suites
## <a id="user-content-ts-0-test-maintest-dart" href="#ts-0-test-maintest-dart">test\main_test.dart</a>
## <a id="user-content-r0s0-test-maintest-dart" href="#r0s0-test-maintest-dart">test\main_test.dart</a>
### Test 1
@ -32,7 +32,7 @@
| :---: | :--- | ---: |
| ❌ | Test 2 Exception in test | 12ms |
## <a id="user-content-ts-1-test-secondtest-dart" href="#ts-1-test-secondtest-dart">test\second_test.dart</a>
## <a id="user-content-r0s1-test-secondtest-dart" href="#r0s1-test-secondtest-dart">test\second_test.dart</a>
| Result | Test | Time |
| :---: | :--- | ---: |


@ -6,11 +6,11 @@
| Result | Suite | Tests | Time | Passed ✔️ | Skipped ✖️ | Failed ❌ |
| :---: | :--- | ---: | ---: | ---: | ---: | ---: |
| ❌ | [DotnetTests.XUnitTests.CalculatorTests](#ts-0-DotnetTests-XUnitTests-CalculatorTests) | 7 | 109.5761ms | 3 | 1 | 3 |
| ❌ | [DotnetTests.XUnitTests.CalculatorTests](#r0s0-DotnetTests-XUnitTests-CalculatorTests) | 7 | 109.5761ms | 3 | 1 | 3 |
# Test Suites
## <a id="user-content-ts-0-DotnetTests-XUnitTests-CalculatorTests" href="#ts-0-DotnetTests-XUnitTests-CalculatorTests">DotnetTests.XUnitTests.CalculatorTests</a>
## <a id="user-content-r0s0-DotnetTests-XUnitTests-CalculatorTests" href="#r0s0-DotnetTests-XUnitTests-CalculatorTests">DotnetTests.XUnitTests.CalculatorTests</a>
| Result | Test | Time |
| :---: | :--- | ---: |


@ -6,12 +6,12 @@
| Result | Suite | Tests | Time | Passed ✔️ | Skipped ✖️ | Failed ❌ |
| :---: | :--- | ---: | ---: | ---: | ---: | ---: |
| ❌ | [__tests__\main.test.js](#ts-0-tests-main-test-js) | 4 | 486ms | 1 | 0 | 3 |
| ❌ | [__tests__\second.test.js](#ts-1-tests-second-test-js) | 2 | 82ms | 0 | 1 | 1 |
| ❌ | [__tests__\main.test.js](#r0s0-tests-main-test-js) | 4 | 486ms | 1 | 0 | 3 |
| ❌ | [__tests__\second.test.js](#r0s1-tests-second-test-js) | 2 | 82ms | 0 | 1 | 1 |
# Test Suites
## <a id="user-content-ts-0-tests-main-test-js" href="#ts-0-tests-main-test-js">__tests__\main.test.js</a>
## <a id="user-content-r0s0-tests-main-test-js" href="#r0s0-tests-main-test-js">__tests__\main.test.js</a>
### Test 1
@ -32,7 +32,7 @@
| :---: | :--- | ---: |
| ❌ | Exception in test | 0ms |
## <a id="user-content-ts-1-tests-second-test-js" href="#ts-1-tests-second-test-js">__tests__\second.test.js</a>
## <a id="user-content-r0s1-tests-second-test-js" href="#r0s1-tests-second-test-js">__tests__\second.test.js</a>
| Result | Test | Time |
| :---: | :--- | ---: |

File diff suppressed because it is too large


@ -52,47 +52,75 @@ dart:isolate _RawReceivePortImpl._handleMessage
"title": "[test\\\\second_test.dart] Timeout test",
},
],
"summary": "![Tests failed](https://img.shields.io/badge/tests-1%20passed%2C%201%20skipped%2C%204%20failed-critical)
### fixtures/dart-json.json
**6** tests were completed in **3.760s** with **1** passed, **1** skipped and **4** failed.
| Result | Suite | Tests | Time | Passed ✔️ | Skipped ✖️ | Failed ❌ |
| :---: | :--- | ---: | ---: | ---: | ---: | ---: |
| ❌ | [test\\\\main_test.dart](#ts-0-test-maintest-dart) | 4 | 74ms | 1 | 0 | 3 |
| ❌ | [test\\\\second_test.dart](#ts-1-test-secondtest-dart) | 2 | 51ms | 0 | 1 | 1 |
# Test Suites
## <a id=\\"user-content-ts-0-test-maintest-dart\\" href=\\"#ts-0-test-maintest-dart\\">test\\\\main_test.dart</a> ❌
### Test 1
| Result | Test | Time |
| :---: | :--- | ---: |
| ✔️ | Test 1 Passing test | 36ms |
### Test 1 Test 1.1
| Result | Test | Time |
| :---: | :--- | ---: |
| ❌ | Test 1 Test 1.1 Failing test | 20ms |
| ❌ | Test 1 Test 1.1 Exception in target unit | 6ms |
### Test 2
| Result | Test | Time |
| :---: | :--- | ---: |
| ❌ | Test 2 Exception in test | 12ms |
## <a id=\\"user-content-ts-1-test-secondtest-dart\\" href=\\"#ts-1-test-secondtest-dart\\">test\\\\second_test.dart</a> ❌
| Result | Test | Time |
| :---: | :--- | ---: |
| ❌ | Timeout test | 37ms |
| ✖️ | Skipped test | 14ms |
",
"title": "Dart tests ❌",
"testRuns": Array [
TestRunResult {
"path": "fixtures/dart-json.json",
"suites": Array [
TestSuiteResult {
"groups": Array [
TestGroupResult {
"name": "Test 1",
"tests": Array [
TestCaseResult {
"name": "Test 1 Passing test",
"result": "success",
"time": 36,
},
],
},
TestGroupResult {
"name": "Test 1 Test 1.1",
"tests": Array [
TestCaseResult {
"name": "Test 1 Test 1.1 Failing test",
"result": "failed",
"time": 20,
},
TestCaseResult {
"name": "Test 1 Test 1.1 Exception in target unit",
"result": "failed",
"time": 6,
},
],
},
TestGroupResult {
"name": "Test 2",
"tests": Array [
TestCaseResult {
"name": "Test 2 Exception in test",
"result": "failed",
"time": 12,
},
],
},
],
"name": "test\\\\main_test.dart",
"totalTime": undefined,
},
TestSuiteResult {
"groups": Array [
TestGroupResult {
"name": null,
"tests": Array [
TestCaseResult {
"name": "Timeout test",
"result": "failed",
"time": 37,
},
TestCaseResult {
"name": "Skipped test",
"result": "skipped",
"time": 14,
},
],
},
],
"name": "test\\\\second_test.dart",
"totalTime": undefined,
},
],
"totalTime": 3760,
},
],
}
`;


@ -30,30 +30,59 @@ Actual: 2",
"title": "[DotnetTests.XUnitTests.CalculatorTests] Failing_Test",
},
],
"summary": "![Tests failed](https://img.shields.io/badge/tests-3%20passed%2C%201%20skipped%2C%203%20failed-critical)
### fixtures/dotnet-trx.trx
**7** tests were completed in **1.061s** with **3** passed, **1** skipped and **3** failed.
| Result | Suite | Tests | Time | Passed ✔️ | Skipped ✖️ | Failed ❌ |
| :---: | :--- | ---: | ---: | ---: | ---: | ---: |
| ❌ | [DotnetTests.XUnitTests.CalculatorTests](#ts-0-DotnetTests-XUnitTests-CalculatorTests) | 7 | 109.5761ms | 3 | 1 | 3 |
# Test Suites
## <a id=\\"user-content-ts-0-DotnetTests-XUnitTests-CalculatorTests\\" href=\\"#ts-0-DotnetTests-XUnitTests-CalculatorTests\\">DotnetTests.XUnitTests.CalculatorTests</a> ❌
| Result | Test | Time |
| :---: | :--- | ---: |
| ❌ | Exception_In_TargetTest | 0.4975ms |
| ❌ | Exception_In_Test | 2.2728ms |
| ❌ | Failing_Test | 3.2953ms |
| ✔️ | Passing_Test | 0.1254ms |
| ✔️ | Passing_Test_With_Name | 0.103ms |
| ✖️ | Skipped_Test | 1ms |
| ✔️ | Timeout_Test | 102.2821ms |
",
"title": "Dotnet TRX tests ❌",
"testRuns": Array [
TestRunResult {
"path": "fixtures/dotnet-trx.trx",
"suites": Array [
TestSuiteResult {
"groups": Array [
TestGroupResult {
"name": null,
"tests": Array [
TestCaseResult {
"name": "Exception_In_TargetTest",
"result": "failed",
"time": 0.4975,
},
TestCaseResult {
"name": "Exception_In_Test",
"result": "failed",
"time": 2.2728,
},
TestCaseResult {
"name": "Failing_Test",
"result": "failed",
"time": 3.2953,
},
TestCaseResult {
"name": "Passing_Test",
"result": "success",
"time": 0.1254,
},
TestCaseResult {
"name": "Passing_Test_With_Name",
"result": "success",
"time": 0.103,
},
TestCaseResult {
"name": "Skipped_Test",
"result": "skipped",
"time": 1,
},
TestCaseResult {
"name": "Timeout_Test",
"result": "success",
"time": 102.2821,
},
],
},
],
"name": "DotnetTests.XUnitTests.CalculatorTests",
"totalTime": undefined,
},
],
"totalTime": 1061,
},
],
}
`;

File diff suppressed because it is too large


@ -3,6 +3,7 @@ import * as path from 'path'
import {parseDartJson} from '../src/parsers/dart-json/dart-json-parser'
import {ParseOptions} from '../src/parsers/parser-types'
import {getReport} from '../src/report/get-report'
import {normalizeFilePath} from '../src/utils/file-utils'
const fixturePath = path.join(__dirname, 'fixtures', 'dart-json.json')
@ -15,17 +16,16 @@ const xmlFixture = {
describe('dart-json tests', () => {
it('matches report snapshot', async () => {
const opts: ParseOptions = {
name: 'Dart tests',
annotations: true,
trackedFiles: ['lib/main.dart', 'test/main_test.dart', 'test/second_test.dart'],
workDir: 'C:/Users/Michal/Workspace/dorny/test-check/reports/dart/'
}
const result = await parseDartJson([xmlFixture], opts)
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, result?.output?.summary ?? '')
expect(result).toMatchSnapshot()
expect(result.success).toBeFalsy()
expect(result?.output).toMatchSnapshot()
const report = getReport(result.testRuns)
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, report)
})
})


@ -3,6 +3,7 @@ import * as path from 'path'
import {parseDotnetTrx} from '../src/parsers/dotnet-trx/dotnet-trx-parser'
import {ParseOptions} from '../src/parsers/parser-types'
import {getReport} from '../src/report/get-report'
import {normalizeFilePath} from '../src/utils/file-utils'
const fixturePath = path.join(__dirname, 'fixtures', 'dotnet-trx.trx')
@ -15,17 +16,16 @@ const xmlFixture = {
describe('dotnet-trx tests', () => {
it('matches report snapshot', async () => {
const opts: ParseOptions = {
name: 'Dotnet TRX tests',
annotations: true,
trackedFiles: ['DotnetTests.Unit/Calculator.cs', 'DotnetTests.XUnitTests/CalculatorTests.cs'],
workDir: 'C:/Users/Michal/Workspace/dorny/test-check/reports/dotnet/'
}
const result = await parseDotnetTrx([xmlFixture], opts)
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, result?.output?.summary ?? '')
expect(result).toMatchSnapshot()
expect(result.success).toBeFalsy()
expect(result?.output).toMatchSnapshot()
const report = getReport(result.testRuns)
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, report)
})
})


@ -3,10 +3,10 @@ import * as path from 'path'
import {parseJestJunit} from '../src/parsers/jest-junit/jest-junit-parser'
import {ParseOptions} from '../src/parsers/parser-types'
import {getReport} from '../src/report/get-report'
import {normalizeFilePath} from '../src/utils/file-utils'
describe('jest-junit tests', () => {
it('report from ./reports/jest test results matches snapshot', async () => {
const fixturePath = path.join(__dirname, 'fixtures', 'jest-junit.xml')
const outputPath = path.join(__dirname, '__outputs__', 'jest-junit.md')
@ -16,18 +16,17 @@ describe('jest-junit tests', () => {
}
const opts: ParseOptions = {
name: 'jest tests',
annotations: true,
trackedFiles: ['__tests__/main.test.js', '__tests__/second.test.js', 'lib/main.js'],
workDir: 'C:/Users/Michal/Workspace/dorny/test-check/reports/jest/'
}
const result = await parseJestJunit([xmlFixture], opts)
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, result?.output?.summary ?? '')
expect(result).toMatchSnapshot()
expect(result.success).toBeFalsy()
expect(result?.output).toMatchSnapshot()
const report = getReport(result.testRuns)
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, report)
})
it('report from facebook/jest test results matches snapshot', async () => {
@ -42,16 +41,15 @@ describe('jest-junit tests', () => {
const trackedFiles = fs.readFileSync(filesPath, {encoding: 'utf8'}).split(/\n\r?/g)
const opts: ParseOptions = {
trackedFiles,
name: 'jest tests',
annotations: true,
workDir: '/home/dorny/dorny/jest/'
}
const result = await parseJestJunit([xmlFixture], opts)
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, result?.output?.summary ?? '')
expect(result).toMatchSnapshot()
expect(result.success).toBeFalsy()
expect(result?.output).toMatchSnapshot()
const report = getReport(result.testRuns, {listTests: 'only-failed'})
fs.mkdirSync(path.dirname(outputPath), {recursive: true})
fs.writeFileSync(outputPath, report)
})
})


@ -4,16 +4,6 @@ description: |
Supports .NET (xUnit, NUnit, MSTest), Dart, Flutter and JavaScript (JEST).
author: Michal Dorner <dorner.michal@gmail.com>
inputs:
fail-on-error:
description: Set this action as failed if the test report contains any failed test
required: true
default: 'true'
max-annotations:
description: |
Limits the number of created annotations with error message and stack trace captured during test execution.
Must be less than or equal to 50.
required: true
default: '10'
name:
description: Name of the check run
required: true
@ -31,13 +21,38 @@ inputs:
- flutter-machine
- jest-junit
required: true
list-suites:
description: |
Limits which test suites are listed. Supported options:
- all
- only-failed
required: true
default: 'all'
list-tests:
description: |
Limits which test cases are listed. Supported options:
- all
- only-failed
- none
required: true
default: 'all'
max-annotations:
description: |
Limits the number of created annotations with error message and stack trace captured during test execution.
Must be less than or equal to 50.
required: true
default: '10'
fail-on-error:
description: Set this action as failed if the test report contains any failed test
required: true
default: 'true'
working-directory:
description: Relative path under $GITHUB_WORKSPACE where the repository was checked out
required: false
token:
description: GitHub Access Token
required: false
default: ${{ github.token }}
working-directory:
description: Relative path under $GITHUB_WORKSPACE where the repository was checked out
required: false
outputs:
conclusion:
description: |


@ -5,10 +5,12 @@ import glob from 'fast-glob'
import {parseDartJson} from './parsers/dart-json/dart-json-parser'
import {parseDotnetTrx} from './parsers/dotnet-trx/dotnet-trx-parser'
import {parseJestJunit} from './parsers/jest-junit/jest-junit-parser'
import {getReport} from './report/get-report'
import {FileContent, ParseOptions, ParseTestResult} from './parsers/parser-types'
import {normalizeDirPath} from './utils/file-utils'
import {listFiles} from './utils/git'
import {enforceCheckRunLimits, getCheckRunSha} from './utils/github-utils'
import {Icon} from './utils/markdown-utils'
async function run(): Promise<void> {
try {
@ -19,13 +21,25 @@ async function run(): Promise<void> {
}
async function main(): Promise<void> {
const maxAnnotations = parseInt(core.getInput('max-annotations', {required: true}))
const failOnError = core.getInput('fail-on-error', {required: true}) === 'true'
const name = core.getInput('name', {required: true})
const path = core.getInput('path', {required: true})
const reporter = core.getInput('reporter', {required: true})
const token = core.getInput('token', {required: true})
const listSuites = core.getInput('list-suites', {required: true})
const listTests = core.getInput('list-tests', {required: true})
const maxAnnotations = parseInt(core.getInput('max-annotations', {required: true}))
const failOnError = core.getInput('fail-on-error', {required: true}) === 'true'
const workDirInput = core.getInput('working-directory', {required: false})
const token = core.getInput('token', {required: true})
if (listSuites !== 'all' && listSuites !== 'only-failed') {
core.setFailed(`Input parameter 'list-suites' has invalid value`)
return
}
if (listTests !== 'all' && listTests !== 'only-failed' && listTests !== 'none') {
core.setFailed(`Input parameter 'list-tests' has invalid value`)
return
}
if (isNaN(maxAnnotations) || maxAnnotations < 0 || maxAnnotations > 50) {
core.setFailed(`Input parameter 'max-annotations' has invalid value`)
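The new inputs are validated with plain string comparisons before the action proceeds. A minimal sketch of the same checks factored into a reusable helper (`getEnumInput` is a hypothetical name, not part of this commit):

```ts
// Hedged sketch, not from this commit: validating enum-like inputs in one place.
import * as core from '@actions/core'

function getEnumInput(name: string, allowed: readonly string[]): string {
  const value = core.getInput(name, {required: true})
  if (!allowed.includes(value)) {
    throw new Error(`Input parameter '${name}' has invalid value '${value}'`)
  }
  return value
}

// Usage mirroring the checks above:
// const listSuites = getEnumInput('list-suites', ['all', 'only-failed'])
// const listTests = getEnumInput('list-tests', ['all', 'only-failed', 'none'])
```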
@ -46,7 +60,6 @@ async function main(): Promise<void> {
const trackedFiles = annotations ? await listFiles() : []
const opts: ParseOptions = {
name,
trackedFiles,
workDir,
annotations
@ -62,9 +75,11 @@ async function main(): Promise<void> {
core.info(`Using test report parser '${reporter}'`)
const result = await parser(files, opts)
const conclusion = result.success ? 'success' : 'failure'
enforceCheckRunLimits(result, maxAnnotations)
const isFailed = result.testRuns.some(tr => tr.result === 'failed')
const conclusion = isFailed ? 'failure' : 'success'
const icon = isFailed ? Icon.fail : Icon.success
core.info(`Creating check run '${name}' with conclusion '${conclusion}'`)
await octokit.checks.create({
@ -72,12 +87,16 @@ async function main(): Promise<void> {
name,
conclusion,
status: 'completed',
output: result.output,
output: {
title: `${name} ${icon}`,
summary: getReport(result.testRuns, {listSuites, listTests}),
annotations: result.annotations
},
...github.context.repo
})
core.setOutput('conclusion', conclusion)
if (failOnError && !result.success) {
if (failOnError && isFailed) {
core.setFailed(`Failed test has been found and 'fail-on-error' option is set to ${failOnError}`)
}
}


@ -1,9 +1,8 @@
import * as core from '@actions/core'
import {Annotation, FileContent, ParseOptions, TestResult} from '../parser-types'
import getReport from '../../report/get-report'
import {normalizeFilePath} from '../../utils/file-utils'
import {Icon, fixEol} from '../../utils/markdown-utils'
import {fixEol} from '../../utils/markdown-utils'
import {
ReportEvent,
@ -72,16 +71,10 @@ class TestCase {
export async function parseDartJson(files: FileContent[], options: ParseOptions): Promise<TestResult> {
const testRuns = files.map(f => getTestRun(f.path, f.content))
const testRunsResults = testRuns.map(getTestRunResult)
const success = testRuns.every(tr => tr.success)
const icon = success ? Icon.success : Icon.fail
return {
success,
output: {
title: `${options.name.trim()} ${icon}`,
summary: getReport(testRunsResults),
annotations: options.annotations ? getAnnotations(testRuns, options.workDir, options.trackedFiles) : undefined
}
testRuns: testRunsResults,
annotations: options.annotations ? getAnnotations(testRuns, options.workDir, options.trackedFiles) : []
}
}


@ -5,7 +5,7 @@ import {Annotation, FileContent, ParseOptions, TestResult} from '../parser-types
import {parseStringPromise} from 'xml2js'
import {normalizeFilePath} from '../../utils/file-utils'
import {Icon, fixEol} from '../../utils/markdown-utils'
import {fixEol} from '../../utils/markdown-utils'
import {parseIsoDate, parseNetDuration} from '../../utils/parse-utils'
import {
@ -15,7 +15,6 @@ import {
TestGroupResult,
TestCaseResult
} from '../../report/test-results'
import getReport from '../../report/get-report'
class TestClass {
constructor(readonly name: string) {}
@ -54,16 +53,9 @@ export async function parseDotnetTrx(files: FileContent[], options: ParseOptions
testClasses.push(...tc)
}
const success = testRuns.every(tr => tr.result === 'success')
const icon = success ? Icon.success : Icon.fail
return {
success,
output: {
title: `${options.name.trim()} ${icon}`,
summary: getReport(testRuns),
annotations: options.annotations ? getAnnotations(testClasses, options.workDir, options.trackedFiles) : undefined
}
testRuns,
annotations: options.annotations ? getAnnotations(testClasses, options.workDir, options.trackedFiles) : []
}
}


@ -3,7 +3,7 @@ import {Annotation, FileContent, ParseOptions, TestResult} from '../parser-types
import {parseStringPromise} from 'xml2js'
import {JunitReport, TestCase, TestSuite} from './jest-junit-types'
import {fixEol, Icon} from '../../utils/markdown-utils'
import {fixEol} from '../../utils/markdown-utils'
import {normalizeFilePath} from '../../utils/file-utils'
import {
@ -13,7 +13,6 @@ import {
TestGroupResult,
TestCaseResult
} from '../../report/test-results'
import getReport from '../../report/get-report'
export async function parseJestJunit(files: FileContent[], options: ParseOptions): Promise<TestResult> {
const junit: JunitReport[] = []
@ -26,16 +25,9 @@ export async function parseJestJunit(files: FileContent[], options: ParseOptions
testRuns.push(tr)
}
const success = testRuns.every(tr => tr.result === 'success')
const icon = success ? Icon.success : Icon.fail
return {
success,
output: {
title: `${options.name.trim()} ${icon}`,
summary: getReport(testRuns),
annotations: options.annotations ? getAnnotations(junit, options.workDir, options.trackedFiles) : undefined
}
testRuns,
annotations: options.annotations ? getAnnotations(junit, options.workDir, options.trackedFiles) : []
}
}


@ -1,6 +1,5 @@
import {Endpoints} from '@octokit/types'
import {TestRunResult} from '../report/test-results'
export type OutputParameters = Endpoints['POST /repos/{owner}/{repo}/check-runs']['parameters']['output']
export type Annotation = {
path: string
start_line: number
@ -18,13 +17,12 @@ export type ParseTestResult = (files: FileContent[], options: ParseOptions) => P
export type FileContent = {path: string; content: string}
export interface ParseOptions {
name: string
annotations: boolean
workDir: string
trackedFiles: string[]
}
export interface TestResult {
success: boolean
output: OutputParameters
testRuns: TestRunResult[]
annotations: Annotation[]
}
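With this refactor, parsers return structured `testRuns` and `annotations` instead of a pre-built check-run output; success and the output payload are now derived by the caller. A hedged sketch of that consumer side, mirroring what src/main.ts does in this commit (import paths assume a module sitting next to parser-types.ts under src/parsers/):

```ts
// Hedged consumer-side sketch; not part of this commit.
import {TestResult} from './parser-types'
import {getReport} from '../report/get-report'

function buildCheckOutput(name: string, result: TestResult) {
  const isFailed = result.testRuns.some(tr => tr.result === 'failed')
  return {
    conclusion: isFailed ? 'failure' : 'success',
    output: {
      title: `${name} ${isFailed ? '❌' : '✔️'}`, // stand-ins for Icon.fail / Icon.success
      summary: getReport(result.testRuns, {listSuites: 'only-failed', listTests: 'only-failed'}),
      annotations: result.annotations
    }
  }
}
```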


@ -3,16 +3,50 @@ import {TestExecutionResult, TestRunResult, TestSuiteResult} from './test-result
import {Align, Icon, link, table} from '../utils/markdown-utils'
import {slug} from '../utils/slugger'
export default function getReport(results: TestRunResult[]): string {
const badge = getBadge(results)
const runsSummary = results.map(getRunSummary).join('\n\n')
const suites = results
.flatMap(tr => tr.suites)
.map((ts, i) => getSuiteSummary(ts, i))
.join('\n')
export interface ReportOptions {
listSuites?: 'all' | 'only-failed'
listTests?: 'all' | 'only-failed' | 'none'
}
const suitesSection = `# Test Suites\n\n${suites}`
return [badge, runsSummary, suitesSection].join('\n\n')
export function getReport(results: TestRunResult[], options: ReportOptions = {}): string {
const maxReportLength = 65535
const sections: string[] = []
const badge = getBadge(results)
sections.push(badge)
const runsSummary = results.map((tr, i) => getRunSummary(tr, i, options)).join('\n\n')
sections.push(runsSummary)
if (options.listTests !== 'none') {
const suitesSummary = results
.map((tr, runIndex) => {
const suites = options.listSuites === 'only-failed' ? tr.failedSuites : tr.suites
return suites
.map((ts, suiteIndex) => getSuiteSummary(ts, runIndex, suiteIndex, options))
.filter(str => str !== '')
})
.flat()
.join('\n')
const suitesSection = `# Test Suites\n\n${suitesSummary}`
sections.push(suitesSection)
}
const report = sections.join('\n\n')
if (report.length > maxReportLength) {
let msg = `**Check Run summary limit of ${maxReportLength} chars was exceeded**`
if (options.listTests !== 'all') {
msg += '\n- Consider setting `list-tests` option to `only-failed` or `none`'
}
if (options.listSuites !== 'all') {
msg += '\n- Consider setting `list-suites` option to `only-failed`'
}
return `${badge}\n\n${msg}`
}
return report
}
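For reference, a hedged usage sketch of the exported `getReport`/`ReportOptions` API defined above (option values are illustrative; the real call sites are src/main.ts and the updated tests):

```ts
// Hedged usage sketch; import paths assume a caller inside src/report/.
import {getReport, ReportOptions} from './get-report'
import {TestRunResult} from './test-results'

function summarize(runs: TestRunResult[]): string {
  const options: ReportOptions = {listSuites: 'only-failed', listTests: 'only-failed'}
  // If the full report would exceed the 65535-char check-run limit,
  // getReport falls back to the badge plus a hint to tighten these options.
  return getReport(runs, options)
}
```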
function getBadge(results: TestRunResult[]): string {
@ -36,36 +70,49 @@ function getBadge(results: TestRunResult[]): string {
return `![${text}](https://img.shields.io/badge/${uri})`
}
function getRunSummary(tr: TestRunResult): string {
function getRunSummary(tr: TestRunResult, runIndex: number, options: ReportOptions): string {
core.info('Generating check run summary')
const time = `${(tr.time / 1000).toFixed(3)}s`
const headingLine1 = `### ${tr.path}`
const headingLine2 = `**${tr.tests}** tests were completed in **${time}** with **${tr.passed}** passed, **${tr.skipped}** skipped and **${tr.failed}** failed.`
const suitesSummary = tr.suites.map((s, i) => {
const suites = options.listSuites === 'only-failed' ? tr.failedSuites : tr.suites
const suitesSummary = suites.map((s, suiteIndex) => {
const icon = getResultIcon(s.result)
const tsTime = `${s.time}ms`
const tsName = s.name
const tsAddr = makeSuiteSlug(i, tsName).link
const tsAddr = makeSuiteSlug(runIndex, suiteIndex, tsName).link
const tsNameLink = link(tsName, tsAddr)
return [icon, tsNameLink, s.tests, tsTime, s.passed, s.skipped, s.failed]
})
const summary = table(
['Result', 'Suite', 'Tests', 'Time', `Passed ${Icon.success}`, `Skipped ${Icon.skip}`, `Failed ${Icon.fail}`],
[Align.Center, Align.Left, Align.Right, Align.Right, Align.Right, Align.Right, Align.Right],
...suitesSummary
)
const summary =
suites.length === 0
? ''
: table(
['Result', 'Suite', 'Tests', 'Time', `Passed ${Icon.success}`, `Skipped ${Icon.skip}`, `Failed ${Icon.fail}`],
[Align.Center, Align.Left, Align.Right, Align.Right, Align.Right, Align.Right, Align.Right],
...suitesSummary
)
return [headingLine1, headingLine2, summary].join('\n\n')
}
function getSuiteSummary(ts: TestSuiteResult, index: number): string {
function getSuiteSummary(ts: TestSuiteResult, runIndex: number, suiteIndex: number, options: ReportOptions): string {
const groups = options.listTests === 'only-failed' ? ts.failedGroups : ts.groups
if (groups.length === 0) {
return ''
}
const icon = getResultIcon(ts.result)
const content = ts.groups
const content = groups
.map(grp => {
const tests = options.listTests === 'only-failed' ? grp.failedTests : grp.tests
if (tests.length === 0) {
return ''
}
const header = grp.name ? `### ${grp.name}\n\n` : ''
const tests = table(
const testsTable = table(
['Result', 'Test', 'Time'],
[Align.Center, Align.Left, Align.Right],
...grp.tests.map(tc => {
@ -76,19 +123,19 @@ function getSuiteSummary(ts: TestSuiteResult, index: number): string {
})
)
return `${header}${tests}\n`
return `${header}${testsTable}\n`
})
.join('\n')
const tsName = ts.name
const tsSlug = makeSuiteSlug(index, tsName)
const tsSlug = makeSuiteSlug(runIndex, suiteIndex, tsName)
const tsNameLink = `<a id="${tsSlug.id}" href="${tsSlug.link}">${tsName}</a>`
return `## ${tsNameLink} ${icon}\n\n${content}`
}
function makeSuiteSlug(index: number, name: string): {id: string; link: string} {
// use "ts-$index-" as prefix to avoid slug conflicts after escaping the paths
return slug(`ts-${index}-${name}`)
function makeSuiteSlug(runIndex: number, suiteIndex: number, name: string): {id: string; link: string} {
// use prefix to avoid slug conflicts after escaping the paths
return slug(`r${runIndex}s${suiteIndex}-${name}`)
}
function getResultIcon(result: TestExecutionResult): string {


@ -22,6 +22,10 @@ export class TestRunResult {
get result(): TestExecutionResult {
return this.suites.some(t => t.result === 'failed') ? 'failed' : 'success'
}
get failedSuites(): TestSuiteResult[] {
return this.suites.filter(s => s.result === 'failed')
}
}
export class TestSuiteResult {
@ -47,6 +51,10 @@ export class TestSuiteResult {
get result(): TestExecutionResult {
return this.groups.some(t => t.result === 'failed') ? 'failed' : 'success'
}
get failedGroups(): TestGroupResult[] {
return this.groups.filter(grp => grp.result === 'failed')
}
}
export class TestGroupResult {
@ -68,6 +76,10 @@ export class TestGroupResult {
get result(): TestExecutionResult {
return this.tests.some(t => t.result === 'failed') ? 'failed' : 'success'
}
get failedTests(): TestCaseResult[] {
return this.tests.filter(tc => tc.result === 'failed')
}
}
export class TestCaseResult {
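The new getters make it cheap to walk only the failing portion of a run. A hedged sketch using them (import path assumes a caller inside src/report/):

```ts
// Hedged sketch: collect names of failed test cases via the new getters.
import {TestRunResult} from './test-results'

function failedTestNames(run: TestRunResult): string[] {
  return run.failedSuites.flatMap(suite =>
    suite.failedGroups.flatMap(group => group.failedTests.map(tc => tc.name))
  )
}
```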


@ -14,16 +14,11 @@ export function getCheckRunSha(): string {
}
export function enforceCheckRunLimits(result: TestResult, maxAnnotations: number): void {
const output = result.output
if (!output) {
return
}
// Limit number of created annotations
output.annotations?.splice(maxAnnotations + 1)
result.annotations.splice(maxAnnotations + 1)
// Limit number of characters in annotation fields
for (const err of output.annotations ?? []) {
for (const err of result.annotations) {
err.title = ellipsis(err.title || '', 255)
err.message = ellipsis(err.message, 65535)
}